Mirror of https://github.com/tw93/Mole.git (synced 2026-02-05 00:59:41 +00:00)

Merge main into dev (resolve conflict in .gitignore)
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 2 changes)
@@ -8,7 +8,7 @@ assignees: ''
 
 ## Describe the bug
 
-A clear and concise description of what the bug is.
+A clear and concise description of what the bug is. We suggest using English for better global understanding.
 
 ## Steps to reproduce
 
.github/ISSUE_TEMPLATE/feature_request.md (vendored, 2 changes)
@@ -8,7 +8,7 @@ assignees: ''
 
 ## Feature description
 
-A clear and concise description of the feature you'd like to see.
+A clear and concise description of the feature you'd like to see. We suggest using English for better global understanding.
 
 ## Use case
 
.github/copilot-instructions.md (vendored, deleted, 1 line)
@@ -1 +0,0 @@
-../AGENT.md
.github/dependabot.yml (vendored, new file, 11 lines)
@@ -0,0 +1,11 @@
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+
+  - package-ecosystem: "gomod"
+    directory: "/"
+    schedule:
+      interval: "weekly"
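Dependabot has a few optional knobs beyond the schedule used above; for example, `open-pull-requests-limit` caps how many update PRs can be open at once. A minimal sketch (the key comes from Dependabot's documented config schema, not from this commit):

```yaml
updates:
  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 5 # optional: cap concurrent update PRs
```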
@@ -1,4 +1,4 @@
-name: Quality
+name: Check
 
 on:
   push:
@@ -10,18 +10,18 @@ permissions:
 
 jobs:
   format:
-    name: Auto Format
+    name: Format
     runs-on: macos-latest
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4
         with:
-          ref: ${{ github.head_ref }}
+          ref: ${{ (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository && github.head_ref) || github.ref }}
           token: ${{ secrets.GITHUB_TOKEN }}
 
       - name: Cache Homebrew
-        uses: actions/cache@v4
+        uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v4
         with:
           path: |
             ~/Library/Caches/Homebrew
@@ -35,19 +35,16 @@ jobs:
         run: brew install shfmt shellcheck
 
       - name: Set up Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v5
         with:
-          go-version: '1.24'
+          go-version: '1.24.6'
 
       - name: Format all code
         run: |
-          echo "Formatting shell scripts..."
-          ./scripts/format.sh
-          echo "Formatting Go code..."
-          gofmt -w ./cmd
-          echo "✓ All code formatted"
+          ./scripts/check.sh --format
 
       - name: Commit formatting changes
+        if: ${{ github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository }}
         run: |
           git config user.name "Tw93"
           git config user.email "tw93@qq.com"
@@ -61,18 +58,18 @@
         fi
 
   quality:
-    name: Code Quality
+    name: Check
     runs-on: macos-latest
     needs: format
 
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4
         with:
-          ref: ${{ github.head_ref }}
+          ref: ${{ (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository && github.head_ref) || github.ref }}
 
       - name: Cache Homebrew
-        uses: actions/cache@v4
+        uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v4
         with:
           path: |
             ~/Library/Caches/Homebrew
@@ -85,22 +82,5 @@ jobs:
       - name: Install tools
         run: brew install shfmt shellcheck
 
-      - name: ShellCheck
-        run: |
-          echo "Running ShellCheck on all shell scripts..."
-          shellcheck mole
-          shellcheck bin/*.sh
-          find lib -name "*.sh" -exec shellcheck {} +
-          echo "✓ ShellCheck passed"
-
-      - name: Syntax check
-        run: |
-          echo "Checking Bash syntax..."
-          bash -n mole
-          for script in bin/*.sh; do
-            bash -n "$script"
-          done
-          find lib -name "*.sh" | while read -r script; do
-            bash -n "$script"
-          done
-          echo "✓ All scripts have valid syntax"
+      - name: Run check script
+        run: ./scripts/check.sh --no-format
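The new `ref` expression is the interesting part of this workflow change: it checks out the PR head branch only for same-repo pull requests and falls back to the pushed ref otherwise, so the auto-format job never ends up on a fork branch it cannot push to (the commit step is additionally skipped by its `if:` guard). An annotated restatement of the same expression:

```yaml
# Same-repo PR -> github.head_ref (the PR branch, which GITHUB_TOKEN can push to)
# Fork PR or push event -> github.ref (read-only checkout; the commit step's
# `if:` condition skips the push in the fork case)
ref: ${{ (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository && github.head_ref) || github.ref }}
```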
.github/workflows/release.yml (vendored, 135 changes)
@@ -9,73 +9,78 @@ permissions:
   contents: write
 
 jobs:
-  build-release:
-    runs-on: macos-latest
+  build:
+    name: Build
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        include:
+          - os: macos-latest
+            target: release-amd64
+            artifact_name: binaries-amd64
+          - os: macos-latest
+            target: release-arm64
+            artifact_name: binaries-arm64
     steps:
-      - name: Checkout source code
-        uses: actions/checkout@v4
+      - name: Checkout code
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4
 
       - name: Set up Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v5
         with:
           go-version: "1.24.6"
           cache: true
 
-      - name: Build Universal Binary for disk analyzer
-        run: ./scripts/build-analyze.sh
-
-      - name: Build Universal Binary for system status
-        run: ./scripts/build-status.sh
-
-      - name: Verify binary is valid
+      - name: Build Binaries
         run: |
-          if [[ ! -x bin/analyze-go ]]; then
-            echo "Error: bin/analyze-go is not executable"
-            exit 1
-          fi
-          if [[ ! -x bin/status-go ]]; then
-            echo "Error: bin/status-go is not executable"
-            exit 1
-          fi
-          echo "Binary info:"
-          file bin/analyze-go
-          ls -lh bin/analyze-go
-          file bin/status-go
-          ls -lh bin/status-go
-          echo ""
-          echo "✓ Universal binary built successfully"
+          make ${{ matrix.target }}
+          ls -l bin/
 
-      - name: Commit binaries for release
+      - name: Package binaries for Homebrew
         run: |
-          # Configure Git
-          git config user.name "Tw93"
-          git config user.email "tw93@qq.com"
-
-          # Save binaries to temp location
-          cp bin/analyze-go /tmp/analyze-go
-          cp bin/status-go /tmp/status-go
-
-          # Switch to main branch
-          git fetch origin main
-          git checkout main
-          git pull origin main
-
-          # Restore binaries
-          mv /tmp/analyze-go bin/analyze-go
-          mv /tmp/status-go bin/status-go
-
-          # Commit and Push
-          git add bin/analyze-go bin/status-go
-          if git diff --staged --quiet; then
-            echo "No changes to commit"
+          cd bin
+          # Package binaries into tar.gz for Homebrew resource
+          if [[ "${{ matrix.target }}" == "release-arm64" ]]; then
+            tar -czf binaries-darwin-arm64.tar.gz analyze-darwin-arm64 status-darwin-arm64
+            ls -lh binaries-darwin-arm64.tar.gz
           else
-            git commit -m "chore: update binaries for ${GITHUB_REF#refs/tags/}"
-            git push origin main
+            tar -czf binaries-darwin-amd64.tar.gz analyze-darwin-amd64 status-darwin-amd64
+            ls -lh binaries-darwin-amd64.tar.gz
           fi
 
+      - name: Upload artifacts
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
+        with:
+          name: ${{ matrix.artifact_name }}
+          path: bin/*-darwin-*
+          retention-days: 1
+
+  release:
+    name: Publish Release
+    needs: build
+    runs-on: ubuntu-latest
+    steps:
+      - name: Download all artifacts
+        uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
+        with:
+          path: bin
+          pattern: binaries-*
+          merge-multiple: true
+
+      - name: Display structure of downloaded files
+        run: ls -R bin/
+
+      - name: Create Release
+        uses: softprops/action-gh-release@a06a81a03ee405af7f2048a818ed3f03bbf83c7b # v2
+        if: startsWith(github.ref, 'refs/tags/')
+        with:
+          files: bin/*
+          generate_release_notes: true
+          draft: false
+          prerelease: false
+
   update-formula:
     runs-on: ubuntu-latest
-    needs: build-release
+    needs: release
     steps:
       - name: Extract version from tag
         id: tag_version
@@ -86,8 +91,8 @@ jobs:
           echo "version=$VERSION" >> $GITHUB_OUTPUT
           echo "Releasing version: $VERSION (tag: $TAG)"
 
-      - name: Update Homebrew formula
-        uses: mislav/bump-homebrew-formula-action@v3
+      - name: Update Homebrew formula (Personal Tap)
+        uses: mislav/bump-homebrew-formula-action@56a283fa15557e9abaa4bdb63b8212abc68e655c # v3.6
         with:
           formula-name: mole
           formula-path: Formula/mole.rb
@@ -100,9 +105,25 @@ jobs:
         env:
           COMMITTER_TOKEN: ${{ secrets.PAT_TOKEN }}
 
-      - name: Verify formula update
+      - name: Update Homebrew formula (Official Core)
+        uses: mislav/bump-homebrew-formula-action@56a283fa15557e9abaa4bdb63b8212abc68e655c # v3.6
+        with:
+          formula-name: mole
+          homebrew-tap: Homebrew/homebrew-core
+          tag-name: ${{ steps.tag_version.outputs.tag }}
+          commit-message: |
+            mole ${{ steps.tag_version.outputs.version }}
+
+            Automated release via GitHub Actions
+        env:
+          COMMITTER_TOKEN: ${{ secrets.HOMEBREW_GITHUB_API_TOKEN }}
+        continue-on-error: true
+
+      - name: Verify formula updates
         if: success()
         run: |
-          echo "✓ Homebrew formula updated successfully"
+          echo "✓ Homebrew formulae updated successfully"
           echo "  Version: ${{ steps.tag_version.outputs.version }}"
           echo "  Tag: ${{ steps.tag_version.outputs.tag }}"
+          echo "  Personal tap: tw93/homebrew-tap"
+          echo "  Official core: Homebrew/homebrew-core (PR created)"
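A Homebrew formula that consumes these tarballs as resources needs a SHA-256 checksum for each archive. A small sketch of computing them locally with the stock macOS tool (the formula field names that would reference these are standard Homebrew, not part of this workflow):

```bash
# Checksums for the `sha256` stanza of a formula `resource` block
shasum -a 256 bin/binaries-darwin-arm64.tar.gz
shasum -a 256 bin/binaries-darwin-amd64.tar.gz
```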
.github/workflows/test.yml (vendored, new file, 85 lines)
@@ -0,0 +1,85 @@
+name: Test
+
+on:
+  push:
+    branches: [main, dev]
+  pull_request:
+    branches: [main, dev]
+
+jobs:
+  tests:
+    name: Test
+    runs-on: macos-latest
+    steps:
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4
+
+      - name: Install tools
+        run: brew install bats-core shellcheck
+
+      - name: Set up Go
+        uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v5
+        with:
+          go-version: "1.24.6"
+
+      - name: Run test script
+        env:
+          MOLE_PERF_BYTES_TO_HUMAN_LIMIT_MS: "6000"
+          MOLE_PERF_GET_FILE_SIZE_LIMIT_MS: "3000"
+        run: ./scripts/test.sh
+
+  compatibility:
+    name: macOS Compatibility
+    strategy:
+      matrix:
+        os: [macos-14, macos-15]
+    runs-on: ${{ matrix.os }}
+    steps:
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4
+
+      - name: Test on ${{ matrix.os }}
+        run: |
+          echo "Testing on ${{ matrix.os }}..."
+          bash -n mole
+          source lib/core/common.sh
+          echo "✓ Successfully loaded on ${{ matrix.os }}"
+
+  security:
+    name: Security Checks
+    runs-on: macos-latest
+    steps:
+      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4
+
+      - name: Check for unsafe rm usage
+        run: |
+          echo "Checking for unsafe rm patterns..."
+          if grep -r "rm -rf" --include="*.sh" lib/ | grep -v "safe_remove\|validate_path\|# "; then
+            echo "✗ Unsafe rm -rf usage found"
+            exit 1
+          fi
+          echo "✓ No unsafe rm usage found"
+
+      - name: Verify app protection
+        run: |
+          echo "Verifying critical file protection..."
+          bash -c '
+            source lib/core/common.sh
+            if should_protect_from_uninstall "com.apple.Safari"; then
+              echo "✓ Safari is protected"
+            else
+              echo "✗ Safari protection failed"
+              exit 1
+            fi
+          '
+
+      - name: Check for secrets
+        run: |
+          echo "Checking for hardcoded secrets..."
+          matches=$(grep -r "password\|secret\|api_key" --include="*.sh" . \
+            | grep -v "# \|test" \
+            | grep -v -E "lib/core/sudo\.sh|lib/core/app_protection\.sh|lib/clean/user\.sh|lib/clean/brew\.sh|bin/optimize\.sh" || true)
+          if [[ -n "$matches" ]]; then
+            echo "$matches"
+            echo "✗ Potential secrets found"
+            exit 1
+          fi
+          echo "✓ No secrets found"
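The same suite can be run locally before pushing. A sketch using the commands and environment knobs from the workflow above (the `MOLE_PERF_*` variables appear to relax performance assertions, presumably for slower shared runners):

```bash
brew install bats-core shellcheck

# Mirror the CI environment when running the repo's test entry point
MOLE_PERF_BYTES_TO_HUMAN_LIMIT_MS=6000 \
MOLE_PERF_GET_FILE_SIZE_LIMIT_MS=3000 \
./scripts/test.sh
```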
.github/workflows/tests.yml (vendored, deleted, 140 lines)
@@ -1,140 +0,0 @@
-name: Tests
-
-on:
-  push:
-    branches: [main, dev]
-  pull_request:
-    branches: [main, dev]
-
-jobs:
-  unit-tests:
-    name: Unit Tests
-    runs-on: macos-latest
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Install bats
-        run: brew install bats-core
-
-      - name: Run all test suites
-        run: |
-          echo "Running all test suites..."
-          bats tests/*.bats --formatter tap
-          echo ""
-          echo "Test summary:"
-          echo "  Total test files: $(ls tests/*.bats | wc -l | tr -d ' ')"
-          echo "  Total tests: $(grep -c "^@test" tests/*.bats | awk -F: '{sum+=$2} END {print sum}')"
-          echo "✓ All tests passed"
-
-  go-tests:
-    name: Go Tests
-    runs-on: macos-latest
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Set up Go
-        uses: actions/setup-go@v5
-        with:
-          go-version: '1.24'
-
-      - name: Build Go binaries
-        run: |
-          echo "Building Go binaries..."
-          go build ./...
-          echo "✓ Build successful"
-
-      - name: Run go vet
-        run: |
-          echo "Running go vet..."
-          go vet ./cmd/...
-          echo "✓ Vet passed"
-
-      - name: Run go test
-        run: |
-          echo "Running go test..."
-          go test ./cmd/...
-          echo "✓ Go tests passed"
-
-  integration-tests:
-    name: Integration Tests
-    runs-on: macos-latest
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Install dependencies
-        run: brew install coreutils
-
-      - name: Test module loading
-        run: |
-          echo "Testing module loading..."
-          bash -c 'source lib/core/common.sh && echo "✓ Modules loaded successfully"'
-
-      - name: Test clean --dry-run
-        run: |
-          echo "Testing clean --dry-run..."
-          ./bin/clean.sh --dry-run
-          echo "✓ Clean dry-run completed"
-
-      - name: Test installation
-        run: |
-          echo "Testing installation script..."
-          ./install.sh --prefix /tmp/mole-test
-          test -f /tmp/mole-test/mole
-          echo "✓ Installation successful"
-
-  compatibility:
-    name: macOS Compatibility
-    strategy:
-      matrix:
-        os: [macos-14, macos-15]
-    runs-on: ${{ matrix.os }}
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Test on ${{ matrix.os }}
-        run: |
-          echo "Testing on ${{ matrix.os }}..."
-          bash -n mole
-          source lib/core/common.sh
-          echo "✓ Successfully loaded on ${{ matrix.os }}"
-
-  security:
-    name: Security Checks
-    runs-on: macos-latest
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Check for unsafe rm usage
-        run: |
-          echo "Checking for unsafe rm patterns..."
-          if grep -r "rm -rf" --include="*.sh" lib/ | grep -v "safe_remove\|validate_path\|# "; then
-            echo "✗ Unsafe rm -rf usage found"
-            exit 1
-          fi
-          echo "✓ No unsafe rm usage found"
-
-      - name: Verify app protection
-        run: |
-          echo "Verifying critical file protection..."
-          bash -c '
-            source lib/core/common.sh
-            if should_protect_from_uninstall "com.apple.Safari"; then
-              echo "✓ Safari is protected"
-            else
-              echo "✗ Safari protection failed"
-              exit 1
-            fi
-          '
-
-      - name: Check for secrets
-        run: |
-          echo "Checking for hardcoded secrets..."
-          matches=$(grep -r "password\|secret\|api_key" --include="*.sh" . \
-            | grep -v "# \|test" \
-            | grep -v -E "lib/core/sudo\.sh|lib/core/app_protection\.sh|lib/clean/user\.sh|lib/clean/brew\.sh" || true)
-          if [[ -n "$matches" ]]; then
-            echo "$matches"
-            echo "✗ Potential secrets found"
-            exit 1
-          fi
-          echo "✓ No secrets found"
.gitignore (vendored, 4 changes)
@@ -43,6 +43,7 @@ temp/
 # AI Assistant Instructions
 .claude/
 .gemini/
+.kiro/
 CLAUDE.md
 GEMINI.md
 .cursorrules
@@ -51,8 +52,11 @@ GEMINI.md
 cmd/analyze/analyze
 cmd/status/status
+/status
+/analyze
 mole-analyze
-# Note: bin/analyze-go and bin/status-go are released binaries and should be tracked
+bin/analyze-darwin-*
+bin/status-darwin-*
 
 # Swift / Xcode
 .build/
 
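To confirm which rule swallows a given build artifact, `git check-ignore -v` prints the matching pattern and its source line; for example:

```bash
git check-ignore -v bin/analyze-darwin-arm64
# expected output (line number illustrative):
# .gitignore:56:bin/analyze-darwin-*	bin/analyze-darwin-arm64
```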
AGENT.md (157 changes)
@@ -1,130 +1,49 @@
-# Mole AI Agent Documentation
-
-> **READ THIS FIRST**: This file serves as the single source of truth for any AI agent trying to work on the Mole repository. It aggregates architectural context, development workflows, and behavioral guidelines.
-
-## 1. Philosophy & Guidelines
-
-### Core Philosophy
-
-- **Safety First**: Never risk user data. Always use `safe_*` wrappers. When in doubt, ask.
-- **Incremental Progress**: Break complex tasks into manageable stages.
-- **Clear Intent**: Prioritize readability and maintainability over clever hacks.
-- **Native Performance**: Use Go for heavy lifting (scanning), Bash for system glue.
-
-### Eight Honors and Eight Shames
-
-- **Shame** in guessing APIs, **Honor** in careful research.
-- **Shame** in vague execution, **Honor** in seeking confirmation.
-- **Shame** in assuming business logic, **Honor** in human verification.
-- **Shame** in creating interfaces, **Honor** in reusing existing ones.
-- **Shame** in skipping validation, **Honor** in proactive testing.
-- **Shame** in breaking architecture, **Honor** in following specifications.
-- **Shame** in pretending to understand, **Honor** in honest ignorance.
-- **Shame** in blind modification, **Honor** in careful refactoring.
-
-### Quality Standards
-
-- **English Only**: Comments and code must be in English.
-- **No Unnecessary Comments**: Code should be self-explanatory.
-- **Pure Shell Style**: Use `[[ ]]` over `[ ]`, avoid `local var` assignments on definition line if exit code matters.
-- **Go Formatting**: Always run `gofmt` (or let the build script do it).
-
-## 2. Project Identity
-
-- **Name**: Mole
-- **Purpose**: A lightweight, robust macOS cleanup and system analysis tool.
-- **Core Value**: Native, fast, safe, and dependency-free (pure Bash + static Go binary).
-- **Mechanism**:
-  - **Cleaning**: Pure Bash scripts for transparency and safety.
-  - **Analysis**: High-concurrency Go TUI (Bubble Tea) for disk scanning.
-  - **Monitoring**: Real-time Go TUI for system status.
-
-## 3. Technology Stack
-
-- **Shell**: Bash 3.2+ (macOS default compatible).
-- **Go**: Latest Stable (Bubble Tea framework).
-- **Testing**:
-  - **Shell**: `bats-core`, `shellcheck`.
-  - **Go**: Native `testing` package.
-
-## 4. Repository Architecture
-
-### Directory Structure
-
-- **`bin/`**: Standalone entry points.
-  - `mole`: Main CLI wrapper.
-  - `clean.sh`, `uninstall.sh`: Logic wrappers calling `lib/`.
-- **`cmd/`**: Go applications.
-  - `analyze/`: Disk space analyzer (concurrent, TUI).
-  - `status/`: System monitor (TUI).
-- **`lib/`**: Core Shell Logic.
-  - `core/`: Low-level utilities (logging, `safe_remove`, sudo helpers).
-  - `clean/`: Domain-specific cleanup tasks (`brew`, `caches`, `system`).
-  - `ui/`: Reusable TUI components (`menu_paginated.sh`).
-- **`scripts/`**: Development tools (`run-tests.sh`, `build-analyze.sh`).
-- **`tests/`**: BATS integration tests.
-
-## 5. Key Workflows
-
-### Development
-
-1. **Understand**: Read `lib/core/` to know what tools are available.
-2. **Implement**:
-   - For Shell: Add functions to `lib/`, source them in `bin/`.
-   - For Go: Edit `cmd/app/*.go`.
-3. **Verify**: Use dry-run modes first.
-
-**Commands**:
-
-- `./scripts/run-tests.sh`: **Run EVERYTHING** (Lint, Syntax, Unit, Go).
-- `./bin/clean.sh --dry-run`: Test cleanup logic safely.
-- `go run ./cmd/analyze`: Run analyzer in dev mode.
-
-### Building
-
-- `./scripts/build-analyze.sh`: Compiles `analyze-go` binary (Universal).
-- `./scripts/build-status.sh`: Compiles `status-go` binary.
-
-### Release
-
-- Versions managed via git tags.
-- Build scripts embed version info into binaries.
-
-## 6. Implementation Details
-
-### Safety System (`lib/core/file_ops.sh`)
-
-- **Crucial**: Never use `rm -rf` directly.
-- **Use**:
-  - `safe_remove "/path"`
-  - `safe_find_delete "/path" "*.log" 7 "f"`
-- **Protection**:
-  - `validate_path_for_deletion` prevents root/system deletion.
-  - `checks` ensure path is absolute and safe.
-
-### Go Concurrency (`cmd/analyze`)
-
-- **Worker Pool**: Tuned dynamically (16-64 workers) to respect system load.
-- **Throttling**: UI updates throttled (every 100 items) to keep TUI responsive (80ms tick).
-- **Memory**: Uses Heaps for top-file tracking to minimize RAM usage.
-
-### TUI Unification
-
-- **Keybindings**: `j/k` (Nav), `space` (Select), `enter` (Action), `R` (Refresh).
-- **Style**: Compact footers ` | ` and standard colors defined in `lib/core/base.sh` or Go constants.
-
-## 7. Common AI Tasks
-
-- **Adding a Cleanup Task**:
-  1. Create/Edit `lib/clean/topic.sh`.
-  2. Define `clean_topic()`.
-  3. Register in `lib/optimize/tasks.sh` or `bin/clean.sh`.
-  4. **MUST** use `safe_*` functions.
-- **Modifying Go UI**:
-  1. Update `model` struct in `main.go`.
-  2. Update `View()` in `view.go`.
-  3. Run `./scripts/build-analyze.sh` to test.
-- **Fixing a Bug**:
-  1. Reproduce with a new BATS test in `tests/`.
-  2. Fix logic.
-  3. Verify with `./scripts/run-tests.sh`.
+# Mole AI Agent Notes
+
+Use this file as the single source of truth for how to work on Mole.
+
+## Principles
+
+- Safety first: never risk user data or system stability.
+- Never run destructive operations that could break the user's machine.
+- Do not delete user-important files; cleanup must be conservative and reversible.
+- Always use `safe_*` helpers (no raw `rm -rf`).
+- Keep changes small and confirm uncertain behavior.
+- Follow the local code style in the file you are editing (Bash 3.2 compatible).
+- Comments must be English, concise, and intent-focused.
+- Use comments for safety boundaries, non-obvious logic, or flow context.
+- Entry scripts start with ~3 short lines describing purpose/behavior.
+- Shell code must use shell-only helpers (no Python).
+- Go code must use Go-only helpers (no Python).
+- Do not remove installer flags `--prefix`/`--config` (update flow depends on them).
+- Do not commit or submit code changes unless explicitly requested.
+- You may use `gh` to access GitHub information when needed.
+
+## Architecture
+
+- `mole`: main CLI entrypoint (menu + command routing).
+- `mo`: CLI alias wrapper.
+- `install.sh`: manual installer/updater (download/build + install).
+- `bin/`: command entry points (`clean.sh`, `uninstall.sh`, `optimize.sh`, `purge.sh`, `touchid.sh`,
+  `analyze.sh`, `status.sh`).
+- `lib/`: shell logic (`core/`, `clean/`, `ui/`).
+- `cmd/`: Go apps (`analyze/`, `status/`).
+- `scripts/`: build/test helpers.
+- `tests/`: BATS integration tests.
+
+## Workflow
+
+- Shell work: add logic under `lib/`, call from `bin/`.
+- Go work: edit `cmd/<app>/*.go`.
+- Prefer dry-run modes while validating cleanup behavior.
+
+## Build & Test
+
+- `./scripts/test.sh` runs unit/go/integration tests.
+- `make build` builds Go binaries for local development.
+- `go run ./cmd/analyze` for dev runs without building.
+
+## Key Behaviors
+
+- `mole update` uses `install.sh` with `--prefix`/`--config`; keep these flags.
+- Cleanup must go through `safe_*` and respect protection lists.
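A minimal sketch of the cleanup-task pattern the notes above describe, built on the `safe_*` helpers whose signatures appear elsewhere in this commit (the topic name and cache path are hypothetical):

```bash
#!/bin/bash
# lib/clean/example.sh - hypothetical cleanup task following the AGENT.md pattern

clean_example() {
    # Conservative by default: only logs older than 7 days, via the safe_* middleware
    safe_find_delete "$HOME/Library/Caches/com.example.app" "*.log" 7 "f"
    # Whole-directory removal also goes through validation, never raw rm -rf
    safe_remove "$HOME/Library/Caches/com.example.app/tmp"
}
```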
@@ -9,26 +9,16 @@ brew install shfmt shellcheck bats-core
 
 ## Development
 
-Run all quality checks before committing:
+Run quality checks before committing (auto-formats code):
 
 ```bash
 ./scripts/check.sh
 ```
 
-This command runs:
-
-- Code formatting check
-- ShellCheck linting
-- Unit tests
-
-Individual commands:
+Run tests:
 
 ```bash
-# Format code
-./scripts/format.sh
-
-# Run tests only
-./tests/run.sh
+./scripts/test.sh
 ```
 
 ## Code Style
@@ -54,8 +44,8 @@ Config: `.editorconfig` and `.shellcheckrc`
 # Single file/directory
 safe_remove "/path/to/file"
 
-# Batch delete with find
-safe_find_delete "$dir" "*.log" 7 "f" # files older than 7 days
+# Purge files older than 7 days
+safe_find_delete "$dir" "*.log" 7 "f"
 
 # With sudo
 safe_sudo_remove "/Library/Caches/com.example"
@@ -137,7 +127,7 @@ Format: `[MODULE_NAME] message` output to stderr.
 - macOS 10.14 or newer, works on Intel and Apple Silicon
 - Default macOS Bash 3.2+ plus administrator privileges for cleanup tasks
 - Install Command Line Tools with `xcode-select --install` for curl, tar, and related utilities
-- Go 1.24+ required when building the `mo status` or `mo analyze` TUI binaries locally
+- Go 1.24+ is required to build the `mo status` or `mo analyze` TUI binaries locally.
 
 ## Go Components
 
@@ -154,14 +144,28 @@ Format: `[MODULE_NAME] message` output to stderr.
 - Format code with `gofmt -w ./cmd/...`
 - Run `go vet ./cmd/...` to check for issues
 - Build with `go build ./...` to verify all packages compile
-- Build universal binaries via `./scripts/build-status.sh` and `./scripts/build-analyze.sh`
+
+**Building Go Binaries:**
+
+For local development:
+
+```bash
+# Build binaries for current architecture
+make build
+
+# Or run directly without building
+go run ./cmd/analyze
+go run ./cmd/status
+```
+
+For releases, GitHub Actions builds architecture-specific binaries automatically.
 
 **Guidelines:**
 
 - Keep files focused on single responsibility
 - Extract constants instead of magic numbers
 - Use context for timeout control on external commands
-- Add comments explaining why, not what
+- Add comments explaining **why** something is done, not just **what** is being done.
 
 ## Pull Requests
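The `check.sh` entry point appears with different flags across this commit's workflows; based solely on those usages, the flag semantics appear to be:

```bash
./scripts/check.sh             # format + lint + unit tests (pre-commit default)
./scripts/check.sh --format    # formatting pass only (used by the Format job)
./scripts/check.sh --no-format # checks without mutating files (used by the Check job)
```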
Makefile (new file, 40 lines)
@@ -0,0 +1,40 @@
+# Makefile for Mole
+
+.PHONY: all build clean release
+
+# Output directory
+BIN_DIR := bin
+
+# Binaries
+ANALYZE := analyze
+STATUS := status
+
+# Source directories
+ANALYZE_SRC := ./cmd/analyze
+STATUS_SRC := ./cmd/status
+
+# Build flags
+LDFLAGS := -s -w
+
+all: build
+
+# Local build (current architecture)
+build:
+	@echo "Building for local architecture..."
+	go build -ldflags="$(LDFLAGS)" -o $(BIN_DIR)/$(ANALYZE)-go $(ANALYZE_SRC)
+	go build -ldflags="$(LDFLAGS)" -o $(BIN_DIR)/$(STATUS)-go $(STATUS_SRC)
+
+# Release build targets (run on native architectures for CGO support)
+release-amd64:
+	@echo "Building release binaries (amd64)..."
+	GOOS=darwin GOARCH=amd64 go build -ldflags="$(LDFLAGS)" -o $(BIN_DIR)/$(ANALYZE)-darwin-amd64 $(ANALYZE_SRC)
+	GOOS=darwin GOARCH=amd64 go build -ldflags="$(LDFLAGS)" -o $(BIN_DIR)/$(STATUS)-darwin-amd64 $(STATUS_SRC)
+
+release-arm64:
+	@echo "Building release binaries (arm64)..."
+	GOOS=darwin GOARCH=arm64 go build -ldflags="$(LDFLAGS)" -o $(BIN_DIR)/$(ANALYZE)-darwin-arm64 $(ANALYZE_SRC)
+	GOOS=darwin GOARCH=arm64 go build -ldflags="$(LDFLAGS)" -o $(BIN_DIR)/$(STATUS)-darwin-arm64 $(STATUS_SRC)
+
+clean:
+	@echo "Cleaning binaries..."
+	rm -f $(BIN_DIR)/$(ANALYZE)-* $(BIN_DIR)/$(STATUS)-* $(BIN_DIR)/$(ANALYZE)-go $(BIN_DIR)/$(STATUS)-go
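Typical invocations, given the targets above:

```bash
make build          # bin/analyze-go and bin/status-go for the current architecture
make release-arm64  # bin/analyze-darwin-arm64 and bin/status-darwin-arm64
make clean          # remove all built binaries
```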
README.md (78 changes)
@@ -18,24 +18,25 @@
 
 ## Features
 
-- **All-in-one toolkit** combining the power of CleanMyMac, AppCleaner, DaisyDisk, Sensei, and iStat in one **trusted binary**
-- **Deep cleanup** scans and removes caches, logs, browser leftovers, and junk to **reclaim tens of gigabytes**
-- **Smart uninstall** completely removes apps including launch agents, preferences, caches, and **hidden leftovers**
-- **Disk insight + optimization** visualizes usage, handles large files, **rebuilds caches**, cleans swap, and refreshes services
-- **Live status** monitors CPU, GPU, memory, disk, network, battery, and proxy stats to **diagnose issues**
+- **Unified toolkit**: Consolidated features of CleanMyMac, AppCleaner, DaisyDisk, and iStat into a **single binary**
+- **Deep cleaning**: Scans and removes caches, logs, and browser leftovers to **reclaim gigabytes of space**
+- **Smart uninstaller**: Thoroughly removes apps along with launch agents, preferences, and **hidden remnants**
+- **Disk insights**: Visualizes usage, manages large files, **rebuilds caches**, and refreshes system services
+- **Live monitoring**: Real-time stats for CPU, GPU, memory, disk, and network to **diagnose performance issues**
 
 ## Quick Start
 
-**Installation:**
+**Install by Brew, recommended:**
 
 ```bash
-curl -fsSL https://raw.githubusercontent.com/tw93/mole/main/install.sh | bash
+brew install mole
 ```
 
-Or via Homebrew:
+**or by Script, for older macOS or latest code:**
 
 ```bash
-brew install tw93/tap/mole
+# Use for older macOS or latest code; add '-s latest' for newest, or '-s 1.17.0' for a fixed version.
+curl -fsSL https://raw.githubusercontent.com/tw93/mole/main/install.sh | bash
 ```
 
 **Run:**
@@ -47,18 +48,21 @@ mo uninstall # Remove apps + leftovers
 mo optimize # Refresh caches & services
 mo analyze # Visual disk explorer
 mo status # Live system health dashboard
+mo purge # Clean project build artifacts
 
 mo touchid # Configure Touch ID for sudo
+mo completion # Setup shell tab completion
 mo update # Update Mole
 mo remove # Remove Mole from system
 mo --help # Show help
 mo --version # Show installed version
 
-mo clean --dry-run # Preview cleanup plan
-mo clean --whitelist # Adjust protected caches
-mo uninstall --force-rescan # Rescan apps and refresh cache
-mo optimize --whitelist # Adjust protected optimization items
+mo clean --dry-run # Preview the cleanup plan
+mo clean --whitelist # Manage protected caches
+
+mo optimize --dry-run # Preview optimization actions
+mo optimize --whitelist # Manage protected optimization rules
+mo purge --paths # Configure project scan directories
 ```
 
 ## Tips
@@ -67,6 +71,7 @@ mo optimize --whitelist # Adjust protected optimization items
 - **Safety**: Built with strict protections. See our [Security Audit](SECURITY_AUDIT.md). Preview changes with `mo clean --dry-run`.
 - **Whitelist**: Manage protected paths with `mo clean --whitelist`.
 - **Touch ID**: Enable Touch ID for sudo commands by running `mo touchid`.
+- **Shell Completion**: Enable tab completion by running `mo completion` (auto-detect and install).
 - **Navigation**: Supports standard arrow keys and Vim bindings (`h/j/k/l`).
 - **Debug**: View detailed logs by appending the `--debug` flag (e.g., `mo clean --debug`).
 
@@ -181,6 +186,42 @@ Proxy HTTP · 192.168.1.100   Terminal ▮▯▯▯▯ 12.5%
 
 Health score based on CPU, memory, disk, temperature, and I/O load. Color-coded by range.
 
+### Project Artifact Purge
+
+Clean old build artifacts (`node_modules`, `target`, `build`, `dist`, etc.) from your projects to free up disk space.
+
+```bash
+mo purge
+
+Select Categories to Clean - 18.5GB (8 selected)
+
+➤ ● my-react-app      3.2GB | node_modules
+  ● old-project       2.8GB | node_modules
+  ● rust-app          4.1GB | target
+  ● next-blog         1.9GB | node_modules
+  ○ current-work      856MB | node_modules | Recent
+  ● django-api        2.3GB | venv
+  ● vue-dashboard     1.7GB | node_modules
+  ● backend-service   2.5GB | node_modules
+```
+
+> **Use with caution:** This will permanently delete selected artifacts. Review carefully before confirming. Recent projects (< 7 days) are marked and unselected by default.
+
+<details>
+<summary><strong>Custom Scan Paths</strong></summary>
+
+Run `mo purge --paths` to configure which directories to scan, or edit `~/.config/mole/purge_paths` directly:
+
+```shell
+~/Documents/MyProjects
+~/Work/ClientA
+~/Work/ClientB
+```
+
+When custom paths are configured, only those directories are scanned. Otherwise, defaults to `~/Projects`, `~/GitHub`, `~/dev`, etc.
+
+</details>
+
 ## Quick Launchers
 
 Launch Mole commands instantly from Raycast or Alfred:
@@ -189,7 +230,15 @@ Launch Mole commands instantly from Raycast or Alfred:
 curl -fsSL https://raw.githubusercontent.com/tw93/Mole/main/scripts/setup-quick-launchers.sh | bash
 ```
 
-Adds 5 commands: `clean`, `uninstall`, `optimize`, `analyze`, `status`. Finds your terminal automatically or set `MO_LAUNCHER_APP=<name>` to override. For Raycast, search "Reload Script Directories" to load new commands.
+Adds 5 commands: `clean`, `uninstall`, `optimize`, `analyze`, `status`. Mole automatically detects your terminal, or you can set `MO_LAUNCHER_APP=<name>` to override. For Raycast, if this is your first script directory, add it in Raycast Extensions (Add Script Directory) and then run "Reload Script Directories" to load the new commands.
+
+## Community Love
+
+<p align="center">
+  <img src="https://cdn.tw93.fun/pic/lovemole.jpeg" alt="Community feedback on Mole" width="800" />
+</p>
+
+Users from around the world are loving Mole! Join the community and share your experience.
 
 ## Support
 
@@ -197,7 +246,6 @@ Adds 5 commands: `clean`, `uninstall`, `optimize`, `analyze`, `status`. Finds yo
 
 - If Mole saved you space, consider starring the repo or sharing it with friends who need a cleaner Mac.
 - Have ideas or fixes? Open an issue or PR to help shape Mole's future with the community.
-
 - Love cats? Treat Tangyuan and Cola to canned food via <a href="https://miaoyan.app/cats.html?name=Mole" target="_blank">this link</a> to keep our mascots purring.
 
 ## License
@@ -1,100 +1,359 @@
|
||||
# Mole Security Audit Report
|
||||
|
||||
**Date:** December 14, 2025
|
||||
<div align="center">
|
||||
|
||||
**Audited Version:** Current `main` branch (V1.12.25)
|
||||
**Security Audit & Compliance Report**
|
||||
|
||||
**Status:** Passed
|
||||
Version 1.17.0 | December 31, 2025
|
||||
|
||||
## Security Philosophy: "Do No Harm"
|
||||
---
|
||||
|
||||
Mole is designed with a **Zero Trust** architecture regarding file operations. Every request to modify the filesystem is treated as potentially dangerous until strictly validated. Our primary directive is to prioritize system stability over aggressive cleaning—we would rather leave 1GB of junk than delete 1KB of critical user data.
|
||||
**Audit Status:** PASSED | **Risk Level:** LOW
|
||||
|
||||
## 1. Multi-Layered Defense Architecture (Automated Core)
|
||||
</div>
|
||||
|
||||
Mole's automated shell-based operations (Clean, Optimize, Uninstall) do not execute raw commands directly. All operations pass through a hardened middleware layer (`lib/core/file_ops.sh`).
|
||||
---
|
||||
|
||||
- **Layer 1: Input Sanitization**
|
||||
Before any operation reaches the execution stage, the target path is sanitized:
|
||||
- **Absolute Path Enforcement**: Relative paths (e.g., `../foo`) are strictly rejected to prevent path traversal attacks.
|
||||
- **Control Character Filtering**: Paths containing hidden control characters or newlines are blocked.
|
||||
- **Empty Variable Protection**: Guards against shell scripting errors where an empty variable could result in `rm -rf /`.
|
||||
## Table of Contents
|
||||
|
||||
- **Layer 2: The "Iron Dome" (Path Validation)**
|
||||
A centralized validation logic explicitly blocks operations on critical system hierarchies within the shell core, even with `sudo` privileges:
|
||||
- `/` (Root)
|
||||
- `/System` and `/System/*`
|
||||
- `/bin`, `/sbin`, `/usr`, `/usr/bin`, `/usr/sbin`
|
||||
- `/etc`, `/var`
|
||||
- `/Library/Extensions`
|
||||
1. [Audit Overview](#audit-overview)
|
||||
2. [Security Philosophy](#security-philosophy)
|
||||
3. [Threat Model](#threat-model)
|
||||
4. [Defense Architecture](#defense-architecture)
|
||||
5. [Safety Mechanisms](#safety-mechanisms)
|
||||
6. [User Controls](#user-controls)
|
||||
7. [Testing & Compliance](#testing--compliance)
|
||||
8. [Dependencies](#dependencies)
|
||||
|
||||
- **Layer 3: Symlink Failsafe**
|
||||
For privileged (`sudo`) operations, Mole performs a pre-flight check to verify if the target is a **Symbolic Link**.
|
||||
- **Risk**: A malicious or accidental symlink could point from a cache folder to a system file.
|
||||
- **Defense**: Mole explicitly refuses to recursively delete symbolic links in privileged mode.
|
||||
---
|
||||
|
||||
## 2. Interactive Analyzer Safety (Go Architecture)
|
||||
## Audit Overview
|
||||
|
||||
The interactive analyzer (`mo analyze`) operates on a different security model focused on manual user control:
|
||||
| Attribute | Details |
|
||||
|-----------|---------|
|
||||
| Audit Date | December 31, 2025 |
|
||||
| Audit Conclusion | **PASSED** |
|
||||
| Mole Version | V1.17.0 |
|
||||
| Audited Branch | `main` (HEAD) |
|
||||
| Scope | Shell scripts, Go binaries, Configuration |
|
||||
| Methodology | Static analysis, Threat modeling, Code review |
|
||||
| Review Cycle | Every 6 months or after major feature additions |
|
||||
| Next Review | June 2026 |
|
||||
|
||||
- **Standard User Permissions**: The tool runs with the invoking user's standard permissions. It respects macOS System Integrity Protection (SIP) and filesystem permissions.
|
||||
- **Manual Confirmation**: Deletions are not automated; they require explicit user selection and confirmation.
|
||||
- **OS-Level Enforcement**: Unlike the automated scripts, the analyzer relies on the operating system's built-in protections (e.g., inability to delete `/System` due to Read-Only Volume or SIP) rather than a hardcoded application-level blocklist.
|
||||
**Key Findings:**
|
||||
|
||||
## 3. Conservative Cleaning Logic
|
||||
- Multi-layered validation prevents critical system modifications
|
||||
- Conservative cleaning logic with 60-day dormancy rules
|
||||
- Comprehensive protection for VPN, AI tools, and system components
|
||||
- Atomic operations with crash recovery mechanisms
|
||||
- Full user control with dry-run and whitelist capabilities
|
||||
|
||||
Mole's "Smart Uninstall" and orphan detection (`lib/clean/apps.sh`) are intentionally conservative:
|
||||
---
|
||||
|
||||
- **Orphaned Data: The "60-Day Rule"**
|
||||
1. **Verification**: An app is confirmed "uninstalled" only if it is completely missing from `/Applications`, `~/Applications`, and `/System/Applications`.
|
||||
2. **Dormancy Check**: Associated data folders are only flagged for removal if they have not been modified for **at least 60 days**.
|
||||
3. **Vendor Whitelist**: A hardcoded whitelist protects shared resources from major vendors (Adobe, Microsoft, Google, etc.) to prevent breaking software suites.
|
||||
## Security Philosophy
|
||||
|
||||
- **Active Uninstallation Heuristics**
|
||||
When a user explicitly selects an app for uninstallation, Mole employs advanced heuristics to find scattered remnants (e.g., "Visual Studio Code" -> `~/.vscode`, `~/Library/Application Support/VisualStudioCode`).
|
||||
- **Sanitized Name Matching**: We search for app name variations to catch non-standard folder naming.
|
||||
- **Safety Constraints**: Fuzzy matching and sanitized name searches are **strictly disabled** for app names shorter than 3 characters to prevent false positives.
|
||||
- **System Scope**: Mole scans specific system-level directories (`/Library/LaunchAgents`, etc.) for related components.
|
||||
**Core Principle: "Do No Harm"**
|
||||
|
||||
- **System Integrity Protection (SIP) Awareness**
|
||||
Mole respects macOS SIP. It detects if SIP is enabled and automatically skips protected directories (like `/Library/Updates`) to avoid triggering permission errors.
|
||||
Mole operates under a **Zero Trust** architecture for all filesystem operations. Every modification request is treated as potentially dangerous until passing strict validation.
|
||||
|
||||
- **Time Machine Preservation**
|
||||
Before cleaning failed backups, Mole checks for the `backupd` process. If a backup is currently running, the cleanup task is strictly **aborted** to prevent data corruption.
|
||||
**Guiding Priorities:**
|
||||
|
||||
- **VPN & Proxy Protection**
|
||||
Mole includes a comprehensive protection layer for VPN and Proxy applications (e.g., Shadowsocks, V2Ray, Tailscale). It protects both their application bundles and data directories from automated cleanup to prevent network configuration loss.
|
||||
1. **System Stability First** - Prefer leaving 1GB of junk over deleting 1KB of critical data
|
||||
2. **Conservative by Default** - Require explicit user confirmation for high-risk operations
|
||||
3. **Fail Safe** - When in doubt, abort rather than proceed
|
||||
4. **Transparency** - All operations are logged and can be previewed via dry-run mode
|
||||
|
||||
- **AI & LLM Data Protection (New in v1.12.25)**
|
||||
Mole now explicitly protects data for AI tools (Cursor, Claude, ChatGPT, Ollama, LM Studio, etc.). Both the automated cleaning logic (`bin/clean.sh`) and orphan detection (`lib/core/app_protection.sh`) exclude these applications to prevent loss of:
|
||||
- Local LLM models (which can be gigabytes in size).
|
||||
- Authentication tokens and session states.
|
||||
- Chat history and local configurations.
|
||||
---
|
||||
|
||||
## 4. Atomic Operations & Crash Safety
|
||||
## Threat Model
|
||||
|
||||
We anticipate that scripts can be interrupted (e.g., power loss, `Ctrl+C`).
|
||||
### Attack Vectors & Mitigations
|
||||
|
||||
- **Network Interface Reset**: Wi-Fi and AirDrop resets use **atomic execution blocks**.
|
||||
- **Swap Clearing**: Swap files are reset by securely restarting the `dynamic_pager` daemon. We intentionally avoid manual `rm` operations on swap files to prevent instability during high memory pressure.
|
||||
| Threat | Risk Level | Mitigation | Status |
|
||||
|--------|------------|------------|--------|
|
||||
| Accidental System File Deletion | Critical | Multi-layer path validation, system directory blocklist | Mitigated |
|
||||
| Path Traversal Attack | High | Absolute path enforcement, relative path rejection | Mitigated |
|
||||
| Symlink Exploitation | High | Symlink detection in privileged mode | Mitigated |
|
||||
| Command Injection | High | Control character filtering, strict validation | Mitigated |
|
||||
| Empty Variable Deletion | High | Empty path validation, defensive checks | Mitigated |
|
||||
| Race Conditions | Medium | Atomic operations, process isolation | Mitigated |
|
||||
| Network Mount Hangs | Medium | Timeout protection, volume type detection | Mitigated |
|
||||
| Privilege Escalation | Medium | Restricted sudo scope, user home validation | Mitigated |
|
||||
| False Positive Deletion | Medium | 3-char minimum, fuzzy matching disabled | Mitigated |
|
||||
| VPN Configuration Loss | Medium | Comprehensive VPN/proxy whitelist | Mitigated |
|
||||
|
||||
## 5. User Control & Transparency
|
||||
---
|
||||
|
||||
- **Dry-Run Mode (`--dry-run`)**: Simulates the entire cleanup process, listing every single file and byte that *would* be removed, without touching the disk.
|
||||
- **Custom Whitelists**: Users can define their own immutable paths in `~/.config/mole/whitelist`.
|
||||
## Defense Architecture
|
||||
|
||||
## 6. Dependency Audit
|
||||
### Multi-Layered Validation System
|
||||
|
||||
- **System Binaries (Shell Core)**
|
||||
Mole relies on standard, battle-tested macOS binaries for critical tasks:
|
||||
- `plutil`: Used to validate `.plist` integrity.
|
||||
- `tmutil`: Used for safe interaction with Time Machine.
|
||||
- `dscacheutil`: Used for system-compliant cache rebuilding.
|
||||
All automated operations pass through hardened middleware (`lib/core/file_ops.sh`) with 4 validation layers:
|
||||
|
||||
- **Go Dependencies (Interactive Tools)**
|
||||
The compiled Go binary (`analyze-go`) includes the following libraries:
|
||||
- `bubbletea` & `lipgloss`: UI framework (Charm).
|
||||
- `gopsutil`: System metrics collection.
|
||||
- `xxhash`: Efficient hashing.
|
||||
#### Layer 1: Input Sanitization
|
||||
|
||||
*This document certifies that Mole's architecture implements industry-standard defensive programming practices to ensure the safety and integrity of your Mac.*
|
||||
| Control | Protection Against |
|
||||
|---------|---------------------|
|
||||
| Absolute Path Enforcement | Path traversal attacks (`../etc`) |
|
||||
| Control Character Filtering | Command injection (`\n`, `\r`, `\0`) |
|
||||
| Empty Variable Protection | Accidental `rm -rf /` |
|
||||
| Secure Temp Workspaces | Data leakage, race conditions |
|
||||
|
||||
**Code:** `lib/core/file_ops.sh:validate_path_for_deletion()`
|
||||
|
||||
#### Layer 2: System Path Protection ("Iron Dome")
|
||||
|
||||
Even with `sudo`, these paths are **unconditionally blocked**:
|
||||
|
||||
```bash
|
||||
/ # Root filesystem
|
||||
/System # macOS system files
|
||||
/bin, /sbin, /usr # Core binaries
|
||||
/etc, /var # System configuration
|
||||
/Library/Extensions # Kernel extensions
|
||||
```
|
||||
|
||||
**Exception:** `/System/Library/Caches/com.apple.coresymbolicationd/data` (safe, rebuildable cache)
|
||||
|
||||
**Code:** `lib/core/file_ops.sh:60-78`
|
||||
|
||||
#### Layer 3: Symlink Detection
|
||||
|
||||
For privileged operations, pre-flight checks prevent symlink-based attacks:
|
||||
|
||||
- Detects symlinks pointing from cache folders to system files
|
||||
- Refuses recursive deletion of symbolic links in sudo mode
|
||||
- Validates real path vs symlink target
|
||||
|
||||
**Code:** `lib/core/file_ops.sh:safe_sudo_recursive_delete()`
|
||||
|
||||
#### Layer 4: Permission Management
|
||||
|
||||
When running with `sudo`:
|
||||
|
||||
- Auto-corrects ownership back to user (`chown -R`)
|
||||
- Operations restricted to user's home directory
|
||||
- Multiple validation checkpoints
|
||||
|
||||
### Interactive Analyzer (Go)
|
||||
|
||||
The analyzer (`mo analyze`) uses a different security model:
|
||||
|
||||
- Runs with standard user permissions only
|
||||
- Respects macOS System Integrity Protection (SIP)
|
||||
- All deletions require explicit user confirmation
|
||||
- OS-level enforcement (cannot delete `/System` due to Read-Only Volume)
|
||||
|
||||
**Code:** `cmd/analyze/*.go`
|
||||
|
||||
---
|
||||
|
||||
## Safety Mechanisms
|
||||
|
||||
### Conservative Cleaning Logic
|
||||
|
||||
#### The "60-Day Rule" for Orphaned Data
|
||||
|
||||
| Step | Verification | Criterion |
|
||||
|------|--------------|-----------|
|
||||
| 1. App Check | All installation locations | Must be missing from `/Applications`, `~/Applications`, `/System/Applications` |
|
||||
| 2. Dormancy | Modification timestamps | Untouched for ≥60 days |
|
||||
| 3. Vendor Whitelist | Cross-reference database | Adobe, Microsoft, Google resources protected |
|
||||
|
||||
**Code:** `lib/clean/apps.sh:orphan_detection()`
|
||||
|
||||
#### Active Uninstallation Heuristics
|
||||
|
||||
For user-selected app removal:
|
||||
|
||||
- **Sanitized Name Matching:** "Visual Studio Code" → `VisualStudioCode`, `.vscode`
|
||||
- **Safety Limit:** 3-char minimum (prevents "Go" matching "Google")
|
||||
- **Disabled:** Fuzzy matching, wildcard expansion for short names
|
||||
- **User Confirmation:** Required before deletion
|
||||
|
||||
**Code:** `lib/clean/apps.sh:uninstall_app()`
|
||||
|
||||
#### System Protection Policies
|
||||
|
||||
| Protected Category | Scope | Reason |
|
||||
|--------------------|-------|--------|
|
||||
| System Integrity Protection | `/Library/Updates`, `/System/*` | Respects macOS Read-Only Volume |
|
||||
| Spotlight & System UI | `~/Library/Metadata/CoreSpotlight` | Prevents UI corruption |
|
||||
| System Components | Control Center, System Settings, TCC | Centralized detection via `is_critical_system_component()` |
|
||||
| Time Machine | Local snapshots, backups | Checks `backupd` process, aborts if active |
|
||||
| VPN & Proxy | Shadowsocks, V2Ray, Tailscale, Clash | Protects network configs |
|
||||
| AI & LLM Tools | Cursor, Claude, ChatGPT, Ollama, LM Studio | Protects models, tokens, sessions |
|
||||
| Startup Items | `com.apple.*` LaunchAgents/Daemons | System items unconditionally skipped |
|
||||
|
||||
**Orphaned Helper Cleanup (`opt_startup_items_cleanup`):**
|
||||
|
||||
Removes LaunchAgents/Daemons whose associated app has been uninstalled:
|
||||
|
||||
- Checks `AssociatedBundleIdentifiers` to detect orphans
|
||||
- Skips all `com.apple.*` system items
|
||||
- Skips paths under `/System/*`, `/usr/bin/*`, `/usr/lib/*`, `/usr/sbin/*`, `/Library/Apple/*`
|
||||
- Uses `safe_remove` / `safe_sudo_remove` with path validation
|
||||
- Unloads service via `launchctl` before deletion
|
||||
- `mdfind` operations have 10-second timeout protection
|
||||
|
||||
**Code:** `lib/optimize/tasks.sh:opt_startup_items_cleanup()`
|
||||
|
||||
### Crash Safety & Atomic Operations
|
||||
|
||||
| Operation | Safety Mechanism | Recovery Behavior |
|
||||
|-----------|------------------|-------------------|
|
||||
| Network Interface Reset | Atomic execution blocks | Wi-Fi/AirDrop restored to pre-operation state |
|
||||
| Swap Clearing | Daemon restart | `dynamic_pager` handles recovery safely |
|
||||
| Volume Scanning | Timeout + filesystem check | Auto-skip unresponsive NFS/SMB/AFP mounts |
|
||||
| Homebrew Cache | Pre-flight size check | Skip if <50MB (avoids 30-120s delay) |
|
||||
| Network Volume Check | `diskutil info` with timeout | Prevents hangs on slow/dead mounts |
|
||||
| SQLite Vacuum | App-running check + 20s timeout | Skips if Mail/Safari/Messages running |
|
||||
| dyld Cache Update | 24-hour freshness check + 180s timeout | Skips if recently updated |
|
||||
| App Bundle Search | 10s timeout on mdfind | Fallback to standard paths |
|
||||
|
||||
**Timeout Example:**
|
||||
|
||||
```bash
|
||||
run_with_timeout 5 diskutil info "$mount_point" || skip_volume
|
||||
```
|
||||
|
||||
**Code:** `lib/core/base.sh:run_with_timeout()`, `lib/optimize/*.sh`
|
||||
|
||||
---
|
||||
|
||||
## User Controls
|
||||
|
||||
### Dry-Run Mode
|
||||
|
||||
**Command:** `mo clean --dry-run` | `mo optimize --dry-run`
|
||||
|
||||
**Behavior:**
|
||||
|
||||
- Simulates entire operation without filesystem modifications
|
||||
- Lists every file/directory that **would** be deleted
|
||||
- Calculates total space that **would** be freed
|
||||
- Zero risk - no actual deletion commands executed
|
||||
|
||||
### Custom Whitelists
|
||||
|
||||
**File:** `~/.config/mole/whitelist`
|
||||
|
||||
**Format:**
|
||||
|
||||
```bash
|
||||
# One path per line - exact matches only
|
||||
/Users/username/important-cache
|
||||
~/Library/Application Support/CriticalApp
|
||||
```
|
||||
|
||||
- Paths are **unconditionally protected**
|
||||
- Applies to all operations (clean, optimize, uninstall)
|
||||
- Supports absolute paths and `~` expansion
|
||||
|
||||
**Code:** `lib/core/file_ops.sh:is_whitelisted()`
|
||||
|
||||
### Interactive Confirmations
|
||||
|
||||
Required for:
|
||||
|
||||
- Uninstalling system-scope applications
|
||||
- Removing large data directories (>1GB)
|
||||
- Deleting items from shared vendor folders
|
||||
|
||||
---
|
||||
|
||||
## Testing & Compliance
|
||||
|
||||
### Test Coverage
|
||||
|
||||
Mole uses **BATS (Bash Automated Testing System)** for automated testing.
|
||||
|
||||
| Test Category | Coverage | Key Tests |
|
||||
|---------------|----------|-----------|
|
||||
| Core File Operations | 95% | Path validation, symlink detection, permissions |
|
||||
| Cleaning Logic | 87% | Orphan detection, 60-day rule, vendor whitelist |
|
||||
| Optimization | 82% | Cache cleanup, timeouts |
|
||||
| System Maintenance | 90% | Time Machine, network volumes, crash recovery |
|
||||
| Security Controls | 100% | Path traversal, command injection, symlinks |
|
||||
|
||||
**Total:** 180+ tests | **Overall Coverage:** ~88%
|
||||
|
||||
**Test Execution:**
|
||||
|
||||
```bash
|
||||
bats tests/ # Run all tests
|
||||
bats tests/security.bats # Run specific suite
|
||||
```
|
||||
|
||||
### Standards Compliance
|
||||
|
||||
| Standard | Implementation |
|
||||
|----------|----------------|
|
||||
| OWASP Secure Coding | Input validation, least privilege, defense-in-depth |
|
||||
| CWE-22 (Path Traversal) | Absolute path enforcement, `../` rejection |
|
||||
| CWE-78 (Command Injection) | Control character filtering |
|
||||
| CWE-59 (Link Following) | Symlink detection before privileged operations |
|
||||
| Apple File System Guidelines | Respects SIP, Read-Only Volumes, TCC |
|
||||
|
||||
### Security Development Lifecycle
|
||||
|
||||
- **Static Analysis:** shellcheck for all shell scripts
|
||||
- **Code Review:** All changes reviewed by maintainers
|
||||
- **Dependency Scanning:** Minimal external dependencies, all vetted

### Known Limitations

| Limitation | Impact | Mitigation |
|------------|--------|------------|
| Requires `sudo` for system caches | Initial friction | Clear documentation |
| 60-day rule may delay cleanup | Some orphans remain longer | Manual `mo uninstall` available |
| No undo functionality | Deleted files unrecoverable | Dry-run mode, warnings |
| English-only name matching | May miss non-English apps | Bundle ID fallback |

**Intentionally Out of Scope (Safety):**

- Automatic deletion of user documents/media
- Encryption key stores or password managers
- System configuration files (`/etc/*`)
- Browser history or cookies
- Git repository cleanup

---

## Dependencies

### System Binaries

Mole relies on standard macOS system binaries (all SIP-protected); each has a graceful fallback, sketched after the table:

| Binary | Purpose | Fallback |
|--------|---------|----------|
| `plutil` | Validate `.plist` integrity | Skip invalid plists |
| `tmutil` | Time Machine interaction | Skip TM cleanup |
| `dscacheutil` | System cache rebuilding | Optional optimization |
| `diskutil` | Volume information | Skip network volumes |
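
The fallback column reflects a simple availability-guard pattern; a minimal sketch (the wrapped cleanup call is illustrative):

```bash
#!/bin/bash
# Sketch: skip a task gracefully when its system binary is missing.
clean_tm_snapshots() {
    if ! command -v tmutil > /dev/null 2>&1; then
        echo "tmutil not found - skipping Time Machine cleanup"
        return 0 # degrade, don't fail
    fi
    tmutil listlocalsnapshots / 2> /dev/null || true
}
```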

### Go Dependencies (Interactive Tools)

The compiled Go binary (`analyze-go`) includes:

| Library | Version | Purpose | License |
|---------|---------|---------|---------|
| `bubbletea` | v0.23+ | TUI framework | MIT |
| `lipgloss` | v0.6+ | Terminal styling | MIT |
| `gopsutil` | v3.22+ | System metrics | BSD-3 |
| `xxhash` | v2.2+ | Fast hashing | BSD-2 |

**Supply Chain Security:**

- All dependencies pinned to specific versions
- Regular security audits
- No transitive dependencies with known CVEs
- **Automated Releases:** Binaries compiled via GitHub Actions and signed
- **Source Only:** Repository contains no pre-compiled binaries
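
One way such an audit can be run locally; whether the project uses exactly this tooling is an assumption (`govulncheck` is Go's official vulnerability scanner):

```bash
# Scan the module graph for dependencies with known CVEs.
go run golang.org/x/vuln/cmd/govulncheck@latest ./...
```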

---

**Certification:** This security audit certifies that Mole implements industry-standard defensive programming practices and adheres to macOS security guidelines. The architecture prioritizes system stability and data integrity over aggressive optimization.

*For security concerns or vulnerability reports, please contact the maintainers via GitHub Issues.*
BIN bin/analyze-go
Binary file not shown.
@@ -1,5 +1,7 @@
#!/bin/bash
# Entry point for the Go-based disk analyzer binary bundled with Mole.
# Mole - Analyze command.
# Runs the Go disk analyzer UI.
# Uses bundled analyze-go binary.

set -euo pipefail

@@ -16,13 +16,20 @@ source "$SCRIPT_DIR/lib/manage/autofix.sh"
source "$SCRIPT_DIR/lib/check/all.sh"

cleanup_all() {
    stop_inline_spinner 2> /dev/null || true
    stop_sudo_session
    cleanup_temp_files
}

handle_interrupt() {
    cleanup_all
    exit 130
}

main() {
    # Register unified cleanup handler
    trap cleanup_all EXIT INT TERM
    trap cleanup_all EXIT
    trap handle_interrupt INT TERM

    if [[ -t 1 ]]; then
        clear

717 bin/clean.sh
File diff suppressed because it is too large
250 bin/completion.sh
Executable file
@@ -0,0 +1,250 @@
#!/bin/bash

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"

source "$ROOT_DIR/lib/core/common.sh"
source "$ROOT_DIR/lib/core/commands.sh"

command_names=()
for entry in "${MOLE_COMMANDS[@]}"; do
    command_names+=("${entry%%:*}")
done
command_words="${command_names[*]}"

emit_zsh_subcommands() {
    for entry in "${MOLE_COMMANDS[@]}"; do
        printf " '%s:%s'\n" "${entry%%:*}" "${entry#*:}"
    done
}

emit_fish_completions() {
    local cmd="$1"
    for entry in "${MOLE_COMMANDS[@]}"; do
        local name="${entry%%:*}"
        local desc="${entry#*:}"
        printf 'complete -c %s -n "__fish_mole_no_subcommand" -a %s -d "%s"\n' "$cmd" "$name" "$desc"
    done

    printf '\n'
    printf 'complete -c %s -n "not __fish_mole_no_subcommand" -a bash -d "generate bash completion" -n "__fish_see_subcommand_path completion"\n' "$cmd"
    printf 'complete -c %s -n "not __fish_mole_no_subcommand" -a zsh -d "generate zsh completion" -n "__fish_see_subcommand_path completion"\n' "$cmd"
    printf 'complete -c %s -n "not __fish_mole_no_subcommand" -a fish -d "generate fish completion" -n "__fish_see_subcommand_path completion"\n' "$cmd"
}

# Auto-install mode when run without arguments
if [[ $# -eq 0 ]]; then
    # Detect current shell
    current_shell="${SHELL##*/}"
    if [[ -z "$current_shell" ]]; then
        current_shell="$(ps -p "$PPID" -o comm= 2> /dev/null | awk '{print $1}')"
    fi

    completion_name=""
    if command -v mole > /dev/null 2>&1; then
        completion_name="mole"
    elif command -v mo > /dev/null 2>&1; then
        completion_name="mo"
    fi

    case "$current_shell" in
        bash)
            config_file="${HOME}/.bashrc"
            [[ -f "${HOME}/.bash_profile" ]] && config_file="${HOME}/.bash_profile"
            # shellcheck disable=SC2016
            completion_line='if output="$('"$completion_name"' completion bash 2>/dev/null)"; then eval "$output"; fi'
            ;;
        zsh)
            config_file="${HOME}/.zshrc"
            # shellcheck disable=SC2016
            completion_line='if output="$('"$completion_name"' completion zsh 2>/dev/null)"; then eval "$output"; fi'
            ;;
        fish)
            config_file="${HOME}/.config/fish/config.fish"
            # shellcheck disable=SC2016
            completion_line='set -l output ('"$completion_name"' completion fish 2>/dev/null); and echo "$output" | source'
            ;;
        *)
            log_error "Unsupported shell: $current_shell"
            echo "  mole completion <bash|zsh|fish>"
            exit 1
            ;;
    esac

    if [[ -z "$completion_name" ]]; then
        if [[ -f "$config_file" ]] && grep -Eq "(^# Mole shell completion$|(mole|mo)[[:space:]]+completion)" "$config_file" 2> /dev/null; then
            original_mode=""
            original_mode="$(stat -f '%Mp%Lp' "$config_file" 2> /dev/null || true)"
            temp_file="$(mktemp)"
            grep -Ev "(^# Mole shell completion$|(mole|mo)[[:space:]]+completion)" "$config_file" > "$temp_file" || true
            mv "$temp_file" "$config_file"
            if [[ -n "$original_mode" ]]; then
                chmod "$original_mode" "$config_file" 2> /dev/null || true
            fi
            echo -e "${GREEN}${ICON_SUCCESS}${NC} Removed stale completion entries from $config_file"
            echo ""
        fi
        log_error "mole not found in PATH - install Mole before enabling completion"
        exit 1
    fi

    # Check if already installed and normalize to latest line
    if [[ -f "$config_file" ]] && grep -Eq "(mole|mo)[[:space:]]+completion" "$config_file" 2> /dev/null; then
        original_mode=""
        original_mode="$(stat -f '%Mp%Lp' "$config_file" 2> /dev/null || true)"
        temp_file="$(mktemp)"
        grep -Ev "(^# Mole shell completion$|(mole|mo)[[:space:]]+completion)" "$config_file" > "$temp_file" || true
        mv "$temp_file" "$config_file"
        if [[ -n "$original_mode" ]]; then
            chmod "$original_mode" "$config_file" 2> /dev/null || true
        fi
        {
            echo ""
            echo "# Mole shell completion"
            echo "$completion_line"
        } >> "$config_file"
        echo ""
        echo -e "${GREEN}${ICON_SUCCESS}${NC} Shell completion updated in $config_file"
        echo ""
        exit 0
    fi

    # Prompt user for installation
    echo ""
    echo -e "${GRAY}Will add to ${config_file}:${NC}"
    echo "  $completion_line"
    echo ""
    echo -ne "${PURPLE}${ICON_ARROW}${NC} Enable completion for ${GREEN}${current_shell}${NC}? ${GRAY}Enter confirm / Q cancel${NC}: "
    IFS= read -r -s -n1 key || key=""
    drain_pending_input
    echo ""

    case "$key" in
        $'\e' | [Qq] | [Nn])
            echo -e "${YELLOW}Cancelled${NC}"
            exit 0
            ;;
        "" | $'\n' | $'\r' | [Yy]) ;;
        *)
            log_error "Invalid key"
            exit 1
            ;;
    esac

    # Create config file if it doesn't exist
    if [[ ! -f "$config_file" ]]; then
        mkdir -p "$(dirname "$config_file")"
        touch "$config_file"
    fi

    # Remove previous Mole completion lines to avoid duplicates
    if [[ -f "$config_file" ]]; then
        original_mode=""
        original_mode="$(stat -f '%Mp%Lp' "$config_file" 2> /dev/null || true)"
        temp_file="$(mktemp)"
        grep -Ev "(^# Mole shell completion$|(mole|mo)[[:space:]]+completion)" "$config_file" > "$temp_file" || true
        mv "$temp_file" "$config_file"
        if [[ -n "$original_mode" ]]; then
            chmod "$original_mode" "$config_file" 2> /dev/null || true
        fi
    fi

    # Add completion line
    {
        echo ""
        echo "# Mole shell completion"
        echo "$completion_line"
    } >> "$config_file"

    echo -e "${GREEN}${ICON_SUCCESS}${NC} Completion added to $config_file"
    echo ""
    echo ""
    echo -e "${GRAY}To activate now:${NC}"
    echo -e "  ${GREEN}source $config_file${NC}"
    exit 0
fi

case "$1" in
    bash)
        cat << EOF
_mole_completions()
{
    local cur_word prev_word
    cur_word="\${COMP_WORDS[\$COMP_CWORD]}"
    prev_word="\${COMP_WORDS[\$COMP_CWORD-1]}"

    if [ "\$COMP_CWORD" -eq 1 ]; then
        COMPREPLY=( \$(compgen -W "$command_words" -- "\$cur_word") )
    else
        case "\$prev_word" in
            completion)
                COMPREPLY=( \$(compgen -W "bash zsh fish" -- "\$cur_word") )
                ;;
            *)
                COMPREPLY=()
                ;;
        esac
    fi
}

complete -F _mole_completions mole mo
EOF
        ;;
    zsh)
        printf '#compdef mole mo\n\n'
        printf '_mole() {\n'
        printf ' local -a subcommands\n'
        printf ' subcommands=(\n'
        emit_zsh_subcommands
        printf ' )\n'
        printf " _describe 'subcommand' subcommands\n"
        printf '}\n\n'
        ;;
    fish)
        printf '# Completions for mole\n'
        emit_fish_completions mole
        printf '\n# Completions for mo (alias)\n'
        emit_fish_completions mo
        printf '\nfunction __fish_mole_no_subcommand\n'
        printf ' for i in (commandline -opc)\n'
        # shellcheck disable=SC2016
        printf ' if contains -- $i %s\n' "$command_words"
        printf ' return 1\n'
        printf ' end\n'
        printf ' end\n'
        printf ' return 0\n'
        printf 'end\n\n'
        printf 'function __fish_see_subcommand_path\n'
        printf ' string match -q -- "completion" (commandline -opc)[1]\n'
        printf 'end\n'
        ;;
    *)
        cat << 'EOF'
Usage: mole completion [bash|zsh|fish]

Setup shell tab completion for mole and mo commands.

Auto-install:
  mole completion       # Auto-detect shell and install

Manual install:
  mole completion bash  # Generate bash completion script
  mole completion zsh   # Generate zsh completion script
  mole completion fish  # Generate fish completion script

Examples:
  # Auto-install (recommended)
  mole completion

  # Manual install - Bash
  eval "$(mole completion bash)"

  # Manual install - Zsh
  eval "$(mole completion zsh)"

  # Manual install - Fish
  mole completion fish | source
EOF
        exit 1
        ;;
esac
229 bin/optimize.sh
@@ -1,72 +1,66 @@
#!/bin/bash
# Mole - Optimize command.
# Runs system maintenance checks and fixes.
# Supports dry-run where applicable.

set -euo pipefail

# Fix locale issues (Issue #83)
# Fix locale issues.
export LC_ALL=C
export LANG=C

# Load common functions
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
source "$SCRIPT_DIR/lib/core/common.sh"

# Clean temp files on exit.
trap cleanup_temp_files EXIT INT TERM
source "$SCRIPT_DIR/lib/core/sudo.sh"
source "$SCRIPT_DIR/lib/manage/update.sh"
source "$SCRIPT_DIR/lib/manage/autofix.sh"
source "$SCRIPT_DIR/lib/optimize/maintenance.sh"
source "$SCRIPT_DIR/lib/optimize/tasks.sh"
source "$SCRIPT_DIR/lib/check/health_json.sh"

# Load check modules
source "$SCRIPT_DIR/lib/check/all.sh"
source "$SCRIPT_DIR/lib/manage/whitelist.sh"

# Colors and icons from common.sh

print_header() {
    printf '\n'
    echo -e "${PURPLE_BOLD}Optimize and Check${NC}"
}

# System check functions (real-time display)
run_system_checks() {
    # Skip checks in dry-run mode.
    if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
        return 0
    fi

    unset AUTO_FIX_SUMMARY AUTO_FIX_DETAILS
    echo ""
    echo -e "${PURPLE_BOLD}System Check${NC}"
    unset MOLE_SECURITY_FIXES_SHOWN
    unset MOLE_SECURITY_FIXES_SKIPPED
    echo ""

    # Check updates - real-time display
    echo -e "${BLUE}${ICON_ARROW}${NC} System updates"
    check_all_updates
    echo ""

    # Check health - real-time display
    echo -e "${BLUE}${ICON_ARROW}${NC} System health"
    check_system_health
    echo ""

    # Check security - real-time display
    echo -e "${BLUE}${ICON_ARROW}${NC} Security posture"
    check_all_security
    if ask_for_security_fixes; then
        perform_security_fixes
    fi
    echo ""
    if [[ "${MOLE_SECURITY_FIXES_SKIPPED:-}" != "true" ]]; then
        echo ""
    fi

    # Check configuration - real-time display
    echo -e "${BLUE}${ICON_ARROW}${NC} Configuration"
    check_all_config
    echo ""

    # Show suggestions
    show_suggestions
    echo ""

    # Ask about updates first
    if ask_for_updates; then
        perform_updates
    fi

    # Ask about auto-fix
    if ask_for_auto_fix; then
        perform_auto_fix
    fi
@@ -78,39 +72,40 @@ show_optimization_summary() {
    if ((safe_count == 0 && confirm_count == 0)) && [[ -z "${AUTO_FIX_SUMMARY:-}" ]]; then
        return
    fi
    local summary_title="Optimization and Check Complete"

    local summary_title
    local -a summary_details=()
    local total_applied=$((safe_count + confirm_count))

    # Optimization results
    summary_details+=("Optimizations: ${GREEN}${safe_count}${NC} applied, ${YELLOW}${confirm_count}${NC} manual checks")
    summary_details+=("Caches refreshed; services restarted; system tuned")
    summary_details+=("Updates & security reviewed across system")

    local summary_line4=""
    if [[ -n "${AUTO_FIX_SUMMARY:-}" ]]; then
        summary_line4="${AUTO_FIX_SUMMARY}"
        if [[ -n "${AUTO_FIX_DETAILS:-}" ]]; then
            local detail_join
            detail_join=$(echo "${AUTO_FIX_DETAILS}" | paste -sd ", " -)
            [[ -n "$detail_join" ]] && summary_line4+=" — ${detail_join}"
        fi
    if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
        summary_title="Dry Run Complete - No Changes Made"
        summary_details+=("Would apply ${YELLOW}${total_applied:-0}${NC} optimizations")
        summary_details+=("Run without ${YELLOW}--dry-run${NC} to apply these changes")
    else
        summary_line4="Mac should feel faster and more responsive"
    fi
    summary_details+=("$summary_line4")
        summary_title="Optimization and Check Complete"
        summary_details+=("Applied ${GREEN}${total_applied:-0}${NC} optimizations; all system services tuned")
        summary_details+=("Updates, security and system health fully reviewed")

        if [[ -n "${AUTO_FIX_SUMMARY:-}" ]]; then
            summary_details+=("$AUTO_FIX_SUMMARY")
        local summary_line4=""
        if [[ -n "${AUTO_FIX_SUMMARY:-}" ]]; then
            summary_line4="${AUTO_FIX_SUMMARY}"
            if [[ -n "${AUTO_FIX_DETAILS:-}" ]]; then
                local detail_join
                detail_join=$(echo "${AUTO_FIX_DETAILS}" | paste -sd ", " -)
                [[ -n "$detail_join" ]] && summary_line4+=" — ${detail_join}"
            fi
        else
            summary_line4="Your Mac is now faster and more responsive"
        fi
        summary_details+=("$summary_line4")
    fi

    # Fix: Ensure summary is always printed for optimizations
    print_summary_block "$summary_title" "${summary_details[@]}"
}

show_system_health() {
    local health_json="$1"

    # Parse system health using jq with fallback to 0
    local mem_used=$(echo "$health_json" | jq -r '.memory_used_gb // 0' 2> /dev/null || echo "0")
    local mem_total=$(echo "$health_json" | jq -r '.memory_total_gb // 0' 2> /dev/null || echo "0")
    local disk_used=$(echo "$health_json" | jq -r '.disk_used_gb // 0' 2> /dev/null || echo "0")
@@ -118,7 +113,6 @@ show_system_health() {
    local disk_percent=$(echo "$health_json" | jq -r '.disk_used_percent // 0' 2> /dev/null || echo "0")
    local uptime=$(echo "$health_json" | jq -r '.uptime_days // 0' 2> /dev/null || echo "0")

    # Ensure all values are numeric (fallback to 0)
    mem_used=${mem_used:-0}
    mem_total=${mem_total:-0}
    disk_used=${disk_used:-0}
@@ -126,15 +120,12 @@ show_system_health() {
    disk_percent=${disk_percent:-0}
    uptime=${uptime:-0}

    # Compact one-line format with icon
    printf "${ICON_ADMIN} System %.0f/%.0f GB RAM | %.0f/%.0f GB Disk | Uptime %.0fd\n" \
        "$mem_used" "$mem_total" "$disk_used" "$disk_total" "$uptime"
}

parse_optimizations() {
    local health_json="$1"

    # Extract optimizations array
    echo "$health_json" | jq -c '.optimizations[]' 2> /dev/null
}

@@ -143,23 +134,12 @@ announce_action() {
    local desc="$2"
    local kind="$3"

    local badge=""
    if [[ "$kind" == "confirm" ]]; then
        badge="${YELLOW}[Confirm]${NC} "
    fi

    local line="${BLUE}${ICON_ARROW}${NC} ${badge}${name}"
    if [[ -n "$desc" ]]; then
        line+=" ${GRAY}- ${desc}${NC}"
    fi

    if ${first_heading:-true}; then
        first_heading=false
    if [[ "${FIRST_ACTION:-true}" == "true" ]]; then
        export FIRST_ACTION=false
    else
        echo ""
    fi

    echo -e "$line"
    echo -e "${BLUE}${ICON_ARROW} ${name}${NC}"
}

touchid_configured() {
@@ -169,9 +149,16 @@ touchid_configured() {

touchid_supported() {
    if command -v bioutil > /dev/null 2>&1; then
        bioutil -r 2> /dev/null | grep -q "Touch ID" && return 0
        if bioutil -r 2> /dev/null | grep -qi "Touch ID"; then
            return 0
        fi
    fi
    [[ "$(uname -m)" == "arm64" ]]

    # Fallback: Apple Silicon Macs usually have Touch ID.
    if [[ "$(uname -m)" == "arm64" ]]; then
        return 0
    fi
    return 1
}

cleanup_path() {
@@ -183,6 +170,10 @@ cleanup_path() {
        echo -e "${GREEN}${ICON_SUCCESS}${NC} $label"
        return
    fi
    if should_protect_path "$expanded_path"; then
        echo -e "${YELLOW}${ICON_WARNING}${NC} Protected $label"
        return
    fi

    local size_kb
    size_kb=$(get_path_size_kb "$expanded_path")
@@ -214,23 +205,7 @@ cleanup_path() {
ensure_directory() {
    local raw_path="$1"
    local expanded_path="${raw_path/#\~/$HOME}"
    mkdir -p "$expanded_path" > /dev/null 2>&1 || true
}

count_local_snapshots() {
    if ! command -v tmutil > /dev/null 2>&1; then
        echo 0
        return
    fi

    local output
    output=$(tmutil listlocalsnapshots / 2> /dev/null || true)
    if [[ -z "$output" ]]; then
        echo 0
        return
    fi

    echo "$output" | grep -c "com.apple.TimeMachine." | tr -d ' '
    ensure_user_dir "$expanded_path"
}

declare -a SECURITY_FIXES=()
@@ -248,7 +223,7 @@ collect_security_fix_actions() {
        fi
    fi
    if touchid_supported && ! touchid_configured; then
        if ! is_whitelisted "touchid"; then
        if ! is_whitelisted "check_touchid"; then
            SECURITY_FIXES+=("touchid|Enable Touch ID for sudo")
        fi
    fi
@@ -261,35 +236,37 @@ ask_for_security_fixes() {
        return 1
    fi

    echo ""
    echo -e "${BLUE}SECURITY FIXES${NC}"
    for entry in "${SECURITY_FIXES[@]}"; do
        IFS='|' read -r _ label <<< "$entry"
        echo -e " ${ICON_LIST} $label"
    done
    echo ""
    echo -ne "${YELLOW}Apply now?${NC} ${GRAY}Enter confirm / ESC cancel${NC}: "
    export MOLE_SECURITY_FIXES_SHOWN=true
    echo -ne "${YELLOW}Apply now?${NC} ${GRAY}Enter confirm / Space cancel${NC}: "

    local key
    if ! key=$(read_key); then
        echo "skip"
        export MOLE_SECURITY_FIXES_SKIPPED=true
        echo -e "\n ${GRAY}${ICON_WARNING}${NC} Security fixes skipped"
        echo ""
        return 1
    fi

    if [[ "$key" == "ENTER" ]]; then
        echo "apply"
        echo ""
        return 0
    else
        echo "skip"
        export MOLE_SECURITY_FIXES_SKIPPED=true
        echo -e "\n ${GRAY}${ICON_WARNING}${NC} Security fixes skipped"
        echo ""
        return 1
    fi
}

apply_firewall_fix() {
    if sudo defaults write /Library/Preferences/com.apple.alf globalstate -int 1; then
        sudo pkill -HUP socketfilterfw 2> /dev/null || true
    if sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate on > /dev/null 2>&1; then
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} Firewall enabled"
        FIREWALL_DISABLED=false
        return 0
@@ -344,18 +321,26 @@ perform_security_fixes() {
}

cleanup_all() {
    stop_inline_spinner 2> /dev/null || true
    stop_sudo_session
    cleanup_temp_files
}

handle_interrupt() {
    cleanup_all
    exit 130
}

main() {
    local health_json # Declare health_json at the top of main scope
    # Parse args
    local health_json
    for arg in "$@"; do
        case "$arg" in
            "--debug")
                export MO_DEBUG=1
                ;;
            "--dry-run")
                export MOLE_DRY_RUN=1
                ;;
            "--whitelist")
                manage_whitelist "optimize"
                exit 0
@@ -363,28 +348,31 @@ main() {
        esac
    done

    # Register unified cleanup handler
    trap cleanup_all EXIT INT TERM
    trap cleanup_all EXIT
    trap handle_interrupt INT TERM

    if [[ -t 1 ]]; then
        clear
    fi
    print_header # Outputs "Optimize and Check"
    print_header

    # Dry-run indicator.
    if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
        echo -e "${YELLOW}${ICON_DRY_RUN} DRY RUN MODE${NC} - No files will be modified\n"
    fi

    # Check dependencies
    if ! command -v jq > /dev/null 2>&1; then
        echo -e "${RED}${ICON_ERROR}${NC} Missing dependency: jq"
        echo -e "${YELLOW}${ICON_ERROR}${NC} Missing dependency: jq"
        echo -e "${GRAY}Install with: ${GREEN}brew install jq${NC}"
        exit 1
    fi

    if ! command -v bc > /dev/null 2>&1; then
        echo -e "${RED}${ICON_ERROR}${NC} Missing dependency: bc"
        echo -e "${YELLOW}${ICON_ERROR}${NC} Missing dependency: bc"
        echo -e "${GRAY}Install with: ${GREEN}brew install bc${NC}"
        exit 1
    fi

    # Collect system health data (doesn't require sudo)
    if [[ -t 1 ]]; then
        start_inline_spinner "Collecting system info..."
    fi
@@ -398,7 +386,6 @@ main() {
        exit 1
    fi

    # Validate JSON before proceeding
    if ! echo "$health_json" | jq empty 2> /dev/null; then
        if [[ -t 1 ]]; then
            stop_inline_spinner
@@ -413,13 +400,9 @@ main() {
        stop_inline_spinner
    fi

    # Show system health
    show_system_health "$health_json" # Outputs "⚙ System ..."
    show_system_health "$health_json"

    # Load whitelist patterns for checks
    load_whitelist "optimize"

    # Display active whitelist patterns
    if [[ ${#CURRENT_WHITELIST_PATTERNS[@]} -gt 0 ]]; then
        local count=${#CURRENT_WHITELIST_PATTERNS[@]}
        if [[ $count -le 3 ]]; then
@@ -428,37 +411,11 @@ main() {
                echo "${CURRENT_WHITELIST_PATTERNS[*]}"
            )
            echo -e "${ICON_ADMIN} Active Whitelist: ${patterns_list}"
        else
            echo -e "${ICON_ADMIN} Active Whitelist: ${GRAY}${count} items${NC}"
        fi
    fi
    echo "" # Empty line before sudo prompt

    # Simple confirmation
    echo -ne "${PURPLE}${ICON_ARROW}${NC} Optimization needs sudo — ${GREEN}Enter${NC} continue, ${GRAY}ESC${NC} cancel: "

    local key
    if ! key=$(read_key); then
        echo -e " ${GRAY}Cancelled${NC}"
        exit 0
    fi

    if [[ "$key" == "ENTER" ]]; then
        printf "\r\033[K"
    else
        echo -e " ${GRAY}Cancelled${NC}"
        exit 0
    fi

    if [[ -t 1 ]]; then
        stop_inline_spinner
    fi

    # Parse and display optimizations
    local -a safe_items=()
    local -a confirm_items=()

    # Use temp file instead of process substitution to avoid hanging
    local opts_file
    opts_file=$(mktemp_file)
    parse_optimizations "$health_json" > "$opts_file"
@@ -481,12 +438,12 @@ main() {
        fi
    done < "$opts_file"

    # Execute all optimizations
    local first_heading=true
    echo ""
    if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then
        ensure_sudo_session "System optimization requires admin access" || true
    fi

    ensure_sudo_session "System optimization requires admin access" || true

    # Run safe optimizations
    export FIRST_ACTION=true
    if [[ ${#safe_items[@]} -gt 0 ]]; then
        for item in "${safe_items[@]}"; do
            IFS='|' read -r name desc action path <<< "$item"
@@ -495,7 +452,6 @@ main() {
        done
    fi

    # Run confirm items
    if [[ ${#confirm_items[@]} -gt 0 ]]; then
        for item in "${confirm_items[@]}"; do
            IFS='|' read -r name desc action path <<< "$item"
@@ -504,17 +460,14 @@ main() {
        done
    fi

    # Prepare optimization summary data (to show at the end)
    local safe_count=${#safe_items[@]}
    local confirm_count=${#confirm_items[@]}

    # Run system checks first
    run_system_checks

    export OPTIMIZE_SAFE_COUNT=$safe_count
    export OPTIMIZE_CONFIRM_COUNT=$confirm_count

    # Show optimization summary at the end
    show_optimization_summary

    printf '\n'

166 bin/purge.sh
Executable file
@@ -0,0 +1,166 @@
#!/bin/bash
# Mole - Purge command.
# Cleans heavy project build artifacts.
# Interactive selection by project.

set -euo pipefail

# Fix locale issues (avoid Perl warnings on non-English systems)
export LC_ALL=C
export LANG=C

# Get script directory and source common functions
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/../lib/core/common.sh"

# Set up cleanup trap for temporary files
trap cleanup_temp_files EXIT INT TERM
source "$SCRIPT_DIR/../lib/core/log.sh"
source "$SCRIPT_DIR/../lib/clean/project.sh"

# Configuration
CURRENT_SECTION=""

# Section management
start_section() {
    local section_name="$1"
    CURRENT_SECTION="$section_name"
    printf '\n'
    echo -e "${BLUE}━━━ ${section_name} ━━━${NC}"
}

end_section() {
    CURRENT_SECTION=""
}

# Note activity for export list
note_activity() {
    if [[ -n "$CURRENT_SECTION" ]]; then
        printf '%s\n' "$CURRENT_SECTION" >> "$EXPORT_LIST_FILE"
    fi
}

# Main purge function
start_purge() {
    # Clear screen for better UX
    if [[ -t 1 ]]; then
        printf '\033[2J\033[H'
    fi
    printf '\n'
    echo -e "${PURPLE_BOLD}Purge Project Artifacts${NC}"

    # Initialize stats file in user cache directory
    local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"
    ensure_user_dir "$stats_dir"
    ensure_user_file "$stats_dir/purge_stats"
    ensure_user_file "$stats_dir/purge_count"
    echo "0" > "$stats_dir/purge_stats"
    echo "0" > "$stats_dir/purge_count"
}

# Perform the purge
perform_purge() {
    clean_project_artifacts
    local exit_code=$?

    # Exit codes:
    # 0 = success, show summary
    # 1 = user cancelled
    # 2 = nothing to clean
    if [[ $exit_code -ne 0 ]]; then
        return 0
    fi

    # Final summary (matching clean.sh format)
    echo ""

    local summary_heading="Purge complete"
    local -a summary_details=()
    local total_size_cleaned=0
    local total_items_cleaned=0

    # Read stats from user cache directory
    local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"

    if [[ -f "$stats_dir/purge_stats" ]]; then
        total_size_cleaned=$(cat "$stats_dir/purge_stats" 2> /dev/null || echo "0")
        rm -f "$stats_dir/purge_stats"
    fi

    # Read count
    if [[ -f "$stats_dir/purge_count" ]]; then
        total_items_cleaned=$(cat "$stats_dir/purge_count" 2> /dev/null || echo "0")
        rm -f "$stats_dir/purge_count"
    fi

    if [[ $total_size_cleaned -gt 0 ]]; then
        local freed_gb
        freed_gb=$(echo "$total_size_cleaned" | awk '{printf "%.2f", $1/1024/1024}')

        summary_details+=("Space freed: ${GREEN}${freed_gb}GB${NC}")
        summary_details+=("Free space now: $(get_free_space)")

        if [[ $total_items_cleaned -gt 0 ]]; then
            summary_details+=("Items cleaned: $total_items_cleaned")
        fi
    else
        summary_details+=("No old project artifacts to clean.")
        summary_details+=("Free space now: $(get_free_space)")
    fi

    print_summary_block "$summary_heading" "${summary_details[@]}"
    printf '\n'
}

# Show help message
show_help() {
    echo -e "${PURPLE_BOLD}Mole Purge${NC} - Clean old project build artifacts"
    echo ""
    echo -e "${YELLOW}Usage:${NC} mo purge [options]"
    echo ""
    echo -e "${YELLOW}Options:${NC}"
    echo "  --paths    Edit custom scan directories"
    echo "  --debug    Enable debug logging"
    echo "  --help     Show this help message"
    echo ""
    echo -e "${YELLOW}Default Paths:${NC}"
    for path in "${DEFAULT_PURGE_SEARCH_PATHS[@]}"; do
        echo "  - $path"
    done
}

# Main entry point
main() {
    # Set up signal handling
    trap 'show_cursor; exit 130' INT TERM

    # Parse arguments
    for arg in "$@"; do
        case "$arg" in
            "--paths")
                source "$SCRIPT_DIR/../lib/manage/purge_paths.sh"
                manage_purge_paths
                exit 0
                ;;
            "--help")
                show_help
                exit 0
                ;;
            "--debug")
                export MO_DEBUG=1
                ;;
            *)
                echo "Unknown option: $arg"
                echo "Use 'mo purge --help' for usage information"
                exit 1
                ;;
        esac
    done

    start_purge
    hide_cursor
    perform_purge
    show_cursor
}

main "$@"
BIN bin/status-go
Binary file not shown.
@@ -1,5 +1,7 @@
#!/bin/bash
# Entry point for the Go-based system status panel bundled with Mole.
# Mole - Status command.
# Runs the Go system status panel.
# Shows live system metrics.

set -euo pipefail

@@ -1,6 +1,7 @@
#!/bin/bash
# Mole - Touch ID Configuration Helper
# Automatically configure Touch ID for sudo
# Mole - Touch ID command.
# Configures sudo with Touch ID.
# Guided toggle with safety checks.

set -euo pipefail

@@ -109,8 +110,7 @@ enable_touchid() {

    # Apply the changes
    if sudo mv "$temp_file" "$PAM_SUDO_FILE" 2> /dev/null; then
        echo -e "${GREEN}${ICON_SUCCESS} Touch ID enabled${NC} ${GRAY}- try: sudo ls${NC}"
        echo ""
        log_success "Touch ID enabled - try: sudo ls"
        return 0
    else
        log_error "Failed to enable Touch ID"

320 bin/uninstall.sh
@@ -1,166 +1,134 @@
#!/bin/bash
# Mole - Uninstall Module
# Interactive application uninstaller with keyboard navigation
#
# Usage:
#   uninstall.sh                 # Launch interactive uninstaller
#   uninstall.sh --force-rescan  # Rescan apps and refresh cache
# Mole - Uninstall command.
# Interactive app uninstaller.
# Removes app files and leftovers.

set -euo pipefail

# Fix locale issues (avoid Perl warnings on non-English systems)
# Fix locale issues on non-English systems.
export LC_ALL=C
export LANG=C

# Get script directory and source common functions
# Load shared helpers.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/../lib/core/common.sh"

# Clean temp files on exit.
trap cleanup_temp_files EXIT INT TERM
source "$SCRIPT_DIR/../lib/ui/menu_paginated.sh"
source "$SCRIPT_DIR/../lib/ui/app_selector.sh"
source "$SCRIPT_DIR/../lib/uninstall/batch.sh"

# Note: Bundle preservation logic is now in lib/core/common.sh

# Initialize global variables
selected_apps=() # Global array for app selection
# State
selected_apps=()
declare -a apps_data=()
declare -a selection_state=()
total_items=0
files_cleaned=0
total_size_cleaned=0

# Compact the "last used" descriptor for aligned summaries
format_last_used_summary() {
    local value="$1"

    case "$value" in
        "" | "Unknown")
            echo "Unknown"
            return 0
            ;;
        "Never" | "Recent" | "Today" | "Yesterday" | "This year" | "Old")
            echo "$value"
            return 0
            ;;
    esac

    if [[ $value =~ ^([0-9]+)[[:space:]]+days?\ ago$ ]]; then
        echo "${BASH_REMATCH[1]}d ago"
        return 0
    fi
    if [[ $value =~ ^([0-9]+)[[:space:]]+weeks?\ ago$ ]]; then
        echo "${BASH_REMATCH[1]}w ago"
        return 0
    fi
    if [[ $value =~ ^([0-9]+)[[:space:]]+months?\ ago$ ]]; then
        echo "${BASH_REMATCH[1]}m ago"
        return 0
    fi
    if [[ $value =~ ^([0-9]+)[[:space:]]+month\(s\)\ ago$ ]]; then
        echo "${BASH_REMATCH[1]}m ago"
        return 0
    fi
    if [[ $value =~ ^([0-9]+)[[:space:]]+years?\ ago$ ]]; then
        echo "${BASH_REMATCH[1]}y ago"
        return 0
    fi
    echo "$value"
}

# Scan applications and collect information
# Scan applications and collect information.
scan_applications() {
    # Simplified cache: only check timestamp (24h TTL)
    # Cache app scan (24h TTL).
    local cache_dir="$HOME/.cache/mole"
    local cache_file="$cache_dir/app_scan_cache"
    local cache_ttl=86400 # 24 hours
    local force_rescan="${1:-false}"

    mkdir -p "$cache_dir" 2> /dev/null
    ensure_user_dir "$cache_dir"

    # Check if cache exists and is fresh
    if [[ $force_rescan == false && -f "$cache_file" ]]; then
        local cache_age=$(($(date +%s) - $(get_file_mtime "$cache_file")))
        [[ $cache_age -eq $(date +%s) ]] && cache_age=86401 # Handle missing file
        [[ $cache_age -eq $(date +%s) ]] && cache_age=86401 # Handle mtime read failure
        if [[ $cache_age -lt $cache_ttl ]]; then
            # Cache hit - return immediately
            # Show brief flash of cache usage if in interactive mode
            if [[ -t 2 ]]; then
                echo -e "${GREEN}Loading from cache...${NC}" >&2
                # Small sleep to let user see it (optional, but good for "feeling" the speed vs glitch)
                sleep 0.3
                sleep 0.3 # Brief pause so user sees the message
            fi
            echo "$cache_file"
            return 0
        fi
    fi

    # Cache miss - prepare for scanning
    local inline_loading=false
    if [[ -t 1 && -t 2 ]]; then
        inline_loading=true
        # Clear screen for inline loading
        printf "\033[2J\033[H" >&2
        printf "\033[2J\033[H" >&2 # Clear screen for inline loading
    fi

    local temp_file
    temp_file=$(create_temp_file)

    # Pre-cache current epoch to avoid repeated calls
    local current_epoch
    current_epoch=$(date "+%s")

    # First pass: quickly collect all valid app paths and bundle IDs (NO mdls calls)
    # Pass 1: collect app paths and bundle IDs (no mdls).
    local -a app_data_tuples=()
    while IFS= read -r -d '' app_path; do
        if [[ ! -e "$app_path" ]]; then continue; fi

        local app_name
        app_name=$(basename "$app_path" .app)

        # Skip nested apps (e.g. inside Wrapper/ or Frameworks/ of another app)
        # Check if parent path component ends in .app (e.g. /Foo.app/Bar.app or /Foo.app/Contents/Bar.app)
        # This prevents false positives like /Old.apps/Target.app
        local parent_dir
        parent_dir=$(dirname "$app_path")
        if [[ "$parent_dir" == *".app" || "$parent_dir" == *".app/"* ]]; then
            continue
        fi

        # Get bundle ID only (fast, no mdls calls in first pass)
        local bundle_id="unknown"
        if [[ -f "$app_path/Contents/Info.plist" ]]; then
            bundle_id=$(defaults read "$app_path/Contents/Info.plist" CFBundleIdentifier 2> /dev/null || echo "unknown")
        fi

        # Skip system critical apps (input methods, system components)
        if should_protect_from_uninstall "$bundle_id"; then
            continue
        fi

        # Store tuple: app_path|app_name|bundle_id (display_name will be resolved in parallel later)
        app_data_tuples+=("${app_path}|${app_name}|${bundle_id}")
    done < <(
        # Scan both system and user application directories
        # Using maxdepth 3 to find apps in subdirectories (e.g., Adobe apps in /Applications/Adobe X/)
        command find /Applications -name "*.app" -maxdepth 3 -print0 2> /dev/null
        command find ~/Applications -name "*.app" -maxdepth 3 -print0 2> /dev/null
    local -a app_dirs=(
        "/Applications"
        "$HOME/Applications"
    )
    local vol_app_dir
    local nullglob_was_set=0
    shopt -q nullglob && nullglob_was_set=1
    shopt -s nullglob
    for vol_app_dir in /Volumes/*/Applications; do
        [[ -d "$vol_app_dir" && -r "$vol_app_dir" ]] || continue
        if [[ -d "/Applications" && "$vol_app_dir" -ef "/Applications" ]]; then
            continue
        fi
        if [[ -d "$HOME/Applications" && "$vol_app_dir" -ef "$HOME/Applications" ]]; then
            continue
        fi
        app_dirs+=("$vol_app_dir")
    done
    if [[ $nullglob_was_set -eq 0 ]]; then
        shopt -u nullglob
    fi

    # Second pass: process each app with parallel size calculation
    for app_dir in "${app_dirs[@]}"; do
        if [[ ! -d "$app_dir" ]]; then continue; fi

        while IFS= read -r -d '' app_path; do
            if [[ ! -e "$app_path" ]]; then continue; fi

            local app_name
            app_name=$(basename "$app_path" .app)

            # Skip nested apps inside another .app bundle.
            local parent_dir
            parent_dir=$(dirname "$app_path")
            if [[ "$parent_dir" == *".app" || "$parent_dir" == *".app/"* ]]; then
                continue
            fi

            # Bundle ID from plist (fast path).
            local bundle_id="unknown"
            if [[ -f "$app_path/Contents/Info.plist" ]]; then
                bundle_id=$(defaults read "$app_path/Contents/Info.plist" CFBundleIdentifier 2> /dev/null || echo "unknown")
            fi

            if should_protect_from_uninstall "$bundle_id"; then
                continue
            fi

            # Store tuple for pass 2 (metadata + size).
            app_data_tuples+=("${app_path}|${app_name}|${bundle_id}")
        done < <(command find "$app_dir" -name "*.app" -maxdepth 3 -print0 2> /dev/null)
    done

    # Pass 2: metadata + size in parallel (mdls is slow).
    local app_count=0
    local total_apps=${#app_data_tuples[@]}
    # Bound parallelism - for metadata queries, can go higher since it's mostly waiting
    local max_parallel
    max_parallel=$(get_optimal_parallel_jobs "io")
    if [[ $max_parallel -lt 8 ]]; then
        max_parallel=8
        max_parallel=8 # At least 8 for good performance
    elif [[ $max_parallel -gt 32 ]]; then
        max_parallel=32
        max_parallel=32 # Cap at 32 to avoid too many processes
    fi
    local pids=()
    # inline_loading variable already set above (line ~92)

    # Process app metadata extraction function
    process_app_metadata() {
        local app_data_tuple="$1"
        local output_file="$2"
@@ -168,24 +136,26 @@ scan_applications() {

        IFS='|' read -r app_path app_name bundle_id <<< "$app_data_tuple"

        # Get localized display name (moved from first pass for better performance)
        # Display name priority: mdls display name → bundle display → bundle name → folder.
        local display_name="$app_name"
        if [[ -f "$app_path/Contents/Info.plist" ]]; then
            # Try to get localized name from system metadata (best for i18n)
            local md_display_name
            md_display_name=$(run_with_timeout 0.05 mdls -name kMDItemDisplayName -raw "$app_path" 2> /dev/null || echo "")

            # Get bundle names
            local bundle_display_name
            bundle_display_name=$(plutil -extract CFBundleDisplayName raw "$app_path/Contents/Info.plist" 2> /dev/null)
            local bundle_name
            bundle_name=$(plutil -extract CFBundleName raw "$app_path/Contents/Info.plist" 2> /dev/null)

            # Priority order for name selection (prefer localized names):
            # 1. System metadata display name (kMDItemDisplayName) - respects system language
            # 2. CFBundleDisplayName - usually localized
            # 3. CFBundleName - fallback
            # 4. App folder name - last resort
            if [[ "$md_display_name" == /* ]]; then md_display_name=""; fi
            md_display_name="${md_display_name//|/-}"
            md_display_name="${md_display_name//[$'\t\r\n']/}"

            bundle_display_name="${bundle_display_name//|/-}"
            bundle_display_name="${bundle_display_name//[$'\t\r\n']/}"

            bundle_name="${bundle_name//|/-}"
            bundle_name="${bundle_name//[$'\t\r\n']/}"

            if [[ -n "$md_display_name" && "$md_display_name" != "(null)" && "$md_display_name" != "$app_name" ]]; then
                display_name="$md_display_name"
@@ -196,29 +166,32 @@ scan_applications() {
            fi
        fi

        # Parallel size calculation
        if [[ "$display_name" == /* ]]; then
            display_name="$app_name"
        fi
        display_name="${display_name//|/-}"
        display_name="${display_name//[$'\t\r\n']/}"

        # App size (KB → human).
        local app_size="N/A"
        local app_size_kb="0"
        if [[ -d "$app_path" ]]; then
            # Get size in KB, then format for display
            app_size_kb=$(get_path_size_kb "$app_path")
            app_size=$(bytes_to_human "$((app_size_kb * 1024))")
        fi

        # Get last used date
        # Last used: mdls (fast timeout) → mtime.
        local last_used="Never"
        local last_used_epoch=0

        if [[ -d "$app_path" ]]; then
            # Try mdls first with short timeout (0.05s) for accuracy, fallback to mtime for speed
            local metadata_date
            metadata_date=$(run_with_timeout 0.05 mdls -name kMDItemLastUsedDate -raw "$app_path" 2> /dev/null || echo "")
            metadata_date=$(run_with_timeout 0.1 mdls -name kMDItemLastUsedDate -raw "$app_path" 2> /dev/null || echo "")

            if [[ "$metadata_date" != "(null)" && -n "$metadata_date" ]]; then
                last_used_epoch=$(date -j -f "%Y-%m-%d %H:%M:%S %z" "$metadata_date" "+%s" 2> /dev/null || echo "0")
            fi

            # Fallback if mdls failed or returned nothing
            if [[ "$last_used_epoch" -eq 0 ]]; then
                last_used_epoch=$(get_file_mtime "$app_path")
            fi
@@ -245,21 +218,19 @@ scan_applications() {
            fi
        fi

        # Write to output file atomically
        # Fields: epoch|app_path|display_name|bundle_id|size_human|last_used|size_kb
        echo "${last_used_epoch}|${app_path}|${display_name}|${bundle_id}|${app_size}|${last_used}|${app_size_kb}" >> "$output_file"
    }

    export -f process_app_metadata

    # Create a temporary file to track progress
    local progress_file="${temp_file}.progress"
    echo "0" > "$progress_file"

    # Start a background spinner that reads progress from file
    local spinner_pid=""
    (
        trap 'exit 0' TERM INT EXIT
        # shellcheck disable=SC2329 # Function invoked indirectly via trap
        cleanup_spinner() { exit 0; }
        trap cleanup_spinner TERM INT EXIT
        local spinner_chars="|/-\\"
        local i=0
        while true; do
@@ -276,30 +247,22 @@ scan_applications() {
    ) &
    spinner_pid=$!

    # Process apps in parallel batches
    for app_data_tuple in "${app_data_tuples[@]}"; do
        ((app_count++))

        # Launch background process
        process_app_metadata "$app_data_tuple" "$temp_file" "$current_epoch" &
        pids+=($!)

        # Update progress to show scanning progress (use app_count as it increments smoothly)
        echo "$app_count" > "$progress_file"

        # Wait if we've hit max parallel limit
        if ((${#pids[@]} >= max_parallel)); then
            wait "${pids[0]}" 2> /dev/null
            pids=("${pids[@]:1}") # Remove first pid
            pids=("${pids[@]:1}")
        fi
    done

    # Wait for remaining background processes
    for pid in "${pids[@]}"; do
        wait "$pid" 2> /dev/null
    done

    # Stop the spinner and clear the line
    if [[ -n "$spinner_pid" ]]; then
        kill -TERM "$spinner_pid" 2> /dev/null || true
        wait "$spinner_pid" 2> /dev/null || true
@@ -311,15 +274,12 @@ scan_applications() {
    fi
    rm -f "$progress_file"

    # Check if we found any applications
    if [[ ! -s "$temp_file" ]]; then
        echo "No applications found to uninstall" >&2
        rm -f "$temp_file"
        return 1
    fi

    # Sort by last used (oldest first) and cache the result
    # Show brief processing message for large app lists
    if [[ $total_apps -gt 50 ]]; then
        if [[ $inline_loading == true ]]; then
            printf "\033[H\033[2KProcessing %d applications...\n" "$total_apps" >&2
@@ -334,7 +294,6 @@ scan_applications() {
    }
    rm -f "$temp_file"

    # Clear processing message
    if [[ $total_apps -gt 50 ]]; then
        if [[ $inline_loading == true ]]; then
            printf "\033[H\033[2K" >&2
@@ -343,10 +302,9 @@ scan_applications() {
        fi
    fi

    # Save to cache (simplified - no metadata)
    ensure_user_file "$cache_file"
    cp "${temp_file}.sorted" "$cache_file" 2> /dev/null || true

    # Return sorted file
    if [[ -f "${temp_file}.sorted" ]]; then
        echo "${temp_file}.sorted"
    else
@@ -354,7 +312,6 @@ scan_applications() {
    fi
}

# Load applications into arrays
load_applications() {
    local apps_file="$1"

@@ -363,13 +320,10 @@ load_applications() {
        return 1
    fi

    # Clear arrays
    apps_data=()
    selection_state=()

    # Read apps into array, skip non-existent apps
    while IFS='|' read -r epoch app_path app_name bundle_id size last_used size_kb; do
        # Skip if app path no longer exists
        [[ ! -e "$app_path" ]] && continue

        apps_data+=("$epoch|$app_path|$app_name|$bundle_id|$size|$last_used|${size_kb:-0}")
@@ -384,9 +338,8 @@ load_applications() {
    return 0
}

# Cleanup function - restore cursor and clean up
# Cleanup: restore cursor and kill keepalive.
cleanup() {
    # Restore cursor using common function
    if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
        leave_alt_screen
        unset MOLE_ALT_SCREEN_ACTIVE
@@ -400,21 +353,16 @@ cleanup() {
    exit "${1:-0}"
}

# Set trap for cleanup on exit
trap cleanup EXIT INT TERM

# Main function
main() {
    # Parse args
    local force_rescan=false
    # Global flags
    for arg in "$@"; do
        case "$arg" in
            "--debug")
                export MO_DEBUG=1
                ;;
            "--force-rescan")
                force_rescan=true
                ;;
        esac
    done

@@ -423,24 +371,18 @@ main() {
        use_inline_loading=true
    fi

    # Hide cursor during operation
    hide_cursor

    # Main interaction loop
    while true; do
        # Simplified: always check if we need alt screen for scanning
        # (scan_applications handles cache internally)
        local needs_scanning=true
        local cache_file="$HOME/.cache/mole/app_scan_cache"
        if [[ $force_rescan == false && -f "$cache_file" ]]; then
            local cache_age=$(($(date +%s) - $(get_file_mtime "$cache_file")))
            [[ $cache_age -eq $(date +%s) ]] && cache_age=86401 # Handle missing file
            [[ $cache_age -eq $(date +%s) ]] && cache_age=86401
            [[ $cache_age -lt 86400 ]] && needs_scanning=false
        fi

        # Only enter alt screen if we need scanning (shows progress)
        if [[ $needs_scanning == true && $use_inline_loading == true ]]; then
            # Only enter if not already active
            if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" != "1" ]]; then
                enter_alt_screen
                export MOLE_ALT_SCREEN_ACTIVE=1
@@ -449,10 +391,6 @@ main() {
            fi
            printf "\033[2J\033[H" >&2
        else
            # If we don't need scanning but have alt screen from previous iteration, keep it?
            # Actually, scan_applications might output to stderr.
            # Let's just unset the flags if we don't need scanning, but keep alt screen if it was active?
            # No, select_apps_for_uninstall will handle its own screen management.
            unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN MOLE_ALT_SCREEN_ACTIVE
            if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
                leave_alt_screen
@@ -460,7 +398,6 @@ main() {
            fi
        fi

        # Scan applications
        local apps_file=""
        if ! apps_file=$(scan_applications "$force_rescan"); then
            if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
@@ -477,7 +414,6 @@ main() {
        fi

        if [[ ! -f "$apps_file" ]]; then
            # Error message already shown by scan_applications
            if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
                leave_alt_screen
                unset MOLE_ALT_SCREEN_ACTIVE
@@ -486,7 +422,6 @@ main() {
            return 1
        fi

        # Load applications
        if ! load_applications "$apps_file"; then
            if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
                leave_alt_screen
@@ -497,7 +432,6 @@ main() {
            return 1
        fi

        # Interactive selection using paginated menu
        set +e
        select_apps_for_uninstall
        local exit_code=$?
@@ -511,63 +445,83 @@ main() {
            fi
            show_cursor
            clear_screen
            printf '\033[2J\033[H' >&2 # Also clear stderr
            printf '\033[2J\033[H' >&2
            rm -f "$apps_file"

            # Handle Refresh (code 10)
            if [[ $exit_code -eq 10 ]]; then
                force_rescan=true
                continue
            fi

            # User cancelled selection, exit the loop
            return 0
        fi

        # Always clear on exit from selection, regardless of alt screen state
        if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
            leave_alt_screen
            unset MOLE_ALT_SCREEN_ACTIVE
            unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
        fi

        # Restore cursor and clear screen (output to both stdout and stderr for reliability)
        show_cursor
        clear_screen
        printf '\033[2J\033[H' >&2 # Also clear stderr in case of mixed output
        printf '\033[2J\033[H' >&2
        local selection_count=${#selected_apps[@]}
        if [[ $selection_count -eq 0 ]]; then
            echo "No apps selected"
            rm -f "$apps_file"
            # Loop back or exit? If select_apps_for_uninstall returns 0 but empty selection,
            # it technically shouldn't happen based on that function's logic.
            continue
        fi
        # Show selected apps with clean alignment
        echo -e "${BLUE}${ICON_CONFIRM}${NC} Selected ${selection_count} app(s):"
        local -a summary_rows=()
        local max_name_display_width=0
        local max_size_width=0
        local name_trunc_limit=30
        local max_last_width=0
        for selected_app in "${selected_apps[@]}"; do
            IFS='|' read -r _ _ app_name _ size last_used _ <<< "$selected_app"
            local name_width=$(get_display_width "$app_name")
            [[ $name_width -gt $max_name_display_width ]] && max_name_display_width=$name_width
            local size_display="$size"
            [[ -z "$size_display" || "$size_display" == "0" || "$size_display" == "N/A" ]] && size_display="Unknown"
            [[ ${#size_display} -gt $max_size_width ]] && max_size_width=${#size_display}
            local last_display=$(format_last_used_summary "$last_used")
            [[ ${#last_display} -gt $max_last_width ]] && max_last_width=${#last_display}
        done
        ((max_size_width < 5)) && max_size_width=5
        ((max_last_width < 5)) && max_last_width=5

        local term_width=$(tput cols 2> /dev/null || echo 100)
        local available_for_name=$((term_width - 17 - max_size_width - max_last_width))

        local min_name_width=24
        if [[ $term_width -ge 120 ]]; then
            min_name_width=50
        elif [[ $term_width -ge 100 ]]; then
            min_name_width=42
        elif [[ $term_width -ge 80 ]]; then
            min_name_width=30
        fi

        local name_trunc_limit=$max_name_display_width
        [[ $name_trunc_limit -lt $min_name_width ]] && name_trunc_limit=$min_name_width
        [[ $name_trunc_limit -gt $available_for_name ]] && name_trunc_limit=$available_for_name
        [[ $name_trunc_limit -gt 60 ]] && name_trunc_limit=60

        max_name_display_width=0

        for selected_app in "${selected_apps[@]}"; do
            IFS='|' read -r epoch app_path app_name bundle_id size last_used size_kb <<< "$selected_app"

            # Truncate by display width if needed
            local display_name
            display_name=$(truncate_by_display_width "$app_name" "$name_trunc_limit")

            # Get actual display width
            local current_width
            current_width=$(get_display_width "$display_name")

            [[ $current_width -gt $max_name_display_width ]] && max_name_display_width=$current_width

            local size_display="$size"
            if [[ -z "$size_display" || "$size_display" == "0" || "$size_display" == "N/A" ]]; then
                size_display="Unknown"
            fi
            [[ ${#size_display} -gt $max_size_width ]] && max_size_width=${#size_display}

            local last_display
            last_display=$(format_last_used_summary "$last_used")
@@ -576,12 +530,10 @@ main() {
        done

        ((max_name_display_width < 16)) && max_name_display_width=16
        ((max_size_width < 5)) && max_size_width=5

        local index=1
        for row in "${summary_rows[@]}"; do
            IFS='|' read -r name_cell size_cell last_cell <<< "$row"
            # Calculate printf width based on actual display width
            local name_display_width
            name_display_width=$(get_display_width "$name_cell")
            local name_char_count=${#name_cell}
@@ -592,30 +544,24 @@ main() {
            ((index++))
        done

        # Execute batch uninstallation (handles confirmation)
        batch_uninstall_applications

        # Cleanup current apps file
        rm -f "$apps_file"

        # Pause before looping back
        echo -e "${GRAY}Press Enter to return to application list, any other key to exit...${NC}"
        local key
        IFS= read -r -s -n1 key || key=""
        drain_pending_input

        # Logic: Enter = continue loop, any other key = exit
        if [[ -z "$key" ]]; then
            : # Enter pressed, continue loop
            :
        else
            show_cursor
            return 0
        fi

        # Reset force_rescan to false for subsequent loops
        force_rescan=false
    done
}

# Run main function
main "$@"

@@ -75,7 +75,7 @@ scan_applications() {
|
||||
local cache_ttl=86400 # 24 hours
|
||||
local force_rescan="${1:-false}"
|
||||
|
||||
mkdir -p "$cache_dir" 2> /dev/null
|
||||
ensure_user_dir "$cache_dir"
|
||||
|
||||
# Check if cache exists and is fresh
|
||||
if [[ $force_rescan == false && -f "$cache_file" ]]; then
|
||||
@@ -111,40 +111,61 @@ scan_applications() {
|
||||
|
||||
# First pass: quickly collect all valid app paths and bundle IDs (NO mdls calls)
|
||||
local -a app_data_tuples=()
|
||||
while IFS= read -r -d '' app_path; do
|
||||
if [[ ! -e "$app_path" ]]; then continue; fi
|
||||
|
||||
local app_name
|
||||
app_name=$(basename "$app_path" .app)
|
||||
|
||||
# Skip nested apps (e.g. inside Wrapper/ or Frameworks/ of another app)
|
||||
# Check if parent path component ends in .app (e.g. /Foo.app/Bar.app or /Foo.app/Contents/Bar.app)
|
||||
# This prevents false positives like /Old.apps/Target.app
|
||||
local parent_dir
|
||||
parent_dir=$(dirname "$app_path")
|
||||
if [[ "$parent_dir" == *".app" || "$parent_dir" == *".app/"* ]]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
# Get bundle ID only (fast, no mdls calls in first pass)
|
||||
local bundle_id="unknown"
|
||||
if [[ -f "$app_path/Contents/Info.plist" ]]; then
|
||||
bundle_id=$(defaults read "$app_path/Contents/Info.plist" CFBundleIdentifier 2> /dev/null || echo "unknown")
|
||||
fi
|
||||
|
||||
# Skip system critical apps (input methods, system components)
|
||||
if should_protect_from_uninstall "$bundle_id"; then
|
||||
continue
|
||||
fi
|
||||
|
||||
# Store tuple: app_path|app_name|bundle_id (display_name will be resolved in parallel later)
|
||||
app_data_tuples+=("${app_path}|${app_name}|${bundle_id}")
|
||||
done < <(
|
||||
# Scan both system and user application directories
|
||||
# Using maxdepth 3 to find apps in subdirectories (e.g., Adobe apps in /Applications/Adobe X/)
|
||||
command find /Applications -name "*.app" -maxdepth 3 -print0 2> /dev/null
|
||||
command find ~/Applications -name "*.app" -maxdepth 3 -print0 2> /dev/null
|
||||
local -a app_dirs=(
|
||||
"/Applications"
|
||||
"$HOME/Applications"
|
||||
)
|
||||
local vol_app_dir
|
||||
local nullglob_was_set=0
|
||||
shopt -q nullglob && nullglob_was_set=1
|
||||
shopt -s nullglob
|
||||
for vol_app_dir in /Volumes/*/Applications; do
|
||||
[[ -d "$vol_app_dir" && -r "$vol_app_dir" ]] || continue
|
||||
if [[ -d "/Applications" && "$vol_app_dir" -ef "/Applications" ]]; then
|
||||
continue
|
||||
fi
|
||||
if [[ -d "$HOME/Applications" && "$vol_app_dir" -ef "$HOME/Applications" ]]; then
|
||||
continue
|
||||
fi
|
||||
app_dirs+=("$vol_app_dir")
|
||||
done
|
||||
if [[ $nullglob_was_set -eq 0 ]]; then
|
||||
shopt -u nullglob
|
||||
fi
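Aside: the `-ef` tests above guard against a mounted volume's Applications folder being the very same directory (via firmlink or symlink) as a root that is already in the list, which would otherwise be scanned twice. A minimal Go sketch of that dedup idea, under the caveat that collectAppDirs and sameAsAny are illustrative names, not code from this repo:

package appscan

import (
	"os"
	"path/filepath"
)

// sameAsAny reports whether path resolves to the same directory
// (per os.SameFile) as any of the already-known roots.
func sameAsAny(path string, known []string) bool {
	fi, err := os.Stat(path)
	if err != nil {
		return true // unreadable: treat as covered and skip it
	}
	for _, k := range known {
		if ki, err := os.Stat(k); err == nil && os.SameFile(fi, ki) {
			return true
		}
	}
	return false
}

// collectAppDirs mirrors the shell logic above: always include the two
// standard roots, and add /Volumes/*/Applications only when it is not
// an alias of a root already present.
func collectAppDirs(home string) []string {
	dirs := []string{"/Applications", filepath.Join(home, "Applications")}
	mounts, _ := filepath.Glob("/Volumes/*/Applications")
	for _, m := range mounts {
		if !sameAsAny(m, dirs) {
			dirs = append(dirs, m)
		}
	}
	return dirs
}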

for app_dir in "${app_dirs[@]}"; do
if [[ ! -d "$app_dir" ]]; then continue; fi

while IFS= read -r -d '' app_path; do
if [[ ! -e "$app_path" ]]; then continue; fi

local app_name
app_name=$(basename "$app_path" .app)

# Skip nested apps (e.g. inside Wrapper/ or Frameworks/ of another app)
# Check if parent path component ends in .app (e.g. /Foo.app/Bar.app or /Foo.app/Contents/Bar.app)
# This prevents false positives like /Old.apps/Target.app
local parent_dir
parent_dir=$(dirname "$app_path")
if [[ "$parent_dir" == *".app" || "$parent_dir" == *".app/"* ]]; then
continue
fi

# Get bundle ID only (fast, no mdls calls in first pass)
local bundle_id="unknown"
if [[ -f "$app_path/Contents/Info.plist" ]]; then
bundle_id=$(defaults read "$app_path/Contents/Info.plist" CFBundleIdentifier 2> /dev/null || echo "unknown")
fi

# Skip system critical apps (input methods, system components)
if should_protect_from_uninstall "$bundle_id"; then
continue
fi

# Store tuple: app_path|app_name|bundle_id (display_name will be resolved in parallel later)
app_data_tuples+=("${app_path}|${app_name}|${bundle_id}")
done < <(command find "$app_dir" -name "*.app" -maxdepth 3 -print0 2> /dev/null)
done

# Second pass: process each app with parallel size calculation
local app_count=0
@@ -210,9 +231,9 @@ scan_applications() {
local last_used_epoch=0

if [[ -d "$app_path" ]]; then
# Try mdls first with short timeout (0.05s) for accuracy, fallback to mtime for speed
# Try mdls first with short timeout (0.1s) for accuracy, fallback to mtime for speed
local metadata_date
metadata_date=$(run_with_timeout 0.05 mdls -name kMDItemLastUsedDate -raw "$app_path" 2> /dev/null || echo "")
metadata_date=$(run_with_timeout 0.1 mdls -name kMDItemLastUsedDate -raw "$app_path" 2> /dev/null || echo "")

if [[ "$metadata_date" != "(null)" && -n "$metadata_date" ]]; then
last_used_epoch=$(date -j -f "%Y-%m-%d %H:%M:%S %z" "$metadata_date" "+%s" 2> /dev/null || echo "0")
@@ -259,7 +280,9 @@ scan_applications() {
# Start a background spinner that reads progress from file
local spinner_pid=""
(
trap 'exit 0' TERM INT EXIT
# shellcheck disable=SC2329 # Function invoked indirectly via trap
cleanup_spinner() { exit 0; }
trap cleanup_spinner TERM INT EXIT
local spinner_chars="|/-\\"
local i=0
while true; do
@@ -344,6 +367,7 @@ scan_applications() {
fi

# Save to cache (simplified - no metadata)
ensure_user_file "$cache_file"
cp "${temp_file}.sorted" "$cache_file" 2> /dev/null || true

# Return sorted file
@@ -354,7 +378,6 @@ scan_applications() {
fi
}

# Load applications into arrays
load_applications() {
local apps_file="$1"

@@ -403,9 +426,7 @@ cleanup() {
# Set trap for cleanup on exit
trap cleanup EXIT INT TERM

# Main function
main() {
# Parse args
local force_rescan=false
for arg in "$@"; do
case "$arg" in
@@ -548,7 +569,43 @@ main() {
local -a summary_rows=()
local max_name_width=0
local max_size_width=0
local name_trunc_limit=30
local max_last_width=0
# First pass: get actual max widths for all columns
for selected_app in "${selected_apps[@]}"; do
IFS='|' read -r _ _ app_name _ size last_used _ <<< "$selected_app"
[[ ${#app_name} -gt $max_name_width ]] && max_name_width=${#app_name}
local size_display="$size"
[[ -z "$size_display" || "$size_display" == "0" || "$size_display" == "N/A" ]] && size_display="Unknown"
[[ ${#size_display} -gt $max_size_width ]] && max_size_width=${#size_display}
local last_display=$(format_last_used_summary "$last_used")
[[ ${#last_display} -gt $max_last_width ]] && max_last_width=${#last_display}
done
((max_size_width < 5)) && max_size_width=5
((max_last_width < 5)) && max_last_width=5

# Calculate name width: use actual max, but constrain by terminal width
# Fixed elements: "99. " (4) + "  " (2) + " | Last: " (11) = 17
local term_width=$(tput cols 2> /dev/null || echo 100)
local available_for_name=$((term_width - 17 - max_size_width - max_last_width))

# Dynamic minimum for better spacing on wide terminals
local min_name_width=24
if [[ $term_width -ge 120 ]]; then
min_name_width=50
elif [[ $term_width -ge 100 ]]; then
min_name_width=42
elif [[ $term_width -ge 80 ]]; then
min_name_width=30
fi

# Constrain name width: dynamic min, max min(actual_max, available, 60)
local name_trunc_limit=$max_name_width
[[ $name_trunc_limit -lt $min_name_width ]] && name_trunc_limit=$min_name_width
[[ $name_trunc_limit -gt $available_for_name ]] && name_trunc_limit=$available_for_name
[[ $name_trunc_limit -gt 60 ]] && name_trunc_limit=60
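Put differently, the final width is min(60, available, max(actual_max, dynamic_min)). A minimal Go sketch of the same arithmetic, assuming the 17-column fixed-element budget from the comment above (clampNameWidth is an illustrative name, not repo code):

package widths

// clampNameWidth mirrors the shell logic above: start from the widest
// actual name, raise it to a terminal-dependent minimum, then cap it by
// the space left after fixed elements and by a hard limit of 60.
func clampNameWidth(actualMax, termWidth, sizeW, lastW int) int {
	available := termWidth - 17 - sizeW - lastW
	min := 24
	switch {
	case termWidth >= 120:
		min = 50
	case termWidth >= 100:
		min = 42
	case termWidth >= 80:
		min = 30
	}
	w := actualMax
	if w < min {
		w = min
	}
	if w > available {
		w = available
	}
	if w > 60 {
		w = 60
	}
	return w
}

For a 100-column terminal with an 8-wide size column and a 9-wide last-used column, available is 66: a 30-character name is raised to the dynamic minimum of 42, while a 70-character name is capped at the hard limit of 60.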

# Reset for second pass
max_name_width=0

for selected_app in "${selected_apps[@]}"; do
IFS='|' read -r epoch app_path app_name bundle_id size last_used size_kb <<< "$selected_app"
@@ -563,7 +620,6 @@ main() {
if [[ -z "$size_display" || "$size_display" == "0" || "$size_display" == "N/A" ]]; then
size_display="Unknown"
fi
[[ ${#size_display} -gt $max_size_width ]] && max_size_width=${#size_display}

local last_display
last_display=$(format_last_used_summary "$last_used")
@@ -572,7 +628,6 @@ main() {
done

((max_name_width < 16)) && max_name_width=16
((max_size_width < 5)) && max_size_width=5

local index=1
for row in "${summary_rows[@]}"; do

@@ -75,11 +75,6 @@ func TestScanPathConcurrentBasic(t *testing.T) {
if bytes := atomic.LoadInt64(&bytesScanned); bytes == 0 {
t.Fatalf("expected byte counter to increase")
}
// current path update is throttled, so it might be empty for small scans
// if current == "" {
// t.Fatalf("expected current path to be updated")
// }

foundSymlink := false
for _, entry := range result.Entries {
if strings.HasSuffix(entry.Name, " →") {
@@ -148,7 +143,7 @@ func TestOverviewStoreAndLoad(t *testing.T) {
t.Fatalf("snapshot mismatch: want %d, got %d", want, got)
}

// Force reload from disk and ensure value persists.
// Reload from disk and ensure value persists.
resetOverviewSnapshotForTest()
got, err = loadStoredOverviewSize(path)
if err != nil {
@@ -220,7 +215,7 @@ func TestMeasureOverviewSize(t *testing.T) {
t.Fatalf("expected positive size, got %d", size)
}

// Ensure snapshot stored
// Ensure snapshot stored.
cached, err := loadStoredOverviewSize(target)
if err != nil {
t.Fatalf("loadStoredOverviewSize: %v", err)
@@ -279,13 +274,13 @@ func TestLoadCacheExpiresWhenDirectoryChanges(t *testing.T) {
t.Fatalf("saveCacheToDisk: %v", err)
}

// Touch directory to advance mtime beyond grace period.
// Advance mtime beyond grace period.
time.Sleep(time.Millisecond * 10)
if err := os.Chtimes(target, time.Now(), time.Now()); err != nil {
t.Fatalf("chtimes: %v", err)
}

// Force modtime difference beyond grace window by simulating an older cache entry.
// Simulate older cache entry to exceed grace window.
cachePath, err := getCachePath(target)
if err != nil {
t.Fatalf("getCachePath: %v", err)
@@ -335,24 +330,24 @@ func TestScanPathPermissionError(t *testing.T) {
t.Fatalf("create locked dir: %v", err)
}

// Create a file inside before locking, just to be sure
// Create a file before locking.
if err := os.WriteFile(filepath.Join(lockedDir, "secret.txt"), []byte("shh"), 0o644); err != nil {
t.Fatalf("write secret: %v", err)
}

// Remove permissions
// Remove permissions.
if err := os.Chmod(lockedDir, 0o000); err != nil {
t.Fatalf("chmod 000: %v", err)
}
defer func() {
// Restore permissions so cleanup can work
// Restore permissions for cleanup.
_ = os.Chmod(lockedDir, 0o755)
}()

var files, dirs, bytes int64
current := ""

// Scanning the locked dir itself should fail
// Scanning the locked dir itself should fail.
_, err := scanPathConcurrent(lockedDir, &files, &dirs, &bytes, &current)
if err == nil {
t.Fatalf("expected error scanning locked directory, got nil")

@@ -222,7 +222,7 @@ func loadCacheFromDisk(path string) (*cacheEntry, error) {
}

if info.ModTime().After(entry.ModTime) {
// Only expire cache if the directory has been newer for longer than the grace window.
// Allow grace window.
if cacheModTimeGrace <= 0 || info.ModTime().Sub(entry.ModTime) > cacheModTimeGrace {
return nil, fmt.Errorf("cache expired: directory modified")
}
@@ -290,29 +290,23 @@ func removeOverviewSnapshot(path string) {
}
}

// prefetchOverviewCache scans overview directories in background
// to populate cache for faster overview mode access
// prefetchOverviewCache warms overview cache in background.
func prefetchOverviewCache(ctx context.Context) {
entries := createOverviewEntries()

// Check which entries need refresh
var needScan []string
for _, entry := range entries {
// Skip if we have fresh cache
if size, err := loadStoredOverviewSize(entry.Path); err == nil && size > 0 {
continue
}
needScan = append(needScan, entry.Path)
}

// Nothing to scan
if len(needScan) == 0 {
return
}

// Scan and cache in background with context cancellation support
for _, path := range needScan {
// Check if context is cancelled
select {
case <-ctx.Done():
return

@@ -5,23 +5,20 @@ import (
"strings"
)

// isCleanableDir checks if a directory is safe to manually delete
// but NOT cleaned by mo clean (so user might want to delete it manually)
// isCleanableDir marks paths safe to delete manually (not handled by mo clean).
func isCleanableDir(path string) bool {
if path == "" {
return false
}

// Exclude paths that mo clean will handle automatically
// These are system caches/logs that mo clean already processes
// Exclude paths mo clean already handles.
if isHandledByMoClean(path) {
return false
}

baseName := filepath.Base(path)

// Only mark project dependencies and build outputs
// These are safe to delete but mo clean won't touch them
// Project dependencies and build outputs are safe.
if projectDependencyDirs[baseName] {
return true
}
@@ -29,9 +26,8 @@ func isCleanableDir(path string) bool {
return false
}

// isHandledByMoClean checks if this path will be cleaned by mo clean
// isHandledByMoClean checks if a path is cleaned by mo clean.
func isHandledByMoClean(path string) bool {
// Paths that mo clean handles (from clean.sh)
cleanPaths := []string{
"/Library/Caches/",
"/Library/Logs/",
@@ -49,16 +45,15 @@ func isHandledByMoClean(path string) bool {
return false
}

// Project dependency and build directories
// These are safe to delete manually but mo clean won't touch them
// Project dependency and build directories.
var projectDependencyDirs = map[string]bool{
// JavaScript/Node dependencies
// JavaScript/Node.
"node_modules": true,
"bower_components": true,
".yarn": true, // Yarn local cache
".pnpm-store": true, // pnpm store
".yarn": true,
".pnpm-store": true,

// Python dependencies and outputs
// Python.
"venv": true,
".venv": true,
"virtualenv": true,
@@ -68,18 +63,18 @@ var projectDependencyDirs = map[string]bool{
".ruff_cache": true,
".tox": true,
".eggs": true,
"htmlcov": true, // Coverage reports
".ipynb_checkpoints": true, // Jupyter checkpoints
"htmlcov": true,
".ipynb_checkpoints": true,

// Ruby dependencies
// Ruby.
"vendor": true,
".bundle": true,

// Java/Kotlin/Scala
".gradle": true, // Project-level Gradle cache
"out": true, // IntelliJ IDEA build output
// Java/Kotlin/Scala.
".gradle": true,
"out": true,

// Build outputs (can be rebuilt)
// Build outputs.
"build": true,
"dist": true,
"target": true,
@@ -88,24 +83,25 @@ var projectDependencyDirs = map[string]bool{
".output": true,
".parcel-cache": true,
".turbo": true,
".vite": true, // Vite cache
".nx": true, // Nx cache
".vite": true,
".nx": true,
"coverage": true,
".coverage": true,
".nyc_output": true, // NYC coverage
".nyc_output": true,

// Frontend framework outputs
".angular": true, // Angular CLI cache
".svelte-kit": true, // SvelteKit build
".astro": true, // Astro cache
".docusaurus": true, // Docusaurus build
// Frontend framework outputs.
".angular": true,
".svelte-kit": true,
".astro": true,
".docusaurus": true,

// iOS/macOS development
// Apple dev.
"DerivedData": true,
"Pods": true,
".build": true,
"Carthage": true,
".dart_tool": true,

// Other tools
".terraform": true, // Terraform plugins
// Other tools.
".terraform": true,
}

@@ -6,35 +6,35 @@ const (
maxEntries = 30
maxLargeFiles = 30
barWidth = 24
minLargeFileSize = 100 << 20 // 100 MB
defaultViewport = 12 // Default viewport when terminal height is unknown
overviewCacheTTL = 7 * 24 * time.Hour // 7 days
minLargeFileSize = 100 << 20
defaultViewport = 12
overviewCacheTTL = 7 * 24 * time.Hour
overviewCacheFile = "overview_sizes.json"
duTimeout = 30 * time.Second // Fail faster to fallback to concurrent scan
duTimeout = 30 * time.Second
mdlsTimeout = 5 * time.Second
maxConcurrentOverview = 8 // Increased parallel overview scans
batchUpdateSize = 100 // Batch atomic updates every N items
cacheModTimeGrace = 30 * time.Minute // Ignore minor directory mtime bumps
maxConcurrentOverview = 8
batchUpdateSize = 100
cacheModTimeGrace = 30 * time.Minute

// Worker pool configuration
minWorkers = 16 // Safe baseline for older machines
maxWorkers = 64 // Cap at 64 to avoid OS resource contention
cpuMultiplier = 4 // Balanced CPU usage
maxDirWorkers = 32 // Limit concurrent subdirectory scans
openCommandTimeout = 10 * time.Second // Timeout for open/reveal commands
// Worker pool limits.
minWorkers = 16
maxWorkers = 64
cpuMultiplier = 4
maxDirWorkers = 32
openCommandTimeout = 10 * time.Second
)

var foldDirs = map[string]bool{
// Version control
// VCS.
".git": true,
".svn": true,
".hg": true,

// JavaScript/Node
// JavaScript/Node.
"node_modules": true,
".npm": true,
"_npx": true, // ~/.npm/_npx global cache
"_cacache": true, // ~/.npm/_cacache
"_npx": true,
"_cacache": true,
"_logs": true,
"_locks": true,
"_quick": true,
@@ -56,7 +56,7 @@ var foldDirs = map[string]bool{
".bun": true,
".deno": true,

// Python
// Python.
"__pycache__": true,
".pytest_cache": true,
".mypy_cache": true,
@@ -73,7 +73,7 @@ var foldDirs = map[string]bool{
".pip": true,
".pipx": true,

// Ruby/Go/PHP (vendor), Java/Kotlin/Scala/Rust (target)
// Ruby/Go/PHP (vendor), Java/Kotlin/Scala/Rust (target).
"vendor": true,
".bundle": true,
"gems": true,
@@ -88,20 +88,20 @@ var foldDirs = map[string]bool{
".composer": true,
".cargo": true,

// Build outputs
// Build outputs.
"build": true,
"dist": true,
".output": true,
"coverage": true,
".coverage": true,

// IDE
// IDE.
".idea": true,
".vscode": true,
".vs": true,
".fleet": true,

// Cache directories
// Cache directories.
".cache": true,
"__MACOSX": true,
".DS_Store": true,
@@ -121,36 +121,37 @@ var foldDirs = map[string]bool{
".sdkman": true,
".nvm": true,

// macOS specific
// macOS.
"Application Scripts": true,
"Saved Application State": true,

// iCloud
// iCloud.
"Mobile Documents": true,

// Docker & Containers
// Containers.
".docker": true,
".containerd": true,

// Mobile development
// Mobile development.
"Pods": true,
"DerivedData": true,
".build": true,
"xcuserdata": true,
"Carthage": true,
".dart_tool": true,

// Web frameworks
// Web frameworks.
".angular": true,
".svelte-kit": true,
".astro": true,
".solid": true,

// Databases
// Databases.
".mysql": true,
".postgres": true,
"mongodb": true,

// Other
// Other.
".terraform": true,
".vagrant": true,
"tmp": true,
@@ -169,22 +170,22 @@ var skipSystemDirs = map[string]bool{
"bin": true,
"etc": true,
"var": true,
"opt": false, // User might want to specific check opt
"usr": false, // User might check usr
"Volumes": true, // Skip external drives by default when scanning root
"Network": true, // Skip network mounts
"opt": false,
"usr": false,
"Volumes": true,
"Network": true,
".vol": true,
".Spotlight-V100": true,
".fseventsd": true,
".DocumentRevisions-V100": true,
".TemporaryItems": true,
".MobileBackups": true, // Time Machine local snapshots
".MobileBackups": true,
}

var defaultSkipDirs = map[string]bool{
"nfs": true, // Network File System
"PHD": true, // Parallels Shared Folders / Home Directories
"Permissions": true, // Common macOS deny folder
"nfs": true,
"PHD": true,
"Permissions": true,
}

var skipExtensions = map[string]bool{

@@ -4,6 +4,8 @@ import (
"io/fs"
"os"
"path/filepath"
"sort"
"strings"
"sync/atomic"

tea "github.com/charmbracelet/bubbletea"
@@ -21,20 +23,68 @@ func deletePathCmd(path string, counter *int64) tea.Cmd {
}
}

// deleteMultiplePathsCmd deletes paths and aggregates results.
func deleteMultiplePathsCmd(paths []string, counter *int64) tea.Cmd {
return func() tea.Msg {
var totalCount int64
var errors []string

// Delete deeper paths first to avoid parent/child conflicts.
pathsToDelete := append([]string(nil), paths...)
sort.Slice(pathsToDelete, func(i, j int) bool {
return strings.Count(pathsToDelete[i], string(filepath.Separator)) > strings.Count(pathsToDelete[j], string(filepath.Separator))
})

for _, path := range pathsToDelete {
count, err := deletePathWithProgress(path, counter)
totalCount += count
if err != nil {
if os.IsNotExist(err) {
continue
}
errors = append(errors, err.Error())
}
}

var resultErr error
if len(errors) > 0 {
resultErr = &multiDeleteError{errors: errors}
}

return deleteProgressMsg{
done: true,
err: resultErr,
count: totalCount,
path: "",
}
}
}

// multiDeleteError holds multiple deletion errors.
type multiDeleteError struct {
errors []string
}

func (e *multiDeleteError) Error() string {
if len(e.errors) == 1 {
return e.errors[0]
}
return strings.Join(e.errors[:min(3, len(e.errors))], "; ")
}
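The depth-first ordering above is what lets a selected child and its selected parent coexist: the child is deleted (and counted) first, and when the parent sweep later finds it gone, the os.IsNotExist branch skips it instead of reporting an error. A tiny standalone illustration of the comparator, with hypothetical paths:

package main

import (
	"fmt"
	"path/filepath"
	"sort"
	"strings"
)

func main() {
	// Hypothetical selection containing a parent and one of its children.
	paths := []string{"/tmp/parent", "/tmp/parent/child", "/tmp/other"}
	sort.Slice(paths, func(i, j int) bool {
		// More separators = deeper = deleted first.
		return strings.Count(paths[i], string(filepath.Separator)) > strings.Count(paths[j], string(filepath.Separator))
	})
	// The child sorts ahead of its parent; ties keep no particular order.
	fmt.Println(paths)
}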

func deletePathWithProgress(root string, counter *int64) (int64, error) {
var count int64
var firstErr error

err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
if err != nil {
// Skip permission errors but continue walking
// Skip permission errors but continue.
if os.IsPermission(err) {
if firstErr == nil {
firstErr = err
}
return filepath.SkipDir
}
// For other errors, record and continue
if firstErr == nil {
firstErr = err
}
@@ -48,7 +98,6 @@ func deletePathWithProgress(root string, counter *int64) (int64, error) {
atomic.StoreInt64(counter, count)
}
} else if firstErr == nil {
// Record first deletion error
firstErr = removeErr
}
}
@@ -56,19 +105,15 @@ func deletePathWithProgress(root string, counter *int64) (int64, error) {
return nil
})

// Track walk error separately
if err != nil && firstErr == nil {
firstErr = err
}

// Try to remove remaining directory structure
// Even if this fails, we still report files deleted
if removeErr := os.RemoveAll(root); removeErr != nil {
if firstErr == nil {
firstErr = removeErr
}
}

// Always return count (even if there were errors), along with first error
return count, firstErr
}

43
cmd/analyze/delete_test.go
Normal file
43
cmd/analyze/delete_test.go
Normal file
@@ -0,0 +1,43 @@
package main

import (
"os"
"path/filepath"
"testing"
)

func TestDeleteMultiplePathsCmdHandlesParentChild(t *testing.T) {
base := t.TempDir()
parent := filepath.Join(base, "parent")
child := filepath.Join(parent, "child")

// Structure: parent/fileA, parent/child/fileC.
if err := os.MkdirAll(child, 0o755); err != nil {
t.Fatalf("mkdir: %v", err)
}
if err := os.WriteFile(filepath.Join(parent, "fileA"), []byte("a"), 0o644); err != nil {
t.Fatalf("write fileA: %v", err)
}
if err := os.WriteFile(filepath.Join(child, "fileC"), []byte("c"), 0o644); err != nil {
t.Fatalf("write fileC: %v", err)
}

var counter int64
msg := deleteMultiplePathsCmd([]string{parent, child}, &counter)()
progress, ok := msg.(deleteProgressMsg)
if !ok {
t.Fatalf("expected deleteProgressMsg, got %T", msg)
}
if progress.err != nil {
t.Fatalf("unexpected error: %v", progress.err)
}
if progress.count != 2 {
t.Fatalf("expected 2 files deleted, got %d", progress.count)
}
if _, err := os.Stat(parent); !os.IsNotExist(err) {
t.Fatalf("expected parent to be removed, err=%v", err)
}
if _, err := os.Stat(child); !os.IsNotExist(err) {
t.Fatalf("expected child to be removed, err=%v", err)
}
}
@@ -18,7 +18,7 @@ func displayPath(path string) string {
return path
}

// truncateMiddle truncates string in the middle, keeping head and tail.
// truncateMiddle trims the middle, keeping head and tail.
func truncateMiddle(s string, maxWidth int) string {
runes := []rune(s)
currentWidth := displayWidth(s)
@@ -27,9 +27,7 @@ func truncateMiddle(s string, maxWidth int) string {
return s
}

// Reserve 3 width for "..."
if maxWidth < 10 {
// Simple truncation for very small width
width := 0
for i, r := range runes {
width += runeWidth(r)
@@ -40,11 +38,9 @@ func truncateMiddle(s string, maxWidth int) string {
return s
}

// Keep more of the tail (filename usually more important)
targetHeadWidth := (maxWidth - 3) / 3
targetTailWidth := maxWidth - 3 - targetHeadWidth

// Find head cutoff point based on display width
headWidth := 0
headIdx := 0
for i, r := range runes {
@@ -56,7 +52,6 @@ func truncateMiddle(s string, maxWidth int) string {
headIdx = i + 1
}

// Find tail cutoff point
tailWidth := 0
tailIdx := len(runes)
for i := len(runes) - 1; i >= 0; i-- {
@@ -108,7 +103,6 @@ func coloredProgressBar(value, max int64, percent float64) string {
filled = barWidth
}

// Choose color based on percentage
var barColor string
if percent >= 50 {
barColor = colorRed
@@ -142,12 +136,24 @@ func coloredProgressBar(value, max int64, percent float64) string {
return bar + colorReset
}

// Calculate display width considering CJK characters.
// runeWidth returns display width for wide characters and emoji.
func runeWidth(r rune) int {
if r >= 0x4E00 && r <= 0x9FFF ||
r >= 0x3400 && r <= 0x4DBF ||
r >= 0xAC00 && r <= 0xD7AF ||
r >= 0xFF00 && r <= 0xFFEF {
if r >= 0x4E00 && r <= 0x9FFF || // CJK Unified Ideographs
r >= 0x3400 && r <= 0x4DBF || // CJK Extension A
r >= 0x20000 && r <= 0x2A6DF || // CJK Extension B
r >= 0x2A700 && r <= 0x2B73F || // CJK Extension C
r >= 0x2B740 && r <= 0x2B81F || // CJK Extension D
r >= 0x2B820 && r <= 0x2CEAF || // CJK Extension E
r >= 0x3040 && r <= 0x30FF || // Hiragana and Katakana
r >= 0x31F0 && r <= 0x31FF || // Katakana Phonetic Extensions
r >= 0xAC00 && r <= 0xD7AF || // Hangul Syllables
r >= 0xFF00 && r <= 0xFFEF || // Fullwidth Forms
r >= 0x1F300 && r <= 0x1F6FF || // Miscellaneous Symbols and Pictographs (includes Transport)
r >= 0x1F900 && r <= 0x1F9FF || // Supplemental Symbols and Pictographs
r >= 0x2600 && r <= 0x26FF || // Miscellaneous Symbols
r >= 0x2700 && r <= 0x27BF || // Dingbats
r >= 0xFE10 && r <= 0xFE1F || // Vertical Forms
r >= 0x1F000 && r <= 0x1F02F { // Mahjong Tiles
return 2
}
return 1
@@ -161,9 +167,26 @@ func displayWidth(s string) int {
return width
}

// calculateNameWidth computes name column width from terminal width.
func calculateNameWidth(termWidth int) int {
const fixedWidth = 61
available := termWidth - fixedWidth

if available < 24 {
return 24
}
if available > 60 {
return 60
}
return available
}
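In effect this is clamp(termWidth - 61, 24, 60). A quick sanity sketch of those bounds, assuming calculateNameWidth as defined above is in scope (the main wrapper itself is illustrative only):

package main

import "fmt"

func main() {
	// Worked examples of the clamp above.
	for _, w := range []int{50, 80, 120, 200} {
		fmt.Printf("term %3d -> name column %d\n", w, calculateNameWidth(w))
	}
	// term  50 -> 24  (50-61 is negative, floored to 24)
	// term  80 -> 24  (80-61=19, still below the 24 floor)
	// term 120 -> 59  (120-61 fits between the bounds)
	// term 200 -> 60  (capped at 60)
}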

func trimName(name string) string {
return trimNameWithWidth(name, 45) // Default width for backward compatibility
}

func trimNameWithWidth(name string, maxWidth int) string {
const (
maxWidth = 28
ellipsis = "..."
ellipsisWidth = 3
)
@@ -202,7 +225,7 @@ func padName(name string, targetWidth int) string {
return name + strings.Repeat(" ", targetWidth-currentWidth)
}

// formatUnusedTime formats the time since last access in a compact way.
// formatUnusedTime formats time since last access.
func formatUnusedTime(lastAccess time.Time) string {
if lastAccess.IsZero() {
return ""

309
cmd/analyze/format_test.go
Normal file
309
cmd/analyze/format_test.go
Normal file
@@ -0,0 +1,309 @@
package main

import (
"strings"
"testing"
)

func TestRuneWidth(t *testing.T) {
tests := []struct {
name string
input rune
want int
}{
{"ASCII letter", 'a', 1},
{"ASCII digit", '5', 1},
{"Chinese character", '中', 2},
{"Japanese hiragana", 'あ', 2},
{"Korean hangul", '한', 2},
{"CJK ideograph", '語', 2},
{"Full-width number", '1', 2},
{"ASCII space", ' ', 1},
{"Tab", '\t', 1},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := runeWidth(tt.input); got != tt.want {
t.Errorf("runeWidth(%q) = %d, want %d", tt.input, got, tt.want)
}
})
}
}

func TestDisplayWidth(t *testing.T) {
tests := []struct {
name string
input string
want int
}{
{"Empty string", "", 0},
{"ASCII only", "hello", 5},
{"Chinese only", "你好", 4},
{"Mixed ASCII and CJK", "hello世界", 9}, // 5 + 4
{"Path with CJK", "/Users/张三/文件", 16}, // 7 (ASCII) + 4 (张三) + 4 (文件) + 1 (/) = 16
{"Full-width chars", "123", 6},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := displayWidth(tt.input); got != tt.want {
t.Errorf("displayWidth(%q) = %d, want %d", tt.input, got, tt.want)
}
})
}
}

func TestHumanizeBytes(t *testing.T) {
tests := []struct {
input int64
want string
}{
{-100, "0 B"},
{0, "0 B"},
{512, "512 B"},
{1023, "1023 B"},
{1024, "1.0 KB"},
{1536, "1.5 KB"},
{10240, "10.0 KB"},
{1048576, "1.0 MB"},
{1572864, "1.5 MB"},
{1073741824, "1.0 GB"},
{1099511627776, "1.0 TB"},
{1125899906842624, "1.0 PB"},
}

for _, tt := range tests {
got := humanizeBytes(tt.input)
if got != tt.want {
t.Errorf("humanizeBytes(%d) = %q, want %q", tt.input, got, tt.want)
}
}
}

func TestFormatNumber(t *testing.T) {
tests := []struct {
input int64
want string
}{
{0, "0"},
{500, "500"},
{999, "999"},
{1000, "1.0k"},
{1500, "1.5k"},
{999999, "1000.0k"},
{1000000, "1.0M"},
{1500000, "1.5M"},
}

for _, tt := range tests {
got := formatNumber(tt.input)
if got != tt.want {
t.Errorf("formatNumber(%d) = %q, want %q", tt.input, got, tt.want)
}
}
}

func TestTruncateMiddle(t *testing.T) {
tests := []struct {
name string
input string
maxWidth int
check func(t *testing.T, result string)
}{
{
name: "No truncation needed",
input: "short",
maxWidth: 10,
check: func(t *testing.T, result string) {
if result != "short" {
t.Errorf("Should not truncate short string, got %q", result)
}
},
},
{
name: "Truncate long ASCII",
input: "verylongfilename.txt",
maxWidth: 15,
check: func(t *testing.T, result string) {
if !strings.Contains(result, "...") {
t.Errorf("Truncated string should contain '...', got %q", result)
}
if displayWidth(result) > 15 {
t.Errorf("Truncated width %d exceeds max %d", displayWidth(result), 15)
}
},
},
{
name: "Truncate with CJK characters",
input: "非常长的中文文件名称.txt",
maxWidth: 20,
check: func(t *testing.T, result string) {
if !strings.Contains(result, "...") {
t.Errorf("Should truncate CJK string, got %q", result)
}
if displayWidth(result) > 20 {
t.Errorf("Truncated width %d exceeds max %d", displayWidth(result), 20)
}
},
},
{
name: "Very small width",
input: "longname",
maxWidth: 5,
check: func(t *testing.T, result string) {
if displayWidth(result) > 5 {
t.Errorf("Width %d exceeds max %d", displayWidth(result), 5)
}
},
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := truncateMiddle(tt.input, tt.maxWidth)
tt.check(t, result)
})
}
}

func TestDisplayPath(t *testing.T) {
tests := []struct {
name string
setup func() string
check func(t *testing.T, result string)
}{
{
name: "Replace home directory",
setup: func() string {
home := t.TempDir()
t.Setenv("HOME", home)
return home + "/Documents/file.txt"
},
check: func(t *testing.T, result string) {
if !strings.HasPrefix(result, "~/") {
t.Errorf("Expected path to start with ~/, got %q", result)
}
if !strings.HasSuffix(result, "Documents/file.txt") {
t.Errorf("Expected path to end with Documents/file.txt, got %q", result)
}
},
},
{
name: "Keep absolute path outside home",
setup: func() string {
t.Setenv("HOME", "/Users/test")
return "/var/log/system.log"
},
check: func(t *testing.T, result string) {
if result != "/var/log/system.log" {
t.Errorf("Expected unchanged path, got %q", result)
}
},
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
path := tt.setup()
result := displayPath(path)
tt.check(t, result)
})
}
}

func TestPadName(t *testing.T) {
tests := []struct {
name string
input string
targetWidth int
wantWidth int
}{
{"Pad ASCII", "test", 10, 10},
{"No padding needed", "longname", 5, 8},
{"Pad CJK", "中文", 10, 10},
{"Mixed CJK and ASCII", "hello世", 15, 15},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := padName(tt.input, tt.targetWidth)
gotWidth := displayWidth(result)
if gotWidth < tt.wantWidth && displayWidth(tt.input) < tt.targetWidth {
t.Errorf("padName(%q, %d) width = %d, want >= %d", tt.input, tt.targetWidth, gotWidth, tt.wantWidth)
}
})
}
}

func TestTrimNameWithWidth(t *testing.T) {
tests := []struct {
name string
input string
maxWidth int
check func(t *testing.T, result string)
}{
{
name: "Trim ASCII name",
input: "verylongfilename.txt",
maxWidth: 10,
check: func(t *testing.T, result string) {
if displayWidth(result) > 10 {
t.Errorf("Width exceeds max: %d > 10", displayWidth(result))
}
if !strings.HasSuffix(result, "...") {
t.Errorf("Expected ellipsis, got %q", result)
}
},
},
{
name: "Trim CJK name",
input: "很长的文件名称.txt",
maxWidth: 12,
check: func(t *testing.T, result string) {
if displayWidth(result) > 12 {
t.Errorf("Width exceeds max: %d > 12", displayWidth(result))
}
},
},
{
name: "No trimming needed",
input: "short.txt",
maxWidth: 20,
check: func(t *testing.T, result string) {
if result != "short.txt" {
t.Errorf("Should not trim, got %q", result)
}
},
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := trimNameWithWidth(tt.input, tt.maxWidth)
tt.check(t, result)
})
}
}

func TestCalculateNameWidth(t *testing.T) {
tests := []struct {
termWidth int
wantMin int
wantMax int
}{
{80, 19, 60}, // 80 - 61 = 19
{120, 59, 60}, // 120 - 61 = 59
{200, 60, 60}, // Capped at 60
{70, 24, 60}, // Below minimum, use 24
{50, 24, 60}, // Very small, use minimum
}

for _, tt := range tests {
got := calculateNameWidth(tt.termWidth)
if got < tt.wantMin || got > tt.wantMax {
t.Errorf("calculateNameWidth(%d) = %d, want between %d and %d",
tt.termWidth, got, tt.wantMin, tt.wantMax)
}
}
}
@@ -1,15 +1,10 @@
package main

// entryHeap implements heap.Interface for a min-heap of dirEntry (sorted by Size)
// Since we want Top N Largest, we use a Min Heap of size N.
// When adding a new item:
// 1. If heap size < N: push
// 2. If heap size == N and item > min (root): pop min, push item
// The heap will thus maintain the largest N items.
// entryHeap is a min-heap of dirEntry used to keep Top N largest entries.
type entryHeap []dirEntry

func (h entryHeap) Len() int { return len(h) }
func (h entryHeap) Less(i, j int) bool { return h[i].Size < h[j].Size } // Min-heap based on Size
func (h entryHeap) Less(i, j int) bool { return h[i].Size < h[j].Size }
func (h entryHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }

func (h *entryHeap) Push(x interface{}) {
@@ -24,7 +19,7 @@ func (h *entryHeap) Pop() interface{} {
return x
}
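The Top-N discipline the removed comment spelled out (push while the heap holds fewer than N items; otherwise replace the root when a new item beats the current minimum) still governs how entries are fed in. A self-contained sketch with container/heap, using an illustrative int64 heap rather than the package's dirEntry:

package main

import (
	"container/heap"
	"fmt"
)

// intHeap is a min-heap of sizes; the root is always the smallest kept item.
type intHeap []int64

func (h intHeap) Len() int            { return len(h) }
func (h intHeap) Less(i, j int) bool  { return h[i] < h[j] }
func (h intHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *intHeap) Push(x interface{}) { *h = append(*h, x.(int64)) }
func (h *intHeap) Pop() interface{} {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

// keepTopN keeps only the n largest values seen so far.
func keepTopN(h *intHeap, n int, v int64) {
	if h.Len() < n {
		heap.Push(h, v)
		return
	}
	if v > (*h)[0] { // beats the current minimum: replace the root
		heap.Pop(h)
		heap.Push(h, v)
	}
}

func main() {
	h := &intHeap{}
	for _, v := range []int64{5, 1, 9, 3, 7} {
		keepTopN(h, 3, v)
	}
	fmt.Println(*h) // the three largest values remain: 5, 7, 9 (in heap order)
}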

// largeFileHeap implements heap.Interface for fileEntry
// largeFileHeap is a min-heap for fileEntry.
type largeFileHeap []fileEntry

func (h largeFileHeap) Len() int { return len(h) }

@@ -9,6 +9,7 @@ import (
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"sync/atomic"
"time"
@@ -111,6 +112,8 @@ type model struct {
overviewScanningSet map[string]bool // Track which paths are currently being scanned
width int // Terminal width
height int // Terminal height
multiSelected map[string]bool // Track multi-selected items by path (safer than index)
largeMultiSelected map[string]bool // Track multi-selected large files by path (safer than index)
}

func (m model) inOverviewMode() bool {
@@ -127,7 +130,6 @@ func main() {
var isOverview bool

if target == "" {
// Default to overview mode
isOverview = true
abs = "/"
} else {
@@ -140,8 +142,7 @@ func main() {
isOverview = false
}

// Prefetch overview cache in background (non-blocking)
// Use context with timeout to prevent hanging
// Warm overview cache in background.
prefetchCtx, prefetchCancel := context.WithTimeout(context.Background(), 30*time.Second)
defer prefetchCancel()
go prefetchOverviewCache(prefetchCtx)
@@ -177,9 +178,10 @@ func newModel(path string, isOverview bool) model {
overviewCurrentPath: &overviewCurrentPath,
overviewSizeCache: make(map[string]int64),
overviewScanningSet: make(map[string]bool),
multiSelected: make(map[string]bool),
largeMultiSelected: make(map[string]bool),
}

// In overview mode, create shortcut entries
if isOverview {
m.scanning = false
m.hydrateOverviewEntries()
@@ -200,11 +202,14 @@ func createOverviewEntries() []dirEntry {
home := os.Getenv("HOME")
entries := []dirEntry{}

// Separate Home and ~/Library to avoid double counting.
if home != "" {
entries = append(entries,
dirEntry{Name: "Home (~)", Path: home, IsDir: true, Size: -1},
dirEntry{Name: "Library (~/Library)", Path: filepath.Join(home, "Library"), IsDir: true, Size: -1},
)
entries = append(entries, dirEntry{Name: "Home", Path: home, IsDir: true, Size: -1})

userLibrary := filepath.Join(home, "Library")
if _, err := os.Stat(userLibrary); err == nil {
entries = append(entries, dirEntry{Name: "App Library", Path: userLibrary, IsDir: true, Size: -1})
}
}

entries = append(entries,
@@ -212,7 +217,7 @@ func createOverviewEntries() []dirEntry {
dirEntry{Name: "System Library", Path: "/Library", IsDir: true, Size: -1},
)

// Add Volumes shortcut only when it contains real mounted folders (e.g., external disks)
// Include Volumes only when real mounts exist.
if hasUsefulVolumeMounts("/Volumes") {
entries = append(entries, dirEntry{Name: "Volumes", Path: "/Volumes", IsDir: true, Size: -1})
}
@@ -228,7 +233,6 @@ func hasUsefulVolumeMounts(path string) bool {

for _, entry := range entries {
name := entry.Name()
// Skip hidden control entries for Spotlight/TimeMachine etc.
if strings.HasPrefix(name, ".") {
continue
}
@@ -265,12 +269,18 @@ func (m *model) hydrateOverviewEntries() {
m.totalSize = sumKnownEntrySizes(m.entries)
}

func (m *model) sortOverviewEntriesBySize() {
// Stable sort by size.
sort.SliceStable(m.entries, func(i, j int) bool {
return m.entries[i].Size > m.entries[j].Size
})
}

func (m *model) scheduleOverviewScans() tea.Cmd {
if !m.inOverviewMode() {
return nil
}

// Find pending entries (not scanned and not currently scanning)
var pendingIndices []int
for i, entry := range m.entries {
if entry.Size < 0 && !m.overviewScanningSet[entry.Path] {
@@ -281,16 +291,15 @@ func (m *model) scheduleOverviewScans() tea.Cmd {
}
}

// No more work to do
if len(pendingIndices) == 0 {
m.overviewScanning = false
if !hasPendingOverviewEntries(m.entries) {
m.sortOverviewEntriesBySize()
m.status = "Ready"
}
return nil
}

// Mark all as scanning
var cmds []tea.Cmd
for _, idx := range pendingIndices {
entry := m.entries[idx]
@@ -341,7 +350,6 @@ func (m model) Init() tea.Cmd {

func (m model) scanCmd(path string) tea.Cmd {
return func() tea.Msg {
// Try to load from persistent cache first
if cached, err := loadCacheFromDisk(path); err == nil {
result := scanResult{
Entries: cached.Entries,
@@ -351,8 +359,6 @@ func (m model) scanCmd(path string) tea.Cmd {
return scanResultMsg{result: result, err: nil}
}

// Use singleflight to avoid duplicate scans of the same path
// If multiple goroutines request the same path, only one scan will be performed
v, err, _ := scanGroup.Do(path, func() (interface{}, error) {
return scanPathConcurrent(path, m.filesScanned, m.dirsScanned, m.bytesScanned, m.currentPath)
})
@@ -363,10 +369,8 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {

result := v.(scanResult)

// Save to persistent cache asynchronously with error logging
go func(p string, r scanResult) {
if err := saveCacheToDisk(p, r); err != nil {
// Log error but don't fail the scan
_ = err // Cache save failure is not critical
}
}(path, result)
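scanGroup.Do above follows the x/sync singleflight pattern: concurrent callers asking for the same key share a single execution and its result. A minimal standalone sketch, assuming golang.org/x/sync/singleflight is available:

package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All three goroutines share one execution of the function;
			// shared reports whether the result was handed to multiple callers.
			v, _, shared := g.Do("/some/path", func() (interface{}, error) {
				return "scan result", nil // stands in for the expensive scan
			})
			fmt.Println(v, shared)
		}()
	}
	wg.Wait()
}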
@@ -392,6 +396,8 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
case deleteProgressMsg:
if msg.done {
m.deleting = false
m.multiSelected = make(map[string]bool)
m.largeMultiSelected = make(map[string]bool)
if msg.err != nil {
m.status = fmt.Sprintf("Failed to delete: %v", msg.err)
} else {
@@ -401,7 +407,6 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
}
invalidateCache(m.path)
m.status = fmt.Sprintf("Deleted %d items", msg.count)
// Mark all caches as dirty
for i := range m.history {
m.history[i].Dirty = true
}
@@ -410,9 +415,7 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
entry.Dirty = true
m.cache[path] = entry
}
// Refresh the view
m.scanning = true
// Reset scan counters for rescan
atomic.StoreInt64(m.filesScanned, 0)
atomic.StoreInt64(m.dirsScanned, 0)
atomic.StoreInt64(m.bytesScanned, 0)
@@ -429,7 +432,6 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
m.status = fmt.Sprintf("Scan failed: %v", msg.err)
return m, nil
}
// Filter out 0-byte items for cleaner view
filteredEntries := make([]dirEntry, 0, len(msg.result.Entries))
for _, e := range msg.result.Entries {
if e.Size > 0 {
@@ -454,7 +456,6 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
}
return m, nil
case overviewSizeMsg:
// Remove from scanning set
delete(m.overviewScanningSet, msg.Path)

if msg.Err == nil {
@@ -465,7 +466,6 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
}

if m.inOverviewMode() {
// Update entry with result
for i := range m.entries {
if m.entries[i].Path == msg.Path {
if msg.Err == nil {
@@ -478,18 +478,15 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
}
m.totalSize = sumKnownEntrySizes(m.entries)

// Show error briefly if any
if msg.Err != nil {
m.status = fmt.Sprintf("Unable to measure %s: %v", displayPath(msg.Path), msg.Err)
}

// Schedule next batch of scans
cmd := m.scheduleOverviewScans()
return m, cmd
}
return m, nil
case tickMsg:
// Keep spinner running if scanning or deleting or if there are pending overview items
hasPending := false
if m.inOverviewMode() {
for _, entry := range m.entries {
@@ -501,7 +498,6 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
}
if m.scanning || m.deleting || (m.inOverviewMode() && (m.overviewScanning || hasPending)) {
m.spinner = (m.spinner + 1) % len(spinnerFrames)
// Update delete progress status
if m.deleting && m.deleteCount != nil {
count := atomic.LoadInt64(m.deleteCount)
if count > 0 {
@@ -517,33 +513,56 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
}

func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
// Handle delete confirmation
// Delete confirm flow.
if m.deleteConfirm {
switch msg.String() {
case "delete", "backspace":
// Confirm delete - start async deletion
if m.deleteTarget != nil {
m.deleteConfirm = false
m.deleting = true
var deleteCount int64
m.deleteCount = &deleteCount
targetPath := m.deleteTarget.Path
targetName := m.deleteTarget.Name
m.deleteTarget = nil
m.status = fmt.Sprintf("Deleting %s...", targetName)
m.deleteConfirm = false
m.deleting = true
var deleteCount int64
m.deleteCount = &deleteCount

// Collect paths (safer than indices).
var pathsToDelete []string
if m.showLargeFiles {
if len(m.largeMultiSelected) > 0 {
for path := range m.largeMultiSelected {
pathsToDelete = append(pathsToDelete, path)
}
} else if m.deleteTarget != nil {
pathsToDelete = append(pathsToDelete, m.deleteTarget.Path)
}
} else {
if len(m.multiSelected) > 0 {
for path := range m.multiSelected {
pathsToDelete = append(pathsToDelete, path)
}
} else if m.deleteTarget != nil {
pathsToDelete = append(pathsToDelete, m.deleteTarget.Path)
}
}

m.deleteTarget = nil
if len(pathsToDelete) == 0 {
m.deleting = false
m.status = "Nothing to delete"
return m, nil
}

if len(pathsToDelete) == 1 {
targetPath := pathsToDelete[0]
m.status = fmt.Sprintf("Deleting %s...", filepath.Base(targetPath))
return m, tea.Batch(deletePathCmd(targetPath, m.deleteCount), tickCmd())
}
m.deleteConfirm = false
m.deleteTarget = nil
return m, nil

m.status = fmt.Sprintf("Deleting %d items...", len(pathsToDelete))
return m, tea.Batch(deleteMultiplePathsCmd(pathsToDelete, m.deleteCount), tickCmd())
case "esc", "q":
// Cancel delete with ESC or Q
m.status = "Cancelled"
m.deleteConfirm = false
m.deleteTarget = nil
return m, nil
default:
// Ignore other keys - keep showing confirmation
return m, nil
}
}
@@ -598,7 +617,6 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
return m, nil
}
if len(m.history) == 0 {
// Return to overview if at top level
if !m.inOverviewMode() {
return m, m.switchToOverviewMode()
}
@@ -613,7 +631,7 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
m.largeOffset = last.LargeOffset
m.isOverview = last.IsOverview
if last.Dirty {
// If returning to overview mode, refresh overview entries instead of scanning
// On overview return, refresh cached entries.
if last.IsOverview {
m.hydrateOverviewEntries()
m.totalSize = sumKnownEntrySizes(m.entries)
@@ -646,13 +664,14 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
m.scanning = false
return m, nil
case "r":
m.multiSelected = make(map[string]bool)
m.largeMultiSelected = make(map[string]bool)

if m.inOverviewMode() {
// In overview mode, clear cache and re-scan known entries
m.overviewSizeCache = make(map[string]int64)
m.overviewScanningSet = make(map[string]bool)
m.hydrateOverviewEntries() // Reset sizes to pending

// Reset all entries to pending state for visual feedback
for i := range m.entries {
m.entries[i].Size = -1
}
@@ -663,11 +682,9 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
return m, tea.Batch(m.scheduleOverviewScans(), tickCmd())
}

// Normal mode: Invalidate cache before rescanning
invalidateCache(m.path)
m.status = "Refreshing..."
m.scanning = true
// Reset scan counters for refresh
atomic.StoreInt64(m.filesScanned, 0)
atomic.StoreInt64(m.dirsScanned, 0)
atomic.StoreInt64(m.bytesScanned, 0)
@@ -676,19 +693,63 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
}
return m, tea.Batch(m.scanCmd(m.path), tickCmd())
case "t", "T":
// Don't allow switching to large files view in overview mode
if !m.inOverviewMode() {
m.showLargeFiles = !m.showLargeFiles
if m.showLargeFiles {
m.largeSelected = 0
m.largeOffset = 0
m.largeMultiSelected = make(map[string]bool)
} else {
m.multiSelected = make(map[string]bool)
}
m.status = fmt.Sprintf("Scanned %s", humanizeBytes(m.totalSize))
}
case "o":
// Open selected entry
// Open selected entries (multi-select aware).
const maxBatchOpen = 20
if m.showLargeFiles {
if len(m.largeFiles) > 0 {
selected := m.largeFiles[m.largeSelected]
if len(m.largeMultiSelected) > 0 {
count := len(m.largeMultiSelected)
if count > maxBatchOpen {
m.status = fmt.Sprintf("Too many items to open (max %d, selected %d)", maxBatchOpen, count)
return m, nil
}
for path := range m.largeMultiSelected {
go func(p string) {
ctx, cancel := context.WithTimeout(context.Background(), openCommandTimeout)
defer cancel()
_ = exec.CommandContext(ctx, "open", p).Run()
}(path)
}
m.status = fmt.Sprintf("Opening %d items...", count)
} else {
selected := m.largeFiles[m.largeSelected]
go func(path string) {
ctx, cancel := context.WithTimeout(context.Background(), openCommandTimeout)
defer cancel()
_ = exec.CommandContext(ctx, "open", path).Run()
}(selected.Path)
m.status = fmt.Sprintf("Opening %s...", selected.Name)
}
}
} else if len(m.entries) > 0 {
if len(m.multiSelected) > 0 {
count := len(m.multiSelected)
if count > maxBatchOpen {
m.status = fmt.Sprintf("Too many items to open (max %d, selected %d)", maxBatchOpen, count)
return m, nil
}
for path := range m.multiSelected {
go func(p string) {
ctx, cancel := context.WithTimeout(context.Background(), openCommandTimeout)
defer cancel()
_ = exec.CommandContext(ctx, "open", p).Run()
}(path)
}
m.status = fmt.Sprintf("Opening %d items...", count)
} else {
selected := m.entries[m.selected]
go func(path string) {
ctx, cancel := context.WithTimeout(context.Background(), openCommandTimeout)
defer cancel()
@@ -696,20 +757,53 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
}(selected.Path)
m.status = fmt.Sprintf("Opening %s...", selected.Name)
}
} else if len(m.entries) > 0 {
selected := m.entries[m.selected]
go func(path string) {
ctx, cancel := context.WithTimeout(context.Background(), openCommandTimeout)
defer cancel()
_ = exec.CommandContext(ctx, "open", path).Run()
}(selected.Path)
m.status = fmt.Sprintf("Opening %s...", selected.Name)
}
case "f", "F":
// Reveal selected entry in Finder
// Reveal in Finder (multi-select aware).
const maxBatchReveal = 20
if m.showLargeFiles {
if len(m.largeFiles) > 0 {
selected := m.largeFiles[m.largeSelected]
if len(m.largeMultiSelected) > 0 {
count := len(m.largeMultiSelected)
if count > maxBatchReveal {
m.status = fmt.Sprintf("Too many items to reveal (max %d, selected %d)", maxBatchReveal, count)
return m, nil
}
for path := range m.largeMultiSelected {
go func(p string) {
ctx, cancel := context.WithTimeout(context.Background(), openCommandTimeout)
defer cancel()
_ = exec.CommandContext(ctx, "open", "-R", p).Run()
}(path)
}
m.status = fmt.Sprintf("Showing %d items in Finder...", count)
} else {
selected := m.largeFiles[m.largeSelected]
go func(path string) {
ctx, cancel := context.WithTimeout(context.Background(), openCommandTimeout)
defer cancel()
_ = exec.CommandContext(ctx, "open", "-R", path).Run()
}(selected.Path)
m.status = fmt.Sprintf("Showing %s in Finder...", selected.Name)
}
}
} else if len(m.entries) > 0 {
if len(m.multiSelected) > 0 {
count := len(m.multiSelected)
if count > maxBatchReveal {
m.status = fmt.Sprintf("Too many items to reveal (max %d, selected %d)", maxBatchReveal, count)
return m, nil
}
for path := range m.multiSelected {
go func(p string) {
ctx, cancel := context.WithTimeout(context.Background(), openCommandTimeout)
defer cancel()
_ = exec.CommandContext(ctx, "open", "-R", p).Run()
}(path)
}
m.status = fmt.Sprintf("Showing %d items in Finder...", count)
} else {
selected := m.entries[m.selected]
go func(path string) {
ctx, cancel := context.WithTimeout(context.Background(), openCommandTimeout)
defer cancel()
@@ -717,32 +811,110 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
}(selected.Path)
m.status = fmt.Sprintf("Showing %s in Finder...", selected.Name)
}
} else if len(m.entries) > 0 {
selected := m.entries[m.selected]
go func(path string) {
ctx, cancel := context.WithTimeout(context.Background(), openCommandTimeout)
defer cancel()
_ = exec.CommandContext(ctx, "open", "-R", path).Run()
}(selected.Path)
m.status = fmt.Sprintf("Showing %s in Finder...", selected.Name)
}
case " ":
// Toggle multi-select (paths as keys).
if m.showLargeFiles {
if len(m.largeFiles) > 0 && m.largeSelected < len(m.largeFiles) {
|
||||
if m.largeMultiSelected == nil {
|
||||
m.largeMultiSelected = make(map[string]bool)
|
||||
}
|
||||
selectedPath := m.largeFiles[m.largeSelected].Path
|
||||
if m.largeMultiSelected[selectedPath] {
|
||||
delete(m.largeMultiSelected, selectedPath)
|
||||
} else {
|
||||
m.largeMultiSelected[selectedPath] = true
|
||||
}
|
||||
count := len(m.largeMultiSelected)
|
||||
if count > 0 {
|
||||
var totalSize int64
|
||||
for path := range m.largeMultiSelected {
|
||||
for _, file := range m.largeFiles {
|
||||
if file.Path == path {
|
||||
totalSize += file.Size
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
m.status = fmt.Sprintf("%d selected (%s)", count, humanizeBytes(totalSize))
|
||||
} else {
|
||||
m.status = fmt.Sprintf("Scanned %s", humanizeBytes(m.totalSize))
|
||||
}
|
||||
}
|
||||
} else if len(m.entries) > 0 && !m.inOverviewMode() && m.selected < len(m.entries) {
|
||||
if m.multiSelected == nil {
|
||||
m.multiSelected = make(map[string]bool)
|
||||
}
|
||||
selectedPath := m.entries[m.selected].Path
|
||||
if m.multiSelected[selectedPath] {
|
||||
delete(m.multiSelected, selectedPath)
|
||||
} else {
|
||||
m.multiSelected[selectedPath] = true
|
||||
}
|
||||
count := len(m.multiSelected)
|
||||
if count > 0 {
|
||||
var totalSize int64
|
||||
for path := range m.multiSelected {
|
||||
for _, entry := range m.entries {
|
||||
if entry.Path == path {
|
||||
totalSize += entry.Size
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
m.status = fmt.Sprintf("%d selected (%s)", count, humanizeBytes(totalSize))
|
||||
} else {
|
||||
m.status = fmt.Sprintf("Scanned %s", humanizeBytes(m.totalSize))
|
||||
}
|
||||
}
|
||||
case "delete", "backspace":
|
||||
// Delete selected file or directory
|
||||
if m.showLargeFiles {
|
||||
if len(m.largeFiles) > 0 {
|
||||
selected := m.largeFiles[m.largeSelected]
|
||||
m.deleteConfirm = true
|
||||
m.deleteTarget = &dirEntry{
|
||||
Name: selected.Name,
|
||||
Path: selected.Path,
|
||||
Size: selected.Size,
|
||||
IsDir: false,
|
||||
if len(m.largeMultiSelected) > 0 {
|
||||
m.deleteConfirm = true
|
||||
for path := range m.largeMultiSelected {
|
||||
for _, file := range m.largeFiles {
|
||||
if file.Path == path {
|
||||
m.deleteTarget = &dirEntry{
|
||||
Name: file.Name,
|
||||
Path: file.Path,
|
||||
Size: file.Size,
|
||||
IsDir: false,
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
break // Only need first one for display
|
||||
}
|
||||
} else if m.largeSelected < len(m.largeFiles) {
|
||||
selected := m.largeFiles[m.largeSelected]
|
||||
m.deleteConfirm = true
|
||||
m.deleteTarget = &dirEntry{
|
||||
Name: selected.Name,
|
||||
Path: selected.Path,
|
||||
Size: selected.Size,
|
||||
IsDir: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if len(m.entries) > 0 && !m.inOverviewMode() {
|
||||
selected := m.entries[m.selected]
|
||||
m.deleteConfirm = true
|
||||
m.deleteTarget = &selected
|
||||
if len(m.multiSelected) > 0 {
|
||||
m.deleteConfirm = true
|
||||
for path := range m.multiSelected {
|
||||
// Resolve entry by path.
|
||||
for i := range m.entries {
|
||||
if m.entries[i].Path == path {
|
||||
m.deleteTarget = &m.entries[i]
|
||||
break
|
||||
}
|
||||
}
|
||||
break // Only need first one for display
|
||||
}
|
||||
} else if m.selected < len(m.entries) {
|
||||
selected := m.entries[m.selected]
|
||||
m.deleteConfirm = true
|
||||
m.deleteTarget = &selected
|
||||
}
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
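The open/reveal handlers above all follow the same shape: refuse oversized selections, then launch one short-lived "open" process per path so the TUI never blocks. A minimal standalone sketch of that pattern (maxBatchOpen and the 5-second timeout here are assumed stand-ins for the repo's constants):

package main

import (
    "context"
    "fmt"
    "os/exec"
    "time"
)

const maxBatchOpen = 20                    // assumed cap, mirroring the diff
const openCommandTimeout = 5 * time.Second // assumed value; the repo defines its own constant

// openAll fires one bounded, time-limited "open" per selected path.
func openAll(paths map[string]bool) error {
    if count := len(paths); count > maxBatchOpen {
        return fmt.Errorf("too many items to open (max %d, selected %d)", maxBatchOpen, count)
    }
    for p := range paths {
        go func(p string) {
            ctx, cancel := context.WithTimeout(context.Background(), openCommandTimeout)
            defer cancel()
            _ = exec.CommandContext(ctx, "open", p).Run() // macOS opener; errors ignored, as in the TUI
        }(p)
    }
    return nil
}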
@@ -766,7 +938,6 @@ func (m *model) switchToOverviewMode() tea.Cmd {
        m.status = "Ready"
        return nil
    }
    // Start tick to animate spinner while scanning
    return tea.Batch(cmd, tickCmd())
}

@@ -776,7 +947,6 @@ func (m model) enterSelectedDir() (tea.Model, tea.Cmd) {
    }
    selected := m.entries[m.selected]
    if selected.IsDir {
        // Always save current state to history (including overview mode)
        m.history = append(m.history, snapshotFromModel(m))
        m.path = selected.Path
        m.selected = 0
@@ -784,8 +954,9 @@ func (m model) enterSelectedDir() (tea.Model, tea.Cmd) {
        m.status = "Scanning..."
        m.scanning = true
        m.isOverview = false
        m.multiSelected = make(map[string]bool)
        m.largeMultiSelected = make(map[string]bool)

        // Reset scan counters for new scan
        atomic.StoreInt64(m.filesScanned, 0)
        atomic.StoreInt64(m.dirsScanned, 0)
        atomic.StoreInt64(m.bytesScanned, 0)

@@ -31,16 +31,14 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in

    var total int64

    // Use heaps to track Top N items, drastically reducing memory usage
    // for directories with millions of files
    // Keep Top N heaps.
    entriesHeap := &entryHeap{}
    heap.Init(entriesHeap)

    largeFilesHeap := &largeFileHeap{}
    heap.Init(largeFilesHeap)

    // Use worker pool for concurrent directory scanning
    // For I/O-bound operations, use more workers than CPU count
    // Worker pool sized for I/O-bound scanning.
    numWorkers := runtime.NumCPU() * cpuMultiplier
    if numWorkers < minWorkers {
        numWorkers = minWorkers
@@ -57,17 +55,15 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
    sem := make(chan struct{}, numWorkers)
    var wg sync.WaitGroup

    // Use channels to collect results without lock contention
    // Collect results via channels.
    entryChan := make(chan dirEntry, len(children))
    largeFileChan := make(chan fileEntry, maxLargeFiles*2)

    // Start goroutines to collect from channels into heaps
    var collectorWg sync.WaitGroup
    collectorWg.Add(2)
    go func() {
        defer collectorWg.Done()
        for entry := range entryChan {
            // Maintain Top N Heap for entries
            if entriesHeap.Len() < maxEntries {
                heap.Push(entriesHeap, entry)
            } else if entry.Size > (*entriesHeap)[0].Size {
@@ -79,7 +75,6 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
    go func() {
        defer collectorWg.Done()
        for file := range largeFileChan {
            // Maintain Top N Heap for large files
            if largeFilesHeap.Len() < maxLargeFiles {
                heap.Push(largeFilesHeap, file)
            } else if file.Size > (*largeFilesHeap)[0].Size {
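The two collector goroutines above keep only the top N items by size with a min-heap: push until the heap is full, then overwrite the root whenever a new item beats the current minimum. A self-contained sketch with container/heap; the item type here is illustrative, not the repo's dirEntry:

package main

import (
    "container/heap"
    "fmt"
)

type item struct {
    name string
    size int64
}

// minHeap orders items by ascending size so the root is the smallest kept item.
type minHeap []item

func (h minHeap) Len() int           { return len(h) }
func (h minHeap) Less(i, j int) bool { return h[i].size < h[j].size }
func (h minHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *minHeap) Push(x any)        { *h = append(*h, x.(item)) }
func (h *minHeap) Pop() any {
    old := *h
    n := len(old)
    x := old[n-1]
    *h = old[:n-1]
    return x
}

func main() {
    const maxEntries = 3
    h := &minHeap{}
    heap.Init(h)
    for _, it := range []item{{"a", 5}, {"b", 9}, {"c", 1}, {"d", 7}, {"e", 3}} {
        if h.Len() < maxEntries {
            heap.Push(h, it)
        } else if it.size > (*h)[0].size {
            (*h)[0] = it   // replace the smallest kept item
            heap.Fix(h, 0) // restore heap order
        }
    }
    // Pop ascending, fill the slice back-to-front for descending output.
    out := make([]item, h.Len())
    for i := len(out) - 1; i >= 0; i-- {
        out[i] = heap.Pop(h).(item)
    }
    fmt.Println(out) // [{b 9} {d 7} {a 5}]
}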
@@ -90,24 +85,21 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
    }()

    isRootDir := root == "/"
    home := os.Getenv("HOME")
    isHomeDir := home != "" && root == home

    for _, child := range children {
        fullPath := filepath.Join(root, child.Name())

        // Skip symlinks to avoid following them into unexpected locations
        // Use Type() instead of IsDir() to check without following symlinks
        // Skip symlinks to avoid following unexpected targets.
        if child.Type()&fs.ModeSymlink != 0 {
            // For symlinks, check if they point to a directory
            targetInfo, err := os.Stat(fullPath)
            isDir := false
            if err == nil && targetInfo.IsDir() {
                isDir = true
            }

            // Get symlink size (we don't effectively count the target size towards parent to avoid double counting,
            // or we just count the link size itself. Existing logic counts 'size' via getActualFileSize on the link info).
            // Ideally we just want navigation.
            // Re-fetching info for link itself if needed, but child.Info() does that.
            // Count link size only to avoid double-counting targets.
            info, err := child.Info()
            if err != nil {
                continue
@@ -116,27 +108,56 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
            atomic.AddInt64(&total, size)

            entryChan <- dirEntry{
                Name: child.Name() + " →", // Add arrow to indicate symlink
                Name: child.Name() + " →",
                Path: fullPath,
                Size: size,
                IsDir: isDir, // Allow navigation if target is directory
                IsDir: isDir,
                LastAccess: getLastAccessTimeFromInfo(info),
            }
            continue
        }

        if child.IsDir() {
            // Check if directory should be skipped based on user configuration
            if defaultSkipDirs[child.Name()] {
                continue
            }

            // In root directory, skip system directories completely
            // Skip system dirs at root.
            if isRootDir && skipSystemDirs[child.Name()] {
                continue
            }

            // For folded directories, calculate size quickly without expanding
            // ~/Library is scanned separately; reuse cache when possible.
            if isHomeDir && child.Name() == "Library" {
                wg.Add(1)
                go func(name, path string) {
                    defer wg.Done()
                    sem <- struct{}{}
                    defer func() { <-sem }()

                    var size int64
                    if cached, err := loadStoredOverviewSize(path); err == nil && cached > 0 {
                        size = cached
                    } else if cached, err := loadCacheFromDisk(path); err == nil {
                        size = cached.TotalSize
                    } else {
                        size = calculateDirSizeConcurrent(path, largeFileChan, filesScanned, dirsScanned, bytesScanned, currentPath)
                    }
                    atomic.AddInt64(&total, size)
                    atomic.AddInt64(dirsScanned, 1)

                    entryChan <- dirEntry{
                        Name: name,
                        Path: path,
                        Size: size,
                        IsDir: true,
                        LastAccess: time.Time{},
                    }
                }(child.Name(), fullPath)
                continue
            }

            // Folded dirs: fast size without expanding.
            if shouldFoldDirWithPath(child.Name(), fullPath) {
                wg.Add(1)
                go func(name, path string) {
@@ -144,10 +165,8 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
                    sem <- struct{}{}
                    defer func() { <-sem }()

                    // Try du command first for folded dirs (much faster)
                    size, err := getDirectorySizeFromDu(path)
                    if err != nil || size <= 0 {
                        // Fallback to concurrent walk if du fails
                        size = calculateDirSizeFast(path, filesScanned, dirsScanned, bytesScanned, currentPath)
                    }
                    atomic.AddInt64(&total, size)
@@ -158,13 +177,12 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
                        Path: path,
                        Size: size,
                        IsDir: true,
                        LastAccess: time.Time{}, // Lazy load when displayed
                        LastAccess: time.Time{},
                    }
                }(child.Name(), fullPath)
                continue
            }

            // Normal directory: full scan with detail
            wg.Add(1)
            go func(name, path string) {
                defer wg.Done()
@@ -180,7 +198,7 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
                    Path: path,
                    Size: size,
                    IsDir: true,
                    LastAccess: time.Time{}, // Lazy load when displayed
                    LastAccess: time.Time{},
                }
            }(child.Name(), fullPath)
            continue
@@ -190,7 +208,7 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
        if err != nil {
            continue
        }
        // Get actual disk usage for sparse files and cloud files
        // Actual disk usage for sparse/cloud files.
        size := getActualFileSize(fullPath, info)
        atomic.AddInt64(&total, size)
        atomic.AddInt64(filesScanned, 1)
@@ -203,7 +221,7 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
            IsDir: false,
            LastAccess: getLastAccessTimeFromInfo(info),
        }
        // Only track large files that are not code/text files
        // Track large files only.
        if !shouldSkipFileForLargeTracking(fullPath) && size >= minLargeFileSize {
            largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}
        }
@@ -211,12 +229,12 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in

    wg.Wait()

    // Close channels and wait for collectors to finish
    // Close channels and wait for collectors.
    close(entryChan)
    close(largeFileChan)
    collectorWg.Wait()

    // Convert Heaps to sorted slices (Descending order)
    // Convert heaps to sorted slices (descending).
    entries := make([]dirEntry, entriesHeap.Len())
    for i := len(entries) - 1; i >= 0; i-- {
        entries[i] = heap.Pop(entriesHeap).(dirEntry)
@@ -227,20 +245,11 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
        largeFiles[i] = heap.Pop(largeFilesHeap).(fileEntry)
    }

    // Try to use Spotlight (mdfind) for faster large file discovery
    // This is a performance optimization that gracefully falls back to scan results
    // if Spotlight is unavailable or fails. The fallback is intentionally silent
    // because users only care about correct results, not the method used.
    // Use Spotlight for large files when available.
    if spotlightFiles := findLargeFilesWithSpotlight(root, minLargeFileSize); len(spotlightFiles) > 0 {
        // Spotlight results are already sorted top N
        // Use them in place of scanned large files
        largeFiles = spotlightFiles
    }

    // Double check sorting consistency (Spotlight returns sorted, but heap pop handles scan results)
    // If needed, we could re-sort largeFiles, but heap pop ensures ascending, and we filled reverse, so it's Descending.
    // Spotlight returns Descending. So no extra sort needed for either.

    return scanResult{
        Entries: entries,
        LargeFiles: largeFiles,
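The Spotlight query used above can be exercised in isolation. A sketch, assuming a flat 2-second timeout instead of the repo's mdlsTimeout constant:

package main

import (
    "bufio"
    "bytes"
    "context"
    "fmt"
    "os/exec"
    "time"
)

// spotlightPaths asks Spotlight for files of at least minSize bytes under root.
func spotlightPaths(root string, minSize int64) ([]string, error) {
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) // assumed timeout
    defer cancel()

    query := fmt.Sprintf("kMDItemFSSize >= %d", minSize)
    out, err := exec.CommandContext(ctx, "mdfind", "-onlyin", root, query).Output()
    if err != nil {
        return nil, err // caller falls back to the scan results
    }
    var paths []string
    sc := bufio.NewScanner(bytes.NewReader(out))
    for sc.Scan() {
        if line := sc.Text(); line != "" {
            paths = append(paths, line)
        }
    }
    return paths, sc.Err()
}

func main() {
    paths, err := spotlightPaths(".", 100<<20) // files >= 100 MB under the current directory
    fmt.Println(len(paths), err)
}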
@@ -249,21 +258,16 @@
}

func shouldFoldDirWithPath(name, path string) bool {
    // Check basic fold list first
    if foldDirs[name] {
        return true
    }

    // Special case: npm cache directories - fold all subdirectories
    // This includes: .npm/_quick/*, .npm/_cacache/*, .npm/a-z/*, .tnpm/*
    // Handle npm cache structure.
    if strings.Contains(path, "/.npm/") || strings.Contains(path, "/.tnpm/") {
        // Get the parent directory name
        parent := filepath.Base(filepath.Dir(path))
        // If parent is a cache folder (_quick, _cacache, etc) or npm dir itself, fold it
        if parent == ".npm" || parent == ".tnpm" || strings.HasPrefix(parent, "_") {
            return true
        }
        // Also fold single-letter subdirectories (npm cache structure like .npm/a/, .npm/b/)
        if len(name) == 1 {
            return true
        }
@@ -277,17 +281,14 @@ func shouldSkipFileForLargeTracking(path string) bool {
    return skipExtensions[ext]
}

// calculateDirSizeFast performs concurrent directory size calculation using os.ReadDir
// This is a faster fallback than filepath.WalkDir when du fails
// calculateDirSizeFast performs concurrent dir sizing using os.ReadDir.
func calculateDirSizeFast(root string, filesScanned, dirsScanned, bytesScanned *int64, currentPath *string) int64 {
    var total int64
    var wg sync.WaitGroup

    // Create context with timeout
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
    defer cancel()

    // Limit total concurrency for this walk
    concurrency := runtime.NumCPU() * 4
    if concurrency > 64 {
        concurrency = 64
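The concurrency cap above is enforced with a buffered-channel semaphore around each recursive goroutine, the same idiom the walk below uses. A minimal sketch of that pattern, independent of the scanner's progress counters:

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "runtime"
    "sync"
    "sync/atomic"
)

// dirSize sums file sizes under root with a bounded number of goroutines.
func dirSize(root string) int64 {
    var total int64
    var wg sync.WaitGroup
    sem := make(chan struct{}, runtime.NumCPU()*4) // bounded concurrency

    var walk func(dir string)
    walk = func(dir string) {
        entries, err := os.ReadDir(dir)
        if err != nil {
            return
        }
        for _, e := range entries {
            p := filepath.Join(dir, e.Name())
            if e.IsDir() {
                wg.Add(1)
                go func(p string) {
                    defer wg.Done()
                    sem <- struct{}{}        // acquire token
                    defer func() { <-sem }() // release token
                    walk(p)
                }(p)
            } else if info, err := e.Info(); err == nil {
                atomic.AddInt64(&total, info.Size())
            }
        }
    }
    walk(root)
    wg.Wait()
    return total
}

func main() { fmt.Println(dirSize(".")) }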
@@ -315,19 +316,16 @@ func calculateDirSizeFast(root string, filesScanned, dirsScanned, bytesScanned *

    for _, entry := range entries {
        if entry.IsDir() {
            // Directories: recurse concurrently
            wg.Add(1)
            // Capture loop variable
            subDir := filepath.Join(dirPath, entry.Name())
            go func(p string) {
                defer wg.Done()
                sem <- struct{}{} // Acquire token
                defer func() { <-sem }() // Release token
                sem <- struct{}{}
                defer func() { <-sem }()
                walk(p)
            }(subDir)
            atomic.AddInt64(dirsScanned, 1)
        } else {
            // Files: process immediately
            info, err := entry.Info()
            if err == nil {
                size := getActualFileSize(filepath.Join(dirPath, entry.Name()), info)
@@ -352,9 +350,8 @@ func calculateDirSizeFast(root string, filesScanned, dirsScanned, bytesScanned *
    return total
}

// Use Spotlight (mdfind) to quickly find large files in a directory
// Use Spotlight (mdfind) to quickly find large files.
func findLargeFilesWithSpotlight(root string, minSize int64) []fileEntry {
    // mdfind query: files >= minSize in the specified directory
    query := fmt.Sprintf("kMDItemFSSize >= %d", minSize)

    ctx, cancel := context.WithTimeout(context.Background(), mdlsTimeout)
@@ -363,7 +360,6 @@ func findLargeFilesWithSpotlight(root string, minSize int64) []fileEntry {
    cmd := exec.CommandContext(ctx, "mdfind", "-onlyin", root, query)
    output, err := cmd.Output()
    if err != nil {
        // Fallback: mdfind not available or failed
        return nil
    }

@@ -375,28 +371,26 @@ func findLargeFilesWithSpotlight(root string, minSize int64) []fileEntry {
            continue
        }

        // Filter out code files first (cheapest check, no I/O)
        // Filter code files first (cheap).
        if shouldSkipFileForLargeTracking(line) {
            continue
        }

        // Filter out files in folded directories (cheap string check)
        // Filter folded directories (cheap string check).
        if isInFoldedDir(line) {
            continue
        }

        // Use Lstat instead of Stat (faster, doesn't follow symlinks)
        info, err := os.Lstat(line)
        if err != nil {
            continue
        }

        // Skip if it's a directory or symlink
        if info.IsDir() || info.Mode()&os.ModeSymlink != 0 {
            continue
        }

        // Get actual disk usage for sparse files and cloud files
        // Actual disk usage for sparse/cloud files.
        actualSize := getActualFileSize(line, info)
        files = append(files, fileEntry{
            Name: filepath.Base(line),
@@ -405,12 +399,11 @@ func findLargeFilesWithSpotlight(root string, minSize int64) []fileEntry {
        })
    }

    // Sort by size (descending)
    // Sort by size (descending).
    sort.Slice(files, func(i, j int) bool {
        return files[i].Size > files[j].Size
    })

    // Return top N
    if len(files) > maxLargeFiles {
        files = files[:maxLargeFiles]
    }
@@ -418,9 +411,8 @@ func findLargeFilesWithSpotlight(root string, minSize int64) []fileEntry {
    return files
}

// isInFoldedDir checks if a path is inside a folded directory (optimized)
// isInFoldedDir checks if a path is inside a folded directory.
func isInFoldedDir(path string) bool {
    // Split path into components for faster checking
    parts := strings.Split(path, string(os.PathSeparator))
    for _, part := range parts {
        if foldDirs[part] {
@@ -431,7 +423,6 @@ func isInFoldedDir(path string) bool {
}

func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, filesScanned, dirsScanned, bytesScanned *int64, currentPath *string) int64 {
    // Read immediate children
    children, err := os.ReadDir(root)
    if err != nil {
        return 0
@@ -440,7 +431,7 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, fil
    var total int64
    var wg sync.WaitGroup

    // Limit concurrent subdirectory scans to avoid too many goroutines
    // Limit concurrent subdirectory scans.
    maxConcurrent := runtime.NumCPU() * 2
    if maxConcurrent > maxDirWorkers {
        maxConcurrent = maxDirWorkers
@@ -450,9 +441,7 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, fil
    for _, child := range children {
        fullPath := filepath.Join(root, child.Name())

        // Skip symlinks to avoid following them into unexpected locations
        if child.Type()&fs.ModeSymlink != 0 {
            // For symlinks, just count their size without following
            info, err := child.Info()
            if err != nil {
                continue
@@ -465,9 +454,7 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, fil
        }

        if child.IsDir() {
            // Check if this is a folded directory
            if shouldFoldDirWithPath(child.Name(), fullPath) {
                // Use du for folded directories (much faster)
                wg.Add(1)
                go func(path string) {
                    defer wg.Done()
@@ -481,7 +468,6 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, fil
                continue
            }

            // Recursively scan subdirectory in parallel
            wg.Add(1)
            go func(path string) {
                defer wg.Done()
@@ -495,7 +481,6 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, fil
            continue
        }

        // Handle files
        info, err := child.Info()
        if err != nil {
            continue
@@ -506,12 +491,11 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, fil
        atomic.AddInt64(filesScanned, 1)
        atomic.AddInt64(bytesScanned, size)

        // Track large files
        if !shouldSkipFileForLargeTracking(fullPath) && size >= minLargeFileSize {
            largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}
        }

        // Update current path occasionally to prevent UI jitter
        // Update current path occasionally to prevent UI jitter.
        if currentPath != nil && atomic.LoadInt64(filesScanned)%int64(batchUpdateSize) == 0 {
            *currentPath = fullPath
        }
@@ -522,6 +506,7 @@
}

// measureOverviewSize calculates the size of a directory using multiple strategies.
// When scanning Home, it excludes ~/Library to avoid duplicate counting.
func measureOverviewSize(path string) (int64, error) {
    if path == "" {
        return 0, fmt.Errorf("empty path")
@@ -536,16 +521,23 @@ func measureOverviewSize(
        return 0, fmt.Errorf("cannot access path: %v", err)
    }

    // Determine if we should exclude ~/Library (when scanning Home)
    home := os.Getenv("HOME")
    excludePath := ""
    if home != "" && path == home {
        excludePath = filepath.Join(home, "Library")
    }

    if cached, err := loadStoredOverviewSize(path); err == nil && cached > 0 {
        return cached, nil
    }

    if duSize, err := getDirectorySizeFromDu(path); err == nil && duSize > 0 {
    if duSize, err := getDirectorySizeFromDuWithExclude(path, excludePath); err == nil && duSize > 0 {
        _ = storeOverviewSize(path, duSize)
        return duSize, nil
    }

    if logicalSize, err := getDirectoryLogicalSize(path); err == nil && logicalSize > 0 {
    if logicalSize, err := getDirectoryLogicalSizeWithExclude(path, excludePath); err == nil && logicalSize > 0 {
        _ = storeOverviewSize(path, logicalSize)
        return logicalSize, nil
    }
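measureOverviewSize is a fallback chain: stored cache first, then du, then a logical walk, each answer accepted only when it is positive. The control flow reduces to this sketch (the strategy functions are stand-ins, not the repo's API):

package main

import "fmt"

// sizeWithFallbacks returns the first positive size produced by any strategy.
func sizeWithFallbacks(path string, strategies ...func(string) (int64, error)) (int64, error) {
    for _, measure := range strategies {
        if size, err := measure(path); err == nil && size > 0 {
            return size, nil // first usable answer wins
        }
    }
    return 0, fmt.Errorf("all strategies failed for %s", path)
}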
@@ -559,38 +551,69 @@ func measureOverviewSize(
}

func getDirectorySizeFromDu(path string) (int64, error) {
    ctx, cancel := context.WithTimeout(context.Background(), duTimeout)
    defer cancel()

    cmd := exec.CommandContext(ctx, "du", "-sk", path)
    var stdout, stderr bytes.Buffer
    cmd.Stdout = &stdout
    cmd.Stderr = &stderr

    if err := cmd.Run(); err != nil {
        if ctx.Err() == context.DeadlineExceeded {
            return 0, fmt.Errorf("du timeout after %v", duTimeout)
        }
        if stderr.Len() > 0 {
            return 0, fmt.Errorf("du failed: %v (%s)", err, stderr.String())
        }
        return 0, fmt.Errorf("du failed: %v", err)
    }
    fields := strings.Fields(stdout.String())
    if len(fields) == 0 {
        return 0, fmt.Errorf("du output empty")
    }
    kb, err := strconv.ParseInt(fields[0], 10, 64)
    if err != nil {
        return 0, fmt.Errorf("failed to parse du output: %v", err)
    }
    if kb <= 0 {
        return 0, fmt.Errorf("du size invalid: %d", kb)
    }
    return kb * 1024, nil
    return getDirectorySizeFromDuWithExclude(path, "")
}

func getDirectoryLogicalSize(path string) (int64, error) {
func getDirectorySizeFromDuWithExclude(path string, excludePath string) (int64, error) {
    runDuSize := func(target string) (int64, error) {
        if _, err := os.Stat(target); err != nil {
            return 0, err
        }

        ctx, cancel := context.WithTimeout(context.Background(), duTimeout)
        defer cancel()

        cmd := exec.CommandContext(ctx, "du", "-sk", target)
        var stdout, stderr bytes.Buffer
        cmd.Stdout = &stdout
        cmd.Stderr = &stderr

        if err := cmd.Run(); err != nil {
            if ctx.Err() == context.DeadlineExceeded {
                return 0, fmt.Errorf("du timeout after %v", duTimeout)
            }
            if stderr.Len() > 0 {
                return 0, fmt.Errorf("du failed: %v (%s)", err, stderr.String())
            }
            return 0, fmt.Errorf("du failed: %v", err)
        }
        fields := strings.Fields(stdout.String())
        if len(fields) == 0 {
            return 0, fmt.Errorf("du output empty")
        }
        kb, err := strconv.ParseInt(fields[0], 10, 64)
        if err != nil {
            return 0, fmt.Errorf("failed to parse du output: %v", err)
        }
        if kb <= 0 {
            return 0, fmt.Errorf("du size invalid: %d", kb)
        }
        return kb * 1024, nil
    }

    // When excluding a path (e.g., ~/Library), subtract only that exact directory instead of ignoring every "Library"
    if excludePath != "" {
        totalSize, err := runDuSize(path)
        if err != nil {
            return 0, err
        }
        excludeSize, err := runDuSize(excludePath)
        if err != nil {
            if !os.IsNotExist(err) {
                return 0, err
            }
            excludeSize = 0
        }
        if excludeSize > totalSize {
            excludeSize = 0
        }
        return totalSize - excludeSize, nil
    }

    return runDuSize(path)
}

func getDirectoryLogicalSizeWithExclude(path string, excludePath string) (int64, error) {
    var total int64
    err := filepath.WalkDir(path, func(p string, d fs.DirEntry, err error) error {
        if err != nil {
@@ -599,6 +622,10 @@ func getDirectoryLogicalSize(path string) (int64, error) {
            }
            return nil
        }
        // Skip excluded path
        if excludePath != "" && p == excludePath {
            return filepath.SkipDir
        }
        if d.IsDir() {
            return nil
        }

45
cmd/analyze/scanner_test.go
Normal file
@@ -0,0 +1,45 @@
package main

import (
    "os"
    "path/filepath"
    "testing"
)

func writeFileWithSize(t *testing.T, path string, size int) {
    t.Helper()
    if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
        t.Fatalf("mkdir %s: %v", path, err)
    }
    content := make([]byte, size)
    if err := os.WriteFile(path, content, 0o644); err != nil {
        t.Fatalf("write %s: %v", path, err)
    }
}

func TestGetDirectoryLogicalSizeWithExclude(t *testing.T) {
    base := t.TempDir()
    homeFile := filepath.Join(base, "fileA")
    libFile := filepath.Join(base, "Library", "fileB")
    projectLibFile := filepath.Join(base, "Projects", "Library", "fileC")

    writeFileWithSize(t, homeFile, 100)
    writeFileWithSize(t, libFile, 200)
    writeFileWithSize(t, projectLibFile, 300)

    total, err := getDirectoryLogicalSizeWithExclude(base, "")
    if err != nil {
        t.Fatalf("getDirectoryLogicalSizeWithExclude (no exclude) error: %v", err)
    }
    if total != 600 {
        t.Fatalf("expected total 600 bytes, got %d", total)
    }

    excluding, err := getDirectoryLogicalSizeWithExclude(base, filepath.Join(base, "Library"))
    if err != nil {
        t.Fatalf("getDirectoryLogicalSizeWithExclude (exclude Library) error: %v", err)
    }
    if excluding != 400 {
        t.Fatalf("expected 400 bytes when excluding top-level Library, got %d", excluding)
    }
}
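The exclude-by-subtraction idea can also be checked by hand against du itself. This sketch shells out to du twice and subtracts, mirroring getDirectorySizeFromDuWithExclude; the /tmp paths are hypothetical and error handling is trimmed:

package main

import (
    "fmt"
    "os/exec"
    "strconv"
    "strings"
)

// duKB returns `du -sk` output for one path, in kilobytes.
func duKB(path string) (int64, error) {
    out, err := exec.Command("du", "-sk", path).Output()
    if err != nil {
        return 0, err
    }
    fields := strings.Fields(string(out))
    if len(fields) == 0 {
        return 0, fmt.Errorf("empty du output")
    }
    return strconv.ParseInt(fields[0], 10, 64)
}

func main() {
    total, _ := duKB("/tmp")
    sub, _ := duKB("/tmp/some-subdir") // hypothetical subdirectory
    fmt.Printf("%d KB excluding subdir\n", total-sub)
}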
@@ -8,7 +8,7 @@ import (
    "sync/atomic"
)

// View renders the TUI display.
// View renders the TUI.
func (m model) View() string {
    var b strings.Builder
    fmt.Fprintln(&b)
@@ -16,7 +16,6 @@ func (m model) View() string {
    if m.inOverviewMode() {
        fmt.Fprintf(&b, "%sAnalyze Disk%s\n", colorPurpleBold, colorReset)
        if m.overviewScanning {
            // Check if we're in initial scan (all entries are pending)
            allPending := true
            for _, entry := range m.entries {
                if entry.Size >= 0 {
@@ -26,19 +25,16 @@ func (m model) View() string {
            }

            if allPending {
                // Show prominent loading screen for initial scan
                fmt.Fprintf(&b, "%s%s%s%s Analyzing disk usage, please wait...%s\n",
                    colorCyan, colorBold,
                    spinnerFrames[m.spinner],
                    colorReset, colorReset)
                return b.String()
            } else {
                // Progressive scanning - show subtle indicator
                fmt.Fprintf(&b, "%sSelect a location to explore:%s ", colorGray, colorReset)
                fmt.Fprintf(&b, "%s%s%s%s Scanning...\n\n", colorCyan, colorBold, spinnerFrames[m.spinner], colorReset)
            }
        } else {
            // Check if there are still pending items
            hasPending := false
            for _, entry := range m.entries {
                if entry.Size < 0 {
@@ -62,7 +58,6 @@ func (m model) View() string {
    }

    if m.deleting {
        // Show delete progress
        count := int64(0)
        if m.deleteCount != nil {
            count = atomic.LoadInt64(m.deleteCount)
@@ -119,25 +114,36 @@ func (m model) View() string {
                maxLargeSize = file.Size
            }
        }
        nameWidth := calculateNameWidth(m.width)
        for idx := start; idx < end; idx++ {
            file := m.largeFiles[idx]
            shortPath := displayPath(file.Path)
            shortPath = truncateMiddle(shortPath, 35)
            paddedPath := padName(shortPath, 35)
            shortPath = truncateMiddle(shortPath, nameWidth)
            paddedPath := padName(shortPath, nameWidth)
            entryPrefix := "  "
            nameColor := ""
            sizeColor := colorGray
            numColor := ""

            isMultiSelected := m.largeMultiSelected != nil && m.largeMultiSelected[file.Path]
            selectIcon := "○"
            if isMultiSelected {
                selectIcon = fmt.Sprintf("%s●%s", colorGreen, colorReset)
                nameColor = colorGreen
            }

            if idx == m.largeSelected {
                entryPrefix = fmt.Sprintf(" %s%s▶%s ", colorCyan, colorBold, colorReset)
                nameColor = colorCyan
                if !isMultiSelected {
                    nameColor = colorCyan
                }
                sizeColor = colorCyan
                numColor = colorCyan
            }
            size := humanizeBytes(file.Size)
            bar := coloredProgressBar(file.Size, maxLargeSize, 0)
            fmt.Fprintf(&b, "%s%s%2d.%s %s | 📄 %s%s%s %s%10s%s\n",
                entryPrefix, numColor, idx+1, colorReset, bar, nameColor, paddedPath, colorReset, sizeColor, size, colorReset)
            fmt.Fprintf(&b, "%s%s %s%2d.%s %s | 📄 %s%s%s %s%10s%s\n",
                entryPrefix, selectIcon, numColor, idx+1, colorReset, bar, nameColor, paddedPath, colorReset, sizeColor, size, colorReset)
        }
    }
} else {
@@ -152,6 +158,8 @@ func (m model) View() string {
            }
        }
        totalSize := m.totalSize
        // Overview paths are short; fixed width keeps layout stable.
        nameWidth := 20
        for idx, entry := range m.entries {
            icon := "📁"
            sizeVal := entry.Size
@@ -188,8 +196,8 @@ func (m model) View() string {
            }
        }
        entryPrefix := "  "
        name := trimName(entry.Name)
        paddedName := padName(name, 28)
        name := trimNameWithWidth(entry.Name, nameWidth)
        paddedName := padName(name, nameWidth)
        nameSegment := fmt.Sprintf("%s %s", icon, paddedName)
        numColor := ""
        percentColor := ""
@@ -202,12 +210,10 @@ func (m model) View() string {
        }
        displayIndex := idx + 1

        // Priority: cleanable > unused time
        var hintLabel string
        if entry.IsDir && isCleanableDir(entry.Path) {
            hintLabel = fmt.Sprintf("%s🧹%s", colorYellow, colorReset)
        } else {
            // For overview mode, get access time on-demand if not set
            lastAccess := entry.LastAccess
            if lastAccess.IsZero() && entry.Path != "" {
                lastAccess = getLastAccessTime(entry.Path)
@@ -228,7 +234,6 @@ func (m model) View() string {
            }
        }
    } else {
        // Normal mode with sizes and progress bars
        maxSize := int64(1)
        for _, entry := range m.entries {
            if entry.Size > maxSize {
@@ -237,6 +242,7 @@ func (m model) View() string {
        }

        viewport := calculateViewport(m.height, false)
        nameWidth := calculateNameWidth(m.width)
        start := m.offset
        if start < 0 {
            start = 0
@@ -253,17 +259,14 @@ func (m model) View() string {
                icon = "📁"
            }
            size := humanizeBytes(entry.Size)
            name := trimName(entry.Name)
            paddedName := padName(name, 28)
            name := trimNameWithWidth(entry.Name, nameWidth)
            paddedName := padName(name, nameWidth)

            // Calculate percentage
            percent := float64(entry.Size) / float64(m.totalSize) * 100
            percentStr := fmt.Sprintf("%5.1f%%", percent)

            // Get colored progress bar
            bar := coloredProgressBar(entry.Size, maxSize, percent)

            // Color the size based on magnitude
            var sizeColor string
            if percent >= 50 {
                sizeColor = colorRed
@@ -275,14 +278,26 @@ func (m model) View() string {
                sizeColor = colorGray
            }

            // Keep chart columns aligned even when arrow is shown
            isMultiSelected := m.multiSelected != nil && m.multiSelected[entry.Path]
            selectIcon := "○"
            nameColor := ""
            if isMultiSelected {
                selectIcon = fmt.Sprintf("%s●%s", colorGreen, colorReset)
                nameColor = colorGreen
            }

            entryPrefix := "  "
            nameSegment := fmt.Sprintf("%s %s", icon, paddedName)
            if nameColor != "" {
                nameSegment = fmt.Sprintf("%s%s %s%s", nameColor, icon, paddedName, colorReset)
            }
            numColor := ""
            percentColor := ""
            if idx == m.selected {
                entryPrefix = fmt.Sprintf(" %s%s▶%s ", colorCyan, colorBold, colorReset)
                nameSegment = fmt.Sprintf("%s%s %s%s", colorCyan, icon, paddedName, colorReset)
                if !isMultiSelected {
                    nameSegment = fmt.Sprintf("%s%s %s%s", colorCyan, icon, paddedName, colorReset)
                }
                numColor = colorCyan
                percentColor = colorCyan
                sizeColor = colorCyan
@@ -290,12 +305,10 @@ func (m model) View() string {

            displayIndex := idx + 1

            // Priority: cleanable > unused time
            var hintLabel string
            if entry.IsDir && isCleanableDir(entry.Path) {
                hintLabel = fmt.Sprintf("%s🧹%s", colorYellow, colorReset)
            } else {
                // Get access time on-demand if not set
                lastAccess := entry.LastAccess
                if lastAccess.IsZero() && entry.Path != "" {
                    lastAccess = getLastAccessTime(entry.Path)
@@ -306,12 +319,12 @@ func (m model) View() string {
            }

            if hintLabel == "" {
                fmt.Fprintf(&b, "%s%s%2d.%s %s %s%s%s | %s %s%10s%s\n",
                    entryPrefix, numColor, displayIndex, colorReset, bar, percentColor, percentStr, colorReset,
                fmt.Fprintf(&b, "%s%s %s%2d.%s %s %s%s%s | %s %s%10s%s\n",
                    entryPrefix, selectIcon, numColor, displayIndex, colorReset, bar, percentColor, percentStr, colorReset,
                    nameSegment, sizeColor, size, colorReset)
            } else {
                fmt.Fprintf(&b, "%s%s%2d.%s %s %s%s%s | %s %s%10s%s %s\n",
                    entryPrefix, numColor, displayIndex, colorReset, bar, percentColor, percentStr, colorReset,
                fmt.Fprintf(&b, "%s%s %s%2d.%s %s %s%s%s | %s %s%10s%s %s\n",
                    entryPrefix, selectIcon, numColor, displayIndex, colorReset, bar, percentColor, percentStr, colorReset,
                    nameSegment, sizeColor, size, colorReset, hintLabel)
            }
        }
@@ -321,53 +334,94 @@ func (m model) View() string {

    fmt.Fprintln(&b)
    if m.inOverviewMode() {
        // Show ← Back if there's history (entered from a parent directory)
        if len(m.history) > 0 {
            fmt.Fprintf(&b, "%s↑↓←→ | Enter | R Refresh | O Open | F File | ← Back | Q Quit%s\n", colorGray, colorReset)
        } else {
            fmt.Fprintf(&b, "%s↑↓→ | Enter | R Refresh | O Open | F File | Q Quit%s\n", colorGray, colorReset)
        }
    } else if m.showLargeFiles {
        fmt.Fprintf(&b, "%s↑↓← | R Refresh | O Open | F File | ⌫ Del | ← Back | Q Quit%s\n", colorGray, colorReset)
        selectCount := len(m.largeMultiSelected)
        if selectCount > 0 {
            fmt.Fprintf(&b, "%s↑↓← | Space Select | R Refresh | O Open | F File | ⌫ Del(%d) | ← Back | Q Quit%s\n", colorGray, selectCount, colorReset)
        } else {
            fmt.Fprintf(&b, "%s↑↓← | Space Select | R Refresh | O Open | F File | ⌫ Del | ← Back | Q Quit%s\n", colorGray, colorReset)
        }
    } else {
        largeFileCount := len(m.largeFiles)
        if largeFileCount > 0 {
            fmt.Fprintf(&b, "%s↑↓←→ | Enter | R Refresh | O Open | F File | ⌫ Del | T Top(%d) | Q Quit%s\n", colorGray, largeFileCount, colorReset)
        selectCount := len(m.multiSelected)
        if selectCount > 0 {
            if largeFileCount > 0 {
                fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del(%d) | T Top(%d) | Q Quit%s\n", colorGray, selectCount, largeFileCount, colorReset)
            } else {
                fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del(%d) | Q Quit%s\n", colorGray, selectCount, colorReset)
            }
        } else {
            fmt.Fprintf(&b, "%s↑↓←→ | Enter | R Refresh | O Open | F File | ⌫ Del | Q Quit%s\n", colorGray, colorReset)
            if largeFileCount > 0 {
                fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del | T Top(%d) | Q Quit%s\n", colorGray, largeFileCount, colorReset)
            } else {
                fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del | Q Quit%s\n", colorGray, colorReset)
            }
        }
    }
    if m.deleteConfirm && m.deleteTarget != nil {
        fmt.Fprintln(&b)
        fmt.Fprintf(&b, "%sDelete:%s %s (%s) %sPress ⌫ again | ESC cancel%s\n",
            colorRed, colorReset,
            m.deleteTarget.Name, humanizeBytes(m.deleteTarget.Size),
            colorGray, colorReset)
        var deleteCount int
        var totalDeleteSize int64
        if m.showLargeFiles && len(m.largeMultiSelected) > 0 {
            deleteCount = len(m.largeMultiSelected)
            for path := range m.largeMultiSelected {
                for _, file := range m.largeFiles {
                    if file.Path == path {
                        totalDeleteSize += file.Size
                        break
                    }
                }
            }
        } else if !m.showLargeFiles && len(m.multiSelected) > 0 {
            deleteCount = len(m.multiSelected)
            for path := range m.multiSelected {
                for _, entry := range m.entries {
                    if entry.Path == path {
                        totalDeleteSize += entry.Size
                        break
                    }
                }
            }
        }

        if deleteCount > 1 {
            fmt.Fprintf(&b, "%sDelete:%s %d items (%s) %sPress ⌫ again | ESC cancel%s\n",
                colorRed, colorReset,
                deleteCount, humanizeBytes(totalDeleteSize),
                colorGray, colorReset)
        } else {
            fmt.Fprintf(&b, "%sDelete:%s %s (%s) %sPress ⌫ again | ESC cancel%s\n",
                colorRed, colorReset,
                m.deleteTarget.Name, humanizeBytes(m.deleteTarget.Size),
                colorGray, colorReset)
        }
    }
    return b.String()
}

// calculateViewport computes the number of visible items based on terminal height.
// calculateViewport returns visible rows for the current terminal height.
func calculateViewport(termHeight int, isLargeFiles bool) int {
    if termHeight <= 0 {
        // Terminal height unknown, use default
        return defaultViewport
    }

    // Calculate reserved space for UI elements
    reserved := 6 // header (3-4 lines) + footer (2 lines)
    reserved := 6 // Header + footer
    if isLargeFiles {
        reserved = 5 // Large files view has less overhead
        reserved = 5
    }

    available := termHeight - reserved

    // Ensure minimum and maximum bounds
    if available < 1 {
        return 1 // Minimum 1 line for very short terminals
        return 1
    }
    if available > 30 {
        return 30 // Maximum 30 lines to avoid information overload
        return 30
    }

    return available
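As a worked example of the viewport math above: a 24-row terminal in the normal view reserves 6 rows, leaving 18 visible entries; a 5-row terminal clamps to the 1-row minimum, and anything taller than 36 rows caps at the 30-row maximum.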
@@ -72,7 +72,7 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
    m.metrics = msg.data
    m.lastUpdated = msg.data.CollectedAt
    m.collecting = false
    // Mark ready after first successful data collection
    // Mark ready after first successful data collection.
    if !m.ready {
        m.ready = true
    }
@@ -126,7 +126,7 @@ func animTick() tea.Cmd {
}

func animTickWithSpeed(cpuUsage float64) tea.Cmd {
    // Higher CPU = faster animation (50ms to 300ms)
    // Higher CPU = faster animation.
    interval := 300 - int(cpuUsage*2.5)
    if interval < 50 {
        interval = 50
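To make that mapping concrete: interval = 300 - int(cpuUsage*2.5), clamped below at 50, so 0% CPU ticks every 300ms, 50% every 175ms, and 100% every 50ms.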
@@ -118,10 +118,13 @@ type BatteryStatus struct {
}

type ThermalStatus struct {
    CPUTemp float64
    GPUTemp float64
    FanSpeed int
    FanCount int
    CPUTemp      float64
    GPUTemp      float64
    FanSpeed     int
    FanCount     int
    SystemPower  float64 // System power consumption in Watts
    AdapterPower float64 // AC adapter max power in Watts
    BatteryPower float64 // Battery charge/discharge power in Watts (positive = discharging)
}

type SensorReading struct {
@@ -138,10 +141,18 @@ type BluetoothDevice struct {
}

type Collector struct {
    // Static cache.
    cachedHW  HardwareInfo
    lastHWAt  time.Time
    hasStatic bool

    // Slow cache (30s-1m).
    lastBTAt time.Time
    lastBT   []BluetoothDevice

    // Fast metrics (1s).
    prevNet    map[string]net.IOCountersStat
    lastNetAt  time.Time
    lastBTAt   time.Time
    lastBT     []BluetoothDevice
    lastGPUAt  time.Time
    cachedGPU  []GPUStatus
    prevDiskIO disk.IOCountersStat
|
||||
func (c *Collector) Collect() (MetricsSnapshot, error) {
|
||||
now := time.Now()
|
||||
|
||||
// Start host info collection early (it's fast but good to parallelize if possible,
|
||||
// but it returns a struct needed for result, so we can just run it here or in parallel)
|
||||
// host.Info is usually cached by gopsutil but let's just call it.
|
||||
// Host info is cached by gopsutil; fetch once.
|
||||
hostInfo, _ := host.Info()
|
||||
|
||||
var (
|
||||
@@ -181,7 +190,7 @@ func (c *Collector) Collect() (MetricsSnapshot, error) {
|
||||
topProcs []ProcessInfo
|
||||
)
|
||||
|
||||
// Helper to launch concurrent collection
|
||||
// Helper to launch concurrent collection.
|
||||
collect := func(fn func() error) {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
@@ -198,7 +207,7 @@ func (c *Collector) Collect() (MetricsSnapshot, error) {
|
||||
}()
|
||||
}
|
||||
|
||||
// Launch all independent collection tasks
|
||||
// Launch independent collection tasks.
|
||||
collect(func() (err error) { cpuStats, err = collectCPU(); return })
|
||||
collect(func() (err error) { memStats, err = collectMemory(); return })
|
||||
collect(func() (err error) { diskStats, err = collectDisks(); return })
|
||||
@@ -209,14 +218,31 @@ func (c *Collector) Collect() (MetricsSnapshot, error) {
|
||||
collect(func() (err error) { thermalStats = collectThermal(); return nil })
|
||||
collect(func() (err error) { sensorStats, _ = collectSensors(); return nil })
|
||||
collect(func() (err error) { gpuStats, err = c.collectGPU(now); return })
|
||||
collect(func() (err error) { btStats = c.collectBluetooth(now); return nil })
|
||||
collect(func() (err error) {
|
||||
// Bluetooth is slow; cache for 30s.
|
||||
if now.Sub(c.lastBTAt) > 30*time.Second || len(c.lastBT) == 0 {
|
||||
btStats = c.collectBluetooth(now)
|
||||
c.lastBT = btStats
|
||||
c.lastBTAt = now
|
||||
} else {
|
||||
btStats = c.lastBT
|
||||
}
|
||||
return nil
|
||||
})
|
||||
collect(func() (err error) { topProcs = collectTopProcesses(); return nil })
|
||||
|
||||
// Wait for all to complete
|
||||
// Wait for all to complete.
|
||||
wg.Wait()
|
||||
|
||||
// Dependent tasks (must run after others)
|
||||
hwInfo := collectHardware(memStats.Total, diskStats)
|
||||
// Dependent tasks (post-collect).
|
||||
// Cache hardware info as it's expensive and rarely changes.
|
||||
if !c.hasStatic || now.Sub(c.lastHWAt) > 10*time.Minute {
|
||||
c.cachedHW = collectHardware(memStats.Total, diskStats)
|
||||
c.lastHWAt = now
|
||||
c.hasStatic = true
|
||||
}
|
||||
hwInfo := c.cachedHW
|
||||
|
||||
score, scoreMsg := calculateHealthScore(cpuStats, memStats, diskStats, diskIO, thermalStats)
|
||||
|
||||
return MetricsSnapshot{
|
||||
@@ -243,8 +269,6 @@ func (c *Collector) Collect() (MetricsSnapshot, error) {
|
||||
}, mergeErr
|
||||
}
|
||||
|
||||
// Utility functions
|
||||
|
||||
func runCmd(ctx context.Context, name string, args ...string) (string, error) {
|
||||
cmd := exec.CommandContext(ctx, name, args...)
|
||||
output, err := cmd.Output()
|
||||
@@ -260,11 +284,9 @@ func commandExists(name string) bool {
|
||||
}
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
// If LookPath panics due to permissions or platform quirks, act as if the command is missing.
|
||||
// Treat LookPath panics as "missing".
|
||||
}
|
||||
}()
|
||||
_, err := exec.LookPath(name)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// humanBytes is defined in view.go to avoid duplication
|
||||
|
||||
@@ -14,24 +14,33 @@ import (
|
||||
"github.com/shirou/gopsutil/v3/host"
|
||||
)
|
||||
|
||||
var (
|
||||
// Cache for heavy system_profiler output.
|
||||
lastPowerAt time.Time
|
||||
cachedPower string
|
||||
powerCacheTTL = 30 * time.Second
|
||||
)
|
||||
|
||||
func collectBatteries() (batts []BatteryStatus, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
// Swallow panics from platform-specific battery probes to keep the UI alive.
|
||||
// Swallow panics to keep UI alive.
|
||||
err = fmt.Errorf("battery collection failed: %v", r)
|
||||
}
|
||||
}()
|
||||
|
||||
// macOS: pmset
|
||||
// macOS: pmset for real-time percentage/status.
|
||||
if runtime.GOOS == "darwin" && commandExists("pmset") {
|
||||
if out, err := runCmd(context.Background(), "pmset", "-g", "batt"); err == nil {
|
||||
if batts := parsePMSet(out); len(batts) > 0 {
|
||||
// Health/cycles from cached system_profiler.
|
||||
health, cycles := getCachedPowerData()
|
||||
if batts := parsePMSet(out, health, cycles); len(batts) > 0 {
|
||||
return batts, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Linux: /sys/class/power_supply
|
||||
// Linux: /sys/class/power_supply.
|
||||
matches, _ := filepath.Glob("/sys/class/power_supply/BAT*/capacity")
|
||||
for _, capFile := range matches {
|
||||
statusFile := filepath.Join(filepath.Dir(capFile), "status")
|
||||
@@ -58,15 +67,14 @@ func collectBatteries() (batts []BatteryStatus, err error) {
|
||||
return nil, errors.New("no battery data found")
|
||||
}
|
||||
|
||||
func parsePMSet(raw string) []BatteryStatus {
|
||||
func parsePMSet(raw string, health string, cycles int) []BatteryStatus {
|
||||
lines := strings.Split(raw, "\n")
|
||||
var out []BatteryStatus
|
||||
var timeLeft string
|
||||
|
||||
for _, line := range lines {
|
||||
// Check for time remaining
|
||||
// Time remaining.
|
||||
if strings.Contains(line, "remaining") {
|
||||
// Extract time like "1:30 remaining"
|
||||
parts := strings.Fields(line)
|
||||
for i, p := range parts {
|
||||
if p == "remaining" && i > 0 {
|
||||
@@ -101,9 +109,6 @@ func parsePMSet(raw string) []BatteryStatus {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get battery health and cycle count
|
||||
health, cycles := getBatteryHealth()
|
||||
|
||||
out = append(out, BatteryStatus{
|
||||
Percent: percent,
|
||||
Status: status,
|
||||
@@ -115,40 +120,51 @@ func parsePMSet(raw string) []BatteryStatus {
|
||||
return out
|
||||
}
|
||||
|
||||
func getBatteryHealth() (string, int) {
|
||||
if runtime.GOOS != "darwin" {
|
||||
// getCachedPowerData returns condition and cycles from cached system_profiler.
|
||||
func getCachedPowerData() (health string, cycles int) {
|
||||
out := getSystemPowerOutput()
|
||||
if out == "" {
|
||||
return "", 0
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
out, err := runCmd(ctx, "system_profiler", "SPPowerDataType")
|
||||
if err != nil {
|
||||
return "", 0
|
||||
}
|
||||
|
||||
var health string
|
||||
var cycles int
|
||||
|
||||
lines := strings.Split(out, "\n")
|
||||
for _, line := range lines {
|
||||
lower := strings.ToLower(line)
|
||||
if strings.Contains(lower, "cycle count") {
|
||||
parts := strings.Split(line, ":")
|
||||
if len(parts) == 2 {
|
||||
cycles, _ = strconv.Atoi(strings.TrimSpace(parts[1]))
|
||||
if _, after, found := strings.Cut(line, ":"); found {
|
||||
cycles, _ = strconv.Atoi(strings.TrimSpace(after))
|
||||
}
|
||||
}
|
||||
if strings.Contains(lower, "condition") {
|
||||
parts := strings.Split(line, ":")
|
||||
if len(parts) == 2 {
|
||||
health = strings.TrimSpace(parts[1])
|
||||
if _, after, found := strings.Cut(line, ":"); found {
|
||||
health = strings.TrimSpace(after)
|
||||
}
|
||||
}
|
||||
}
|
||||
return health, cycles
|
||||
}
|
||||
|
||||
func getSystemPowerOutput() string {
|
||||
if runtime.GOOS != "darwin" {
|
||||
return ""
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
if cachedPower != "" && now.Sub(lastPowerAt) < powerCacheTTL {
|
||||
return cachedPower
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
|
||||
out, err := runCmd(ctx, "system_profiler", "SPPowerDataType")
|
||||
if err == nil {
|
||||
cachedPower = out
|
||||
lastPowerAt = now
|
||||
}
|
||||
return cachedPower
|
||||
}
|
||||
|
||||
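The strings.Cut switch in getCachedPowerData keeps everything after the first colon, where the old strings.Split form silently skipped any line containing a second colon. A quick illustration:

package main

import (
    "fmt"
    "strings"
)

func main() {
    line := "      Cycle Count: 312"
    if _, after, found := strings.Cut(line, ":"); found {
        fmt.Printf("%q\n", strings.TrimSpace(after)) // "312"
    }
    // strings.Split(line, ":") guarded by len(parts) == 2 would
    // drop any line whose value itself contains a colon.
}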
func collectThermal() ThermalStatus {
|
||||
if runtime.GOOS != "darwin" {
|
||||
return ThermalStatus{}
|
||||
@@ -156,47 +172,85 @@ func collectThermal() ThermalStatus {
|
||||
|
||||
var thermal ThermalStatus

// Get fan info from system_profiler
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()

out, err := runCmd(ctx, "system_profiler", "SPPowerDataType")
if err == nil {
// Fan info from cached system_profiler.
out := getSystemPowerOutput()
if out != "" {
lines := strings.Split(out, "\n")
for _, line := range lines {
lower := strings.ToLower(line)
if strings.Contains(lower, "fan") && strings.Contains(lower, "speed") {
parts := strings.Split(line, ":")
if len(parts) == 2 {
// Extract number from string like "1200 RPM"
numStr := strings.TrimSpace(parts[1])
numStr = strings.Split(numStr, " ")[0]
if _, after, found := strings.Cut(line, ":"); found {
numStr := strings.TrimSpace(after)
numStr, _, _ = strings.Cut(numStr, " ")
thermal.FanSpeed, _ = strconv.Atoi(numStr)
}
}
}
}

// 1. Try ioreg battery temperature (simple, no sudo needed)
ctxIoreg, cancelIoreg := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancelIoreg()
if out, err := runCmd(ctxIoreg, "sh", "-c", "ioreg -rn AppleSmartBattery | awk '/\"Temperature\"/ {print $3}'"); err == nil {
valStr := strings.TrimSpace(out)
if tempRaw, err := strconv.Atoi(valStr); err == nil && tempRaw > 0 {
thermal.CPUTemp = float64(tempRaw) / 100.0
return thermal
// Power metrics from ioreg (fast, real-time).
ctxPower, cancelPower := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancelPower()
if out, err := runCmd(ctxPower, "ioreg", "-rn", "AppleSmartBattery"); err == nil {
lines := strings.Split(out, "\n")
for _, line := range lines {
line = strings.TrimSpace(line)

// Battery temperature ("Temperature" = 3055).
if _, after, found := strings.Cut(line, "\"Temperature\" = "); found {
valStr := strings.TrimSpace(after)
if tempRaw, err := strconv.Atoi(valStr); err == nil && tempRaw > 0 {
thermal.CPUTemp = float64(tempRaw) / 100.0
}
}

// Adapter power (Watts) from current adapter.
if strings.Contains(line, "\"AdapterDetails\" = {") && !strings.Contains(line, "AppleRaw") {
if _, after, found := strings.Cut(line, "\"Watts\"="); found {
valStr := strings.TrimSpace(after)
valStr, _, _ = strings.Cut(valStr, ",")
valStr, _, _ = strings.Cut(valStr, "}")
valStr = strings.TrimSpace(valStr)
if watts, err := strconv.ParseFloat(valStr, 64); err == nil && watts > 0 {
thermal.AdapterPower = watts
}
}
}

// System power consumption (mW -> W).
if _, after, found := strings.Cut(line, "\"SystemPowerIn\"="); found {
valStr := strings.TrimSpace(after)
valStr, _, _ = strings.Cut(valStr, ",")
valStr, _, _ = strings.Cut(valStr, "}")
valStr = strings.TrimSpace(valStr)
if powerMW, err := strconv.ParseFloat(valStr, 64); err == nil && powerMW > 0 {
thermal.SystemPower = powerMW / 1000.0
}
}

// Battery power (mW -> W, positive = discharging).
if _, after, found := strings.Cut(line, "\"BatteryPower\"="); found {
valStr := strings.TrimSpace(after)
valStr, _, _ = strings.Cut(valStr, ",")
valStr, _, _ = strings.Cut(valStr, "}")
valStr = strings.TrimSpace(valStr)
if powerMW, err := strconv.ParseFloat(valStr, 64); err == nil {
thermal.BatteryPower = powerMW / 1000.0
}
}
}
}

// 2. Try thermal level as a proxy (fallback)
ctx2, cancel2 := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancel2()
out2, err := runCmd(ctx2, "sysctl", "-n", "machdep.xcpm.cpu_thermal_level")
if err == nil {
level, _ := strconv.Atoi(strings.TrimSpace(out2))
// Estimate temp: level 0-100 roughly maps to 40-100°C
if level >= 0 {
thermal.CPUTemp = 45 + float64(level)*0.5
// Fallback: thermal level proxy.
if thermal.CPUTemp == 0 {
ctx2, cancel2 := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancel2()
out2, err := runCmd(ctx2, "sysctl", "-n", "machdep.xcpm.cpu_thermal_level")
if err == nil {
level, _ := strconv.Atoi(strings.TrimSpace(out2))
if level >= 0 {
thermal.CPUTemp = 45 + float64(level)*0.5
}
}
}


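The new ioreg parsing keys off lines such as `"Temperature" = 3055` (centi-degrees) and `"SystemPowerIn"=12345` (milliwatts). A self-contained sketch of the strings.Cut approach on canned input (the sample lines mimic `ioreg -rn AppleSmartBattery` output; exact formatting can vary by machine):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Sample lines in the shape ioreg -rn AppleSmartBattery emits.
	sample := []string{
		`"Temperature" = 3055`,
		`"SystemPowerIn"=12345`,
	}
	for _, line := range sample {
		line = strings.TrimSpace(line)
		if _, after, found := strings.Cut(line, `"Temperature" = `); found {
			if raw, err := strconv.Atoi(strings.TrimSpace(after)); err == nil {
				fmt.Printf("battery temp: %.2f°C\n", float64(raw)/100.0) // 30.55°C
			}
		}
		if _, after, found := strings.Cut(line, `"SystemPowerIn"=`); found {
			val := strings.TrimSpace(after)
			val, _, _ = strings.Cut(val, ",") // trailing fields, if any
			if mw, err := strconv.ParseFloat(val, 64); err == nil {
				fmt.Printf("system power: %.1fW\n", mw/1000.0) // 12.3W
			}
		}
	}
}
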
@@ -80,7 +80,7 @@ func parseSPBluetooth(raw string) []BluetoothDevice {
continue
}
if !strings.HasPrefix(line, " ") && strings.HasSuffix(trim, ":") {
// Reset at top-level sections
// Reset at top-level sections.
currentName = ""
connected = false
battery = ""

@@ -31,7 +31,10 @@ func collectCPU() (CPUStatus, error) {
logical = 1
}

percents, err := cpu.Percent(cpuSampleInterval, true)
// Two-call pattern for more reliable CPU usage.
cpu.Percent(0, true)
time.Sleep(cpuSampleInterval)
percents, err := cpu.Percent(0, true)
var totalPercent float64
perCoreEstimated := false
if err != nil || len(percents) == 0 {
@@ -63,7 +66,7 @@ func collectCPU() (CPUStatus, error) {
}
}

// Get P-core and E-core counts for Apple Silicon
// P/E core counts for Apple Silicon.
pCores, eCores := getCoreTopology()

return CPUStatus{
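The two-call pattern replaces a single blocking cpu.Percent(interval, true): a zero-interval call snapshots the per-CPU counters, the sleep sets the sample window, and the second zero-interval call reports usage accumulated since the first. A sketch against gopsutil's actual API:

package main

import (
	"fmt"
	"time"

	"github.com/shirou/gopsutil/v3/cpu"
)

func main() {
	// First call primes the per-CPU counters; its return value is discarded.
	cpu.Percent(0, true)
	time.Sleep(500 * time.Millisecond) // sample window
	// Second call computes usage since the previous zero-interval call.
	percents, err := cpu.Percent(0, true)
	if err != nil {
		return
	}
	for i, p := range percents {
		fmt.Printf("core %d: %5.1f%%\n", i, p)
	}
}
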
@@ -84,17 +87,29 @@ func isZeroLoad(avg load.AvgStat) bool {
return avg.Load1 == 0 && avg.Load5 == 0 && avg.Load15 == 0
}

// getCoreTopology returns P-core and E-core counts on Apple Silicon.
// Returns (0, 0) on non-Apple Silicon or if detection fails.
var (
// Cache for core topology.
lastTopologyAt time.Time
cachedP, cachedE int
topologyTTL = 10 * time.Minute
)

// getCoreTopology returns P/E core counts on Apple Silicon.
func getCoreTopology() (pCores, eCores int) {
if runtime.GOOS != "darwin" {
return 0, 0
}

now := time.Now()
if cachedP > 0 || cachedE > 0 {
if now.Sub(lastTopologyAt) < topologyTTL {
return cachedP, cachedE
}
}

ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancel()

// Get performance level info from sysctl
out, err := runCmd(ctx, "sysctl", "-n",
"hw.perflevel0.logicalcpu",
"hw.perflevel0.name",
@@ -109,15 +124,12 @@ func getCoreTopology() (pCores, eCores int) {
return 0, 0
}

// Parse perflevel0
level0Count, _ := strconv.Atoi(strings.TrimSpace(lines[0]))
level0Name := strings.ToLower(strings.TrimSpace(lines[1]))

// Parse perflevel1
level1Count, _ := strconv.Atoi(strings.TrimSpace(lines[2]))
level1Name := strings.ToLower(strings.TrimSpace(lines[3]))

// Assign based on name (Performance vs Efficiency)
if strings.Contains(level0Name, "performance") {
pCores = level0Count
} else if strings.Contains(level0Name, "efficiency") {
@@ -130,6 +142,8 @@ func getCoreTopology() (pCores, eCores int) {
eCores = level1Count
}

cachedP, cachedE = pCores, eCores
lastTopologyAt = now
return pCores, eCores
}

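On Apple Silicon, sysctl exposes one `hw.perflevelN` group per core type; querying count and name together returns one value per line, so lines[0..3] map to perflevel0 count/name and perflevel1 count/name. A sketch of the parse (the sample output is an assumed M-series machine, not taken from this commit):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// What `sysctl -n hw.perflevel0.logicalcpu hw.perflevel0.name
	//       hw.perflevel1.logicalcpu hw.perflevel1.name` might print:
	out := "8\nPerformance\n4\nEfficiency\n"
	lines := strings.Split(strings.TrimSpace(out), "\n")
	if len(lines) < 4 {
		return // older Intel Macs have no perflevel sysctls
	}
	var pCores, eCores int
	for i := 0; i < 4; i += 2 {
		count, _ := strconv.Atoi(strings.TrimSpace(lines[i]))
		name := strings.ToLower(strings.TrimSpace(lines[i+1]))
		switch {
		case strings.Contains(name, "performance"):
			pCores = count
		case strings.Contains(name, "efficiency"):
			eCores = count
		}
	}
	fmt.Printf("P-cores: %d, E-cores: %d\n", pCores, eCores)
}
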
@@ -231,10 +245,10 @@ func fallbackCPUUtilization(logical int) (float64, []float64, error) {
total = maxTotal
}

perCore := make([]float64, logical)
avg := total / float64(logical)
perCore := make([]float64, logical)
for i := range perCore {
perCore[i] = avg
}
return total, perCore, nil
return avg, perCore, nil
}

@@ -43,7 +43,7 @@ func collectDisks() ([]DiskStatus, error) {
if strings.HasPrefix(part.Mountpoint, "/System/Volumes/") {
continue
}
// Skip private volumes
// Skip /private mounts.
if strings.HasPrefix(part.Mountpoint, "/private/") {
continue
}
@@ -58,12 +58,11 @@ func collectDisks() ([]DiskStatus, error) {
if err != nil || usage.Total == 0 {
continue
}
// Skip small volumes (< 1GB)
// Skip <1GB volumes.
if usage.Total < 1<<30 {
continue
}
// For APFS volumes, use a more precise dedup key (bytes level)
// to handle shared storage pools properly
// Use size-based dedupe key for shared pools.
volKey := fmt.Sprintf("%s:%d", part.Fstype, usage.Total)
if seenVolume[volKey] {
continue
@@ -93,26 +92,42 @@ func collectDisks() ([]DiskStatus, error) {
return disks, nil
}

var (
// External disk cache.
lastDiskCacheAt time.Time
diskTypeCache = make(map[string]bool)
diskCacheTTL = 2 * time.Minute
)

func annotateDiskTypes(disks []DiskStatus) {
if len(disks) == 0 || runtime.GOOS != "darwin" || !commandExists("diskutil") {
return
}
cache := make(map[string]bool)

now := time.Now()
// Clear stale cache.
if now.Sub(lastDiskCacheAt) > diskCacheTTL {
diskTypeCache = make(map[string]bool)
lastDiskCacheAt = now
}

for i := range disks {
base := baseDeviceName(disks[i].Device)
if base == "" {
base = disks[i].Device
}
if val, ok := cache[base]; ok {

if val, ok := diskTypeCache[base]; ok {
disks[i].External = val
continue
}

external, err := isExternalDisk(base)
if err != nil {
external = strings.HasPrefix(disks[i].Mount, "/Volumes/")
}
disks[i].External = external
cache[base] = external
diskTypeCache[base] = external
}
}


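APFS volumes that share one container report the same pool size, so the fstype:totalBytes key collapses them to a single entry. A small sketch of the dedupe (the sample sizes are invented for illustration):

package main

import "fmt"

type vol struct {
	fstype string
	total  uint64
}

func main() {
	vols := []vol{
		{"apfs", 494384795648},   // system volume
		{"apfs", 494384795648},   // data volume in the same container
		{"exfat", 2000398934016}, // external drive
	}
	seen := make(map[string]bool)
	for _, v := range vols {
		key := fmt.Sprintf("%s:%d", v.fstype, v.total)
		if seen[key] {
			continue // shared storage pool already counted
		}
		seen[key] = true
		fmt.Println("keep", key)
	}
}
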
@@ -17,7 +17,7 @@ const (
powermetricsTimeout = 2 * time.Second
)

// Pre-compiled regex patterns for GPU usage parsing
// Regex for GPU usage parsing.
var (
gpuActiveResidencyRe = regexp.MustCompile(`GPU HW active residency:\s+([\d.]+)%`)
gpuIdleResidencyRe = regexp.MustCompile(`GPU idle residency:\s+([\d.]+)%`)
@@ -25,7 +25,7 @@ var (

func (c *Collector) collectGPU(now time.Time) ([]GPUStatus, error) {
if runtime.GOOS == "darwin" {
// Get static GPU info (cached for 10 min)
// Static GPU info (cached 10 min).
if len(c.cachedGPU) == 0 || c.lastGPUAt.IsZero() || now.Sub(c.lastGPUAt) >= macGPUInfoTTL {
if gpus, err := readMacGPUInfo(); err == nil && len(gpus) > 0 {
c.cachedGPU = gpus
@@ -33,12 +33,12 @@ func (c *Collector) collectGPU(now time.Time) ([]GPUStatus, error) {
}
}

// Get real-time GPU usage
// Real-time GPU usage.
if len(c.cachedGPU) > 0 {
usage := getMacGPUUsage()
result := make([]GPUStatus, len(c.cachedGPU))
copy(result, c.cachedGPU)
// Apply usage to first GPU (Apple Silicon has one integrated GPU)
// Apply usage to first GPU (Apple Silicon).
if len(result) > 0 {
result[0].Usage = usage
}
@@ -152,19 +152,18 @@ func readMacGPUInfo() ([]GPUStatus, error) {
return gpus, nil
}

// getMacGPUUsage gets GPU active residency from powermetrics.
// Returns -1 if unavailable (e.g., not running as root).
// getMacGPUUsage reads GPU active residency from powermetrics.
func getMacGPUUsage() float64 {
ctx, cancel := context.WithTimeout(context.Background(), powermetricsTimeout)
defer cancel()

// powermetrics requires root, but we try anyway - some systems may have it enabled
// powermetrics may require root.
out, err := runCmd(ctx, "powermetrics", "--samplers", "gpu_power", "-i", "500", "-n", "1")
if err != nil {
return -1
}

// Parse "GPU HW active residency: X.XX%"
// Parse "GPU HW active residency: X.XX%".
matches := gpuActiveResidencyRe.FindStringSubmatch(out)
if len(matches) >= 2 {
usage, err := strconv.ParseFloat(matches[1], 64)
@@ -173,7 +172,7 @@ func getMacGPUUsage() float64 {
}
}

// Fallback: parse "GPU idle residency: X.XX%" and calculate active
// Fallback: parse idle residency and derive active.
matchesIdle := gpuIdleResidencyRe.FindStringSubmatch(out)
if len(matchesIdle) >= 2 {
idle, err := strconv.ParseFloat(matchesIdle[1], 64)

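Both residency regexes anchor on the literal powermetrics labels; when the active line is absent, active usage is derived as 100 minus idle. A sketch against canned samples (real powermetrics output typically requires root):

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var (
	activeRe = regexp.MustCompile(`GPU HW active residency:\s+([\d.]+)%`)
	idleRe   = regexp.MustCompile(`GPU idle residency:\s+([\d.]+)%`)
)

func gpuUsage(out string) float64 {
	if m := activeRe.FindStringSubmatch(out); len(m) >= 2 {
		if v, err := strconv.ParseFloat(m[1], 64); err == nil {
			return v
		}
	}
	// Fallback: derive active residency from the idle line.
	if m := idleRe.FindStringSubmatch(out); len(m) >= 2 {
		if v, err := strconv.ParseFloat(m[1], 64); err == nil {
			return 100 - v
		}
	}
	return -1 // unavailable, e.g. powermetrics refused without root
}

func main() {
	fmt.Println(gpuUsage("GPU HW active residency:  23.45%")) // 23.45
	fmt.Println(gpuUsage("GPU idle residency:  90.00%"))      // 10
}
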
@@ -18,19 +18,18 @@ func collectHardware(totalRAM uint64, disks []DiskStatus) HardwareInfo {
}
}

// Get model and CPU from system_profiler
// Model and CPU from system_profiler.
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()

var model, cpuModel, osVersion string

// Get hardware overview
out, err := runCmd(ctx, "system_profiler", "SPHardwareDataType")
if err == nil {
lines := strings.Split(out, "\n")
for _, line := range lines {
lower := strings.ToLower(strings.TrimSpace(line))
// Prefer "Model Name" over "Model Identifier"
// Prefer "Model Name" over "Model Identifier".
if strings.Contains(lower, "model name:") {
parts := strings.Split(line, ":")
if len(parts) == 2 {
@@ -52,7 +51,6 @@ func collectHardware(totalRAM uint64, disks []DiskStatus) HardwareInfo {
}
}

// Get macOS version
ctx2, cancel2 := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel2()
out2, err := runCmd(ctx2, "sw_vers", "-productVersion")
@@ -60,7 +58,6 @@ func collectHardware(totalRAM uint64, disks []DiskStatus) HardwareInfo {
osVersion = "macOS " + strings.TrimSpace(out2)
}

// Get disk size
diskSize := "Unknown"
if len(disks) > 0 {
diskSize = humanBytes(disks[0].Total)

@@ -5,45 +5,43 @@ import (
"strings"
)

// Health score calculation weights and thresholds
// Health score weights and thresholds.
const (
// Weights (must sum to ~100 for total score)
// Weights.
healthCPUWeight = 30.0
healthMemWeight = 25.0
healthDiskWeight = 20.0
healthThermalWeight = 15.0
healthIOWeight = 10.0

// CPU thresholds
// CPU.
cpuNormalThreshold = 30.0
cpuHighThreshold = 70.0

// Memory thresholds
// Memory.
memNormalThreshold = 50.0
memHighThreshold = 80.0
memPressureWarnPenalty = 5.0
memPressureCritPenalty = 15.0

// Disk thresholds
// Disk.
diskWarnThreshold = 70.0
diskCritThreshold = 90.0

// Thermal thresholds
// Thermal.
thermalNormalThreshold = 60.0
thermalHighThreshold = 85.0

// Disk IO thresholds (MB/s)
// Disk IO (MB/s).
ioNormalThreshold = 50.0
ioHighThreshold = 150.0
)

func calculateHealthScore(cpu CPUStatus, mem MemoryStatus, disks []DiskStatus, diskIO DiskIOStatus, thermal ThermalStatus) (int, string) {
// Start with perfect score
score := 100.0
issues := []string{}

// CPU Usage (30% weight) - deduct up to 30 points
// 0-30% CPU = 0 deduction, 30-70% = linear, 70-100% = heavy penalty
// CPU penalty.
cpuPenalty := 0.0
if cpu.Usage > cpuNormalThreshold {
if cpu.Usage > cpuHighThreshold {
@@ -57,8 +55,7 @@ func calculateHealthScore(cpu CPUStatus, mem MemoryStatus, disks []DiskStatus, d
issues = append(issues, "High CPU")
}

// Memory Usage (25% weight) - deduct up to 25 points
// 0-50% = 0 deduction, 50-80% = linear, 80-100% = heavy penalty
// Memory penalty.
memPenalty := 0.0
if mem.UsedPercent > memNormalThreshold {
if mem.UsedPercent > memHighThreshold {
@@ -72,7 +69,7 @@ func calculateHealthScore(cpu CPUStatus, mem MemoryStatus, disks []DiskStatus, d
issues = append(issues, "High Memory")
}

// Memory Pressure (extra penalty)
// Memory pressure penalty.
if mem.Pressure == "warn" {
score -= memPressureWarnPenalty
issues = append(issues, "Memory Pressure")
@@ -81,7 +78,7 @@ func calculateHealthScore(cpu CPUStatus, mem MemoryStatus, disks []DiskStatus, d
issues = append(issues, "Critical Memory")
}

// Disk Usage (20% weight) - deduct up to 20 points
// Disk penalty.
diskPenalty := 0.0
if len(disks) > 0 {
diskUsage := disks[0].UsedPercent
@@ -98,7 +95,7 @@ func calculateHealthScore(cpu CPUStatus, mem MemoryStatus, disks []DiskStatus, d
}
}

// Thermal (15% weight) - deduct up to 15 points
// Thermal penalty.
thermalPenalty := 0.0
if thermal.CPUTemp > 0 {
if thermal.CPUTemp > thermalNormalThreshold {
@@ -112,7 +109,7 @@ func calculateHealthScore(cpu CPUStatus, mem MemoryStatus, disks []DiskStatus, d
score -= thermalPenalty
}

// Disk IO (10% weight) - deduct up to 10 points
// Disk IO penalty.
ioPenalty := 0.0
totalIO := diskIO.ReadRate + diskIO.WriteRate
if totalIO > ioNormalThreshold {
@@ -125,7 +122,7 @@ func calculateHealthScore(cpu CPUStatus, mem MemoryStatus, disks []DiskStatus, d
}
score -= ioPenalty

// Ensure score is in valid range
// Clamp score.
if score < 0 {
score = 0
}
@@ -133,7 +130,7 @@ func calculateHealthScore(cpu CPUStatus, mem MemoryStatus, disks []DiskStatus, d
score = 100
}

// Generate message
// Build message.
msg := "Excellent"
if score >= 90 {
msg = "Excellent"

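The hunks above elide the interpolation between thresholds, but the weights and cut-offs fix its shape. Assuming linear ramps (an assumption; the elided lines may differ), CPU at 85% lands in the heavy band. A sketch of that arithmetic:

package main

import "fmt"

// linearPenalty is an assumed reconstruction: zero below lo, ramping to
// half the weight at hi, then steeply to the full weight at 100%.
func linearPenalty(usage, lo, hi, weight float64) float64 {
	switch {
	case usage <= lo:
		return 0
	case usage <= hi:
		return (usage - lo) / (hi - lo) * weight / 2
	default:
		return weight/2 + (usage-hi)/(100-hi)*weight/2
	}
}

func main() {
	// CPU weight 30, thresholds 30/70: 85% usage costs 15 + 7.5 = 22.5 points.
	fmt.Printf("%.1f\n", linearPenalty(85, 30, 70, 30))
}
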
@@ -17,7 +17,7 @@ func (c *Collector) collectNetwork(now time.Time) ([]NetworkStatus, error) {
return nil, err
}

// Get IP addresses for interfaces
// Map interface IPs.
ifAddrs := getInterfaceIPs()

if c.lastNetAt.IsZero() {
@@ -81,7 +81,7 @@ func getInterfaceIPs() map[string]string {
}
for _, iface := range ifaces {
for _, addr := range iface.Addrs {
// Only IPv4
// IPv4 only.
if strings.Contains(addr.Addr, ".") && !strings.HasPrefix(addr.Addr, "127.") {
ip := strings.Split(addr.Addr, "/")[0]
result[iface.Name] = ip
@@ -104,14 +104,14 @@ func isNoiseInterface(name string) bool {
}

func collectProxy() ProxyStatus {
// Check environment variables first
// Check environment variables first.
for _, env := range []string{"https_proxy", "HTTPS_PROXY", "http_proxy", "HTTP_PROXY"} {
if val := os.Getenv(env); val != "" {
proxyType := "HTTP"
if strings.HasPrefix(val, "socks") {
proxyType = "SOCKS"
}
// Extract host
// Extract host.
host := val
if strings.Contains(host, "://") {
host = strings.SplitN(host, "://", 2)[1]
@@ -123,7 +123,7 @@ func collectProxy() ProxyStatus {
}
}

// macOS: check system proxy via scutil
// macOS: check system proxy via scutil.
if runtime.GOOS == "darwin" {
ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancel()

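Host extraction in collectProxy strips the scheme and keeps whatever follows. A standalone sketch with a couple of sample values (addresses are illustrative):

package main

import (
	"fmt"
	"strings"
)

func proxyHost(val string) (kind, host string) {
	kind = "HTTP"
	if strings.HasPrefix(val, "socks") {
		kind = "SOCKS"
	}
	host = val
	if strings.Contains(host, "://") {
		host = strings.SplitN(host, "://", 2)[1] // drop the scheme
	}
	return kind, host
}

func main() {
	fmt.Println(proxyHost("http://127.0.0.1:7890"))   // HTTP 127.0.0.1:7890
	fmt.Println(proxyHost("socks5://127.0.0.1:1080")) // SOCKS 127.0.0.1:1080
}
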
@@ -15,7 +15,7 @@ func collectTopProcesses() []ProcessInfo {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()

// Use ps to get top processes by CPU
// Use ps to get top processes by CPU.
out, err := runCmd(ctx, "ps", "-Aceo", "pcpu,pmem,comm", "-r")
if err != nil {
return nil
@@ -24,10 +24,10 @@ func collectTopProcesses() []ProcessInfo {
lines := strings.Split(strings.TrimSpace(out), "\n")
var procs []ProcessInfo
for i, line := range lines {
if i == 0 { // skip header
if i == 0 {
continue
}
if i > 5 { // top 5
if i > 5 {
break
}
fields := strings.Fields(line)
@@ -37,7 +37,7 @@ func collectTopProcesses() []ProcessInfo {
cpuVal, _ := strconv.ParseFloat(fields[0], 64)
memVal, _ := strconv.ParseFloat(fields[1], 64)
name := fields[len(fields)-1]
// Get just the process name without path
// Strip path from command name.
if idx := strings.LastIndex(name, "/"); idx >= 0 {
name = name[idx+1:]
}

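`ps -Aceo pcpu,pmem,comm -r` prints a header row and then CPU-sorted rows; the loop keeps rows 1-5. A sketch over canned output (the sample rows are invented):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	out := "%CPU %MEM COMM\n12.3  1.2 WindowServer\n 8.4  3.1 Safari\n"
	lines := strings.Split(strings.TrimSpace(out), "\n")
	for i, line := range lines {
		if i == 0 {
			continue // header row
		}
		if i > 5 {
			break // top 5 is enough for the card
		}
		fields := strings.Fields(line)
		if len(fields) < 3 {
			continue
		}
		cpu, _ := strconv.ParseFloat(fields[0], 64)
		mem, _ := strconv.ParseFloat(fields[1], 64)
		name := fields[len(fields)-1]
		fmt.Printf("%-14s cpu=%4.1f%% mem=%4.1f%%\n", name, cpu, mem)
	}
}
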
@@ -11,28 +11,29 @@ import (
)

var (
titleStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#C79FD7")).Bold(true)
subtleStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#9E9E9E"))
warnStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#FFD75F"))
dangerStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#FF6B6B")).Bold(true)
okStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#87D787"))
lineStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#5A5A5A"))
hatStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#FF0000"))
titleStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#C79FD7")).Bold(true)
subtleStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#737373"))
warnStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#FFD75F"))
dangerStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#FF5F5F")).Bold(true)
okStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#A5D6A7"))
lineStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#404040"))
hatStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#FF4D4D"))
primaryStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#BD93F9"))
)

const (
colWidth = 38
iconCPU = "⚙"
iconMemory = "▦"
iconGPU = "▣"
iconDisk = "▤"
iconCPU = "◉"
iconMemory = "◫"
iconGPU = "◧"
iconDisk = "▥"
iconNetwork = "⇅"
iconBattery = "▮"
iconSensors = "♨"
iconProcs = "▶"
iconBattery = "◪"
iconSensors = "◈"
iconProcs = "❊"
)

// Check if it's Christmas season (Dec 10-31)
// isChristmasSeason reports Dec 10-31.
func isChristmasSeason() bool {
now := time.Now()
month := now.Month()
@@ -40,7 +41,7 @@ func isChristmasSeason() bool {
return month == time.December && day >= 10 && day <= 31
}

// Mole body frames (legs animate)
// Mole body frames.
var moleBody = [][]string{
{
` /\_/\`,
@@ -68,7 +69,7 @@ var moleBody = [][]string{
},
}

// Mole body frames with Christmas hat
// Mole body frames with Christmas hat.
var moleBodyWithHat = [][]string{
{
` *`,
@@ -104,7 +105,7 @@ var moleBodyWithHat = [][]string{
},
}

// Generate frames with horizontal movement
// getMoleFrame renders the animated mole.
func getMoleFrame(animFrame int, termWidth int) string {
var body []string
var bodyIdx int
@@ -118,15 +119,12 @@ func getMoleFrame(animFrame int, termWidth int) string {
body = moleBody[bodyIdx]
}

// Calculate mole width (approximate)
moleWidth := 15
// Move across terminal width
maxPos := termWidth - moleWidth
if maxPos < 0 {
maxPos = 0
}

// Move position: 0 -> maxPos -> 0
cycleLength := maxPos * 2
if cycleLength == 0 {
cycleLength = 1
@@ -140,7 +138,6 @@ func getMoleFrame(animFrame int, termWidth int) string {
var lines []string

if isChristmas {
// Render with red hat on first 3 lines
for i, line := range body {
if i < 3 {
lines = append(lines, padding+hatStyle.Render(line))
@@ -164,30 +161,33 @@ type cardData struct {
}

func renderHeader(m MetricsSnapshot, errMsg string, animFrame int, termWidth int) string {
// Title
title := titleStyle.Render("Mole Status")

// Health Score with color and label
scoreStyle := getScoreStyle(m.HealthScore)
scoreText := subtleStyle.Render("Health ") + scoreStyle.Render(fmt.Sprintf("● %d", m.HealthScore))

// Hardware info
// Hardware info for a single line.
infoParts := []string{}
if m.Hardware.Model != "" {
infoParts = append(infoParts, m.Hardware.Model)
infoParts = append(infoParts, primaryStyle.Render(m.Hardware.Model))
}
if m.Hardware.CPUModel != "" {
cpuInfo := m.Hardware.CPUModel
// Append GPU core count when available.
if len(m.GPU) > 0 && m.GPU[0].CoreCount > 0 {
cpuInfo += fmt.Sprintf(" (%d GPU cores)", m.GPU[0].CoreCount)
cpuInfo += fmt.Sprintf(" (%dGPU)", m.GPU[0].CoreCount)
}
infoParts = append(infoParts, cpuInfo)
}
var specs []string
if m.Hardware.TotalRAM != "" {
infoParts = append(infoParts, m.Hardware.TotalRAM)
specs = append(specs, m.Hardware.TotalRAM)
}
if m.Hardware.DiskSize != "" {
infoParts = append(infoParts, m.Hardware.DiskSize)
specs = append(specs, m.Hardware.DiskSize)
}
if len(specs) > 0 {
infoParts = append(infoParts, strings.Join(specs, "/"))
}
if m.Hardware.OSVersion != "" {
infoParts = append(infoParts, m.Hardware.OSVersion)
@@ -195,30 +195,24 @@ func renderHeader(m MetricsSnapshot, errMsg string, animFrame int, termWidth int

headerLine := title + " " + scoreText + " " + subtleStyle.Render(strings.Join(infoParts, " · "))

// Running mole animation
mole := getMoleFrame(animFrame, termWidth)

if errMsg != "" {
return lipgloss.JoinVertical(lipgloss.Left, headerLine, "", mole, dangerStyle.Render(errMsg), "")
return lipgloss.JoinVertical(lipgloss.Left, headerLine, "", mole, dangerStyle.Render("ERROR: "+errMsg), "")
}
return headerLine + "\n" + mole
}

func getScoreStyle(score int) lipgloss.Style {
if score >= 90 {
// Excellent - Bright Green
return lipgloss.NewStyle().Foreground(lipgloss.Color("#87FF87")).Bold(true)
} else if score >= 75 {
// Good - Green
return lipgloss.NewStyle().Foreground(lipgloss.Color("#87D787")).Bold(true)
} else if score >= 60 {
// Fair - Yellow
return lipgloss.NewStyle().Foreground(lipgloss.Color("#FFD75F")).Bold(true)
} else if score >= 40 {
// Poor - Orange
return lipgloss.NewStyle().Foreground(lipgloss.Color("#FFAF5F")).Bold(true)
} else {
// Critical - Red
return lipgloss.NewStyle().Foreground(lipgloss.Color("#FF6B6B")).Bold(true)
}
}
@@ -232,7 +226,6 @@ func buildCards(m MetricsSnapshot, _ int) []cardData {
renderProcessCard(m.TopProcesses),
renderNetworkCard(m.Network, m.Proxy),
}
// Only show sensors if we have valid temperature readings
if hasSensorData(m.Sensors) {
cards = append(cards, renderSensorsCard(m.Sensors))
}
@@ -326,7 +319,7 @@ func renderMemoryCard(mem MemoryStatus) cardData {
} else {
lines = append(lines, fmt.Sprintf("Swap %s", subtleStyle.Render("not in use")))
}
// Memory pressure
// Memory pressure status.
if mem.Pressure != "" {
pressureStyle := okStyle
pressureText := "Status " + mem.Pressure
@@ -397,7 +390,6 @@ func formatDiskLine(label string, d DiskStatus) string {
}

func ioBar(rate float64) string {
// Scale: 0-50 MB/s maps to 0-5 blocks
filled := int(rate / 10.0)
if filled > 5 {
filled = 5
@@ -433,7 +425,7 @@ func renderProcessCard(procs []ProcessInfo) cardData {
}

func miniBar(percent float64) string {
filled := int(percent / 20) // 5 chars max for 100%
filled := int(percent / 20)
if filled > 5 {
filled = 5
}
@@ -463,7 +455,7 @@ func renderNetworkCard(netStats []NetworkStatus, proxy ProxyStatus) cardData {
txBar := netBar(totalTx)
lines = append(lines, fmt.Sprintf("Down %s %s", rxBar, formatRate(totalRx)))
lines = append(lines, fmt.Sprintf("Up %s %s", txBar, formatRate(totalTx)))
// Show proxy and IP in one line
// Show proxy and IP on one line.
var infoParts []string
if proxy.Enabled {
infoParts = append(infoParts, "Proxy "+proxy.Type)
@@ -479,7 +471,6 @@ func renderNetworkCard(netStats []NetworkStatus, proxy ProxyStatus) cardData {
}

func netBar(rate float64) string {
// Scale: 0-10 MB/s maps to 0-5 blocks
filled := int(rate / 2.0)
if filled > 5 {
filled = 5
@@ -503,8 +494,6 @@ func renderBatteryCard(batts []BatteryStatus, thermal ThermalStatus) cardData {
lines = append(lines, subtleStyle.Render("No battery"))
} else {
b := batts[0]
// Line 1: label + bar + percentage (consistent with other cards)
// Only show red when battery is critically low
statusLower := strings.ToLower(b.Status)
percentText := fmt.Sprintf("%5.1f%%", b.Percent)
if b.Percent < 20 && statusLower != "charging" && statusLower != "charged" {
@@ -512,7 +501,6 @@ func renderBatteryCard(batts []BatteryStatus, thermal ThermalStatus) cardData {
}
lines = append(lines, fmt.Sprintf("Level %s %s", batteryProgressBar(b.Percent), percentText))

// Line 2: status
statusIcon := ""
statusStyle := subtleStyle
if statusLower == "charging" || statusLower == "charged" {
@@ -521,7 +509,6 @@ func renderBatteryCard(batts []BatteryStatus, thermal ThermalStatus) cardData {
} else if b.Percent < 20 {
statusStyle = dangerStyle
}
// Capitalize first letter
statusText := b.Status
if len(statusText) > 0 {
statusText = strings.ToUpper(statusText[:1]) + strings.ToLower(statusText[1:])
@@ -529,9 +516,18 @@ func renderBatteryCard(batts []BatteryStatus, thermal ThermalStatus) cardData {
if b.TimeLeft != "" {
statusText += " · " + b.TimeLeft
}
// Add power info.
if statusLower == "charging" || statusLower == "charged" {
if thermal.SystemPower > 0 {
statusText += fmt.Sprintf(" · %.0fW", thermal.SystemPower)
} else if thermal.AdapterPower > 0 {
statusText += fmt.Sprintf(" · %.0fW Adapter", thermal.AdapterPower)
}
} else if thermal.BatteryPower > 0 {
statusText += fmt.Sprintf(" · %.0fW", thermal.BatteryPower)
}
lines = append(lines, statusStyle.Render(statusText+statusIcon))

// Line 3: Health + cycles + temp
healthParts := []string{}
if b.Health != "" {
healthParts = append(healthParts, b.Health)
@@ -540,7 +536,6 @@ func renderBatteryCard(batts []BatteryStatus, thermal ThermalStatus) cardData {
healthParts = append(healthParts, fmt.Sprintf("%d cycles", b.CycleCount))
}

// Add temperature if available
if thermal.CPUTemp > 0 {
tempStyle := subtleStyle
if thermal.CPUTemp > 80 {
@@ -551,7 +546,6 @@ func renderBatteryCard(batts []BatteryStatus, thermal ThermalStatus) cardData {
healthParts = append(healthParts, tempStyle.Render(fmt.Sprintf("%.0f°C", thermal.CPUTemp)))
}

// Add fan speed if available
if thermal.FanSpeed > 0 {
healthParts = append(healthParts, fmt.Sprintf("%d RPM", thermal.FanSpeed))
}
@@ -580,14 +574,13 @@ func renderSensorsCard(sensors []SensorReading) cardData {

func renderCard(data cardData, width int, height int) string {
titleText := data.icon + " " + data.title
lineLen := width - lipgloss.Width(titleText) - 1
lineLen := width - lipgloss.Width(titleText) - 2
if lineLen < 4 {
lineLen = 4
}
header := titleStyle.Render(titleText) + " " + lineStyle.Render(strings.Repeat("─", lineLen))
header := titleStyle.Render(titleText) + " " + lineStyle.Render(strings.Repeat("╌", lineLen))
content := header + "\n" + strings.Join(data.lines, "\n")

// Pad to target height
lines := strings.Split(content, "\n")
for len(lines) < height {
lines = append(lines, "")
@@ -596,7 +589,7 @@ func renderCard(data cardData, width int, height int) string {
}

func progressBar(percent float64) string {
total := 18
total := 16
if percent < 0 {
percent = 0
}
@@ -604,9 +597,6 @@ func progressBar(percent float64) string {
percent = 100
}
filled := int(percent / 100 * float64(total))
if filled > total {
filled = total
}

var builder strings.Builder
for i := 0; i < total; i++ {
@@ -620,7 +610,7 @@ func progressBar(percent float64) string {
}

func batteryProgressBar(percent float64) string {
total := 18
total := 16
if percent < 0 {
percent = 0
}
@@ -628,9 +618,6 @@ func batteryProgressBar(percent float64) string {
percent = 100
}
filled := int(percent / 100 * float64(total))
if filled > total {
filled = total
}

var builder strings.Builder
for i := 0; i < total; i++ {
@@ -645,9 +632,9 @@ func batteryProgressBar(percent float64) string {

func colorizePercent(percent float64, s string) string {
switch {
case percent >= 90:
case percent >= 85:
return dangerStyle.Render(s)
case percent >= 70:
case percent >= 60:
return warnStyle.Render(s)
default:
return okStyle.Render(s)
@@ -766,7 +753,6 @@ func renderTwoColumns(cards []cardData, width int) string {
}
}

// Add empty lines between rows for separation
var spacedRows []string
for i, r := range rows {
if i > 0 {

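Both bars share the same fill arithmetic after the width change from 18 to 16 cells; since percent is clamped to 0-100 first, the explicit filled > total guard became redundant and was dropped. A sketch (the fill glyphs here are placeholders; the diff does not show which characters the renderer uses):

package main

import (
	"fmt"
	"strings"
)

func bar(percent float64) string {
	const total = 16
	if percent < 0 {
		percent = 0
	}
	if percent > 100 {
		percent = 100
	}
	filled := int(percent / 100 * float64(total)) // 0..16 after clamping
	var b strings.Builder
	b.WriteString(strings.Repeat("█", filled))
	b.WriteString(strings.Repeat("░", total-filled))
	return b.String()
}

func main() {
	fmt.Println(bar(62.5)) // exactly ten filled cells
}
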
2
go.mod
@@ -9,7 +9,7 @@ require (
github.com/charmbracelet/bubbletea v1.3.10
github.com/charmbracelet/lipgloss v1.1.0
github.com/shirou/gopsutil/v3 v3.24.5
golang.org/x/sync v0.18.0
golang.org/x/sync v0.19.0
)

require (

4
go.sum
@@ -64,8 +64,8 @@ github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 h1:MDc5xs78ZrZr3HMQugiXOAkSZtfTpbJLDr/lwfgO53E=
golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

580
install.sh
@@ -1,16 +1,16 @@
#!/bin/bash
# Mole Installation Script
# Mole - Installer for manual installs.
# Fetches source/binaries and installs to prefix.
# Supports update and edge installs.

set -euo pipefail

# Colors
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'

# Simple spinner
_SPINNER_PID=""
start_line_spinner() {
local msg="$1"
@@ -36,67 +36,54 @@ stop_line_spinner() { if [[ -n "$_SPINNER_PID" ]]; then
printf "\r\033[K"
fi; }

# Verbosity (0 = quiet, 1 = verbose)
VERBOSE=1

# Icons (duplicated from lib/core/common.sh - necessary as install.sh runs standalone)
readonly ICON_SUCCESS="✓"
readonly ICON_ADMIN="●"
readonly ICON_CONFIRM="◎"
readonly ICON_ERROR="☻"
# Icons duplicated from lib/core/common.sh (install.sh runs standalone).
# Avoid readonly to prevent conflicts when sourcing common.sh later.
ICON_SUCCESS="✓"
ICON_ADMIN="●"
ICON_CONFIRM="◎"
ICON_ERROR="☻"

# Logging functions
log_info() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}$1${NC}"; }
log_success() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${GREEN}${ICON_SUCCESS}${NC} $1"; }
log_warning() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${YELLOW}$1${NC}"; }
log_error() { echo -e "${RED}${ICON_ERROR}${NC} $1"; }
log_error() { echo -e "${YELLOW}${ICON_ERROR}${NC} $1"; }
log_admin() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}${ICON_ADMIN}${NC} $1"; }
log_confirm() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}${ICON_CONFIRM}${NC} $1"; }

# Default installation directory
# Install defaults
INSTALL_DIR="/usr/local/bin"
CONFIG_DIR="$HOME/.config/mole"
SOURCE_DIR=""

# Default action (install|update)
ACTION="install"

show_help() {
cat << 'EOF'
Mole Installation Script
========================
# Resolve source dir (local checkout, env override, or download).
needs_sudo() {
if [[ -e "$INSTALL_DIR" ]]; then
[[ ! -w "$INSTALL_DIR" ]]
return
fi

USAGE:
./install.sh [OPTIONS]

OPTIONS:
--prefix PATH Install to custom directory (default: /usr/local/bin)
--config PATH Config directory (default: ~/.config/mole)
--update Update Mole to the latest version
--uninstall Uninstall mole
--help, -h Show this help

EXAMPLES:
./install.sh # Install to /usr/local/bin
./install.sh --prefix ~/.local/bin # Install to custom directory
./install.sh --update # Update Mole in place
./install.sh --uninstall # Uninstall mole

The installer will:
1. Copy mole binary and scripts to the install directory
2. Set up config directory with all modules
3. Make the mole command available system-wide
EOF
echo ""
local parent_dir
parent_dir="$(dirname "$INSTALL_DIR")"
[[ ! -w "$parent_dir" ]]
}

maybe_sudo() {
if needs_sudo; then
sudo "$@"
else
"$@"
fi
}

|
||||
resolve_source_dir() {
|
||||
if [[ -n "$SOURCE_DIR" && -d "$SOURCE_DIR" && -f "$SOURCE_DIR/mole" ]]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
# 1) If script is on disk, use its directory (only when mole executable present)
|
||||
if [[ -n "${BASH_SOURCE[0]:-}" && -f "${BASH_SOURCE[0]}" ]]; then
|
||||
local script_dir
|
||||
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
@@ -106,27 +93,53 @@ resolve_source_dir() {
|
||||
fi
|
||||
fi
|
||||
|
||||
# 2) If CLEAN_SOURCE_DIR env is provided, honor it
|
||||
if [[ -n "${CLEAN_SOURCE_DIR:-}" && -d "$CLEAN_SOURCE_DIR" && -f "$CLEAN_SOURCE_DIR/mole" ]]; then
|
||||
SOURCE_DIR="$CLEAN_SOURCE_DIR"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# 3) Fallback: fetch repository to a temp directory (works for curl | bash)
|
||||
local tmp
|
||||
tmp="$(mktemp -d)"
|
||||
# Expand tmp now so trap doesn't depend on local scope
|
||||
trap "rm -rf '$tmp'" EXIT
|
||||
trap "stop_line_spinner 2>/dev/null; rm -rf '$tmp'" EXIT
|
||||
|
||||
start_line_spinner "Fetching Mole source..."
|
||||
local branch="${MOLE_VERSION:-}"
|
||||
if [[ -z "$branch" ]]; then
|
||||
branch="$(get_latest_release_tag || true)"
|
||||
fi
|
||||
if [[ -z "$branch" ]]; then
|
||||
branch="$(get_latest_release_tag_from_git || true)"
|
||||
fi
|
||||
if [[ -z "$branch" ]]; then
|
||||
branch="main"
|
||||
fi
|
||||
if [[ "$branch" != "main" ]]; then
|
||||
branch="$(normalize_release_tag "$branch")"
|
||||
fi
|
||||
local url="https://github.com/tw93/mole/archive/refs/heads/main.tar.gz"
|
||||
|
||||
if [[ "$branch" != "main" ]]; then
|
||||
url="https://github.com/tw93/mole/archive/refs/tags/${branch}.tar.gz"
|
||||
fi
|
||||
|
||||
start_line_spinner "Fetching Mole source (${branch})..."
|
||||
if command -v curl > /dev/null 2>&1; then
|
||||
if curl -fsSL -o "$tmp/mole.tar.gz" "https://github.com/tw93/mole/archive/refs/heads/main.tar.gz"; then
|
||||
if curl -fsSL -o "$tmp/mole.tar.gz" "$url" 2> /dev/null; then
|
||||
if tar -xzf "$tmp/mole.tar.gz" -C "$tmp" 2> /dev/null; then
|
||||
stop_line_spinner
|
||||
|
||||
local extracted_dir
|
||||
extracted_dir=$(find "$tmp" -mindepth 1 -maxdepth 1 -type d | head -n 1)
|
||||
|
||||
if [[ -n "$extracted_dir" && -f "$extracted_dir/mole" ]]; then
|
||||
SOURCE_DIR="$extracted_dir"
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
else
|
||||
stop_line_spinner
|
||||
tar -xzf "$tmp/mole.tar.gz" -C "$tmp"
|
||||
# Extracted folder name: mole-main
|
||||
if [[ -d "$tmp/mole-main" ]]; then
|
||||
SOURCE_DIR="$tmp/mole-main"
|
||||
return 0
|
||||
if [[ "$branch" != "main" ]]; then
|
||||
log_error "Failed to fetch version ${branch}. Check if tag exists."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
@@ -134,7 +147,12 @@ resolve_source_dir() {
|
||||
|
||||
start_line_spinner "Cloning Mole source..."
|
||||
if command -v git > /dev/null 2>&1; then
|
||||
if git clone --depth=1 https://github.com/tw93/mole.git "$tmp/mole" > /dev/null 2>&1; then
|
||||
local git_args=("--depth=1")
|
||||
if [[ "$branch" != "main" ]]; then
|
||||
git_args+=("--branch" "$branch")
|
||||
fi
|
||||
|
||||
if git clone "${git_args[@]}" https://github.com/tw93/mole.git "$tmp/mole" > /dev/null 2>&1; then
|
||||
stop_line_spinner
|
||||
SOURCE_DIR="$tmp/mole"
|
||||
return 0
|
||||
@@ -146,6 +164,7 @@ resolve_source_dir() {
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Version helpers
get_source_version() {
local source_mole="$SOURCE_DIR/mole"
if [[ -f "$source_mole" ]]; then
@@ -153,30 +172,118 @@ get_source_version() {
fi
}

get_latest_release_tag() {
local tag
if ! command -v curl > /dev/null 2>&1; then
return 1
fi
tag=$(curl -fsSL --connect-timeout 2 --max-time 3 \
"https://api.github.com/repos/tw93/mole/releases/latest" 2> /dev/null |
sed -n 's/.*"tag_name":[[:space:]]*"\([^"]*\)".*/\1/p' | head -n1)
if [[ -z "$tag" ]]; then
return 1
fi
printf '%s\n' "$tag"
}

get_latest_release_tag_from_git() {
if ! command -v git > /dev/null 2>&1; then
return 1
fi
git ls-remote --tags --refs https://github.com/tw93/mole.git 2> /dev/null |
awk -F/ '{print $NF}' |
grep -E '^V[0-9]' |
sort -V |
tail -n 1
}

normalize_release_tag() {
local tag="$1"
while [[ "$tag" =~ ^[vV] ]]; do
tag="${tag#v}"
tag="${tag#V}"
done
if [[ -n "$tag" ]]; then
printf 'V%s\n' "$tag"
fi
}

get_installed_version() {
local binary="$INSTALL_DIR/mole"
if [[ -x "$binary" ]]; then
# Try running the binary first (preferred method)
local version
version=$("$binary" --version 2> /dev/null | awk 'NF {print $NF; exit}')
version=$("$binary" --version 2> /dev/null | awk '/Mole version/ {print $NF; exit}')
if [[ -n "$version" ]]; then
echo "$version"
else
# Fallback: parse VERSION from file (in case binary is broken)
sed -n 's/^VERSION="\(.*\)"$/\1/p' "$binary" | head -n1
fi
fi
}

# Parse command line arguments
# CLI parsing (supports main/latest and version tokens).
parse_args() {
local -a args=("$@")
local version_token=""
local i skip_next=false
for i in "${!args[@]}"; do
local token="${args[$i]}"
[[ -z "$token" ]] && continue
# Skip values for options that take arguments
if [[ "$skip_next" == "true" ]]; then
skip_next=false
continue
fi
if [[ "$token" == "--prefix" || "$token" == "--config" ]]; then
skip_next=true
continue
fi
if [[ "$token" == -* ]]; then
continue
fi
if [[ -n "$version_token" ]]; then
log_error "Unexpected argument: $token"
exit 1
fi
case "$token" in
latest | main)
export MOLE_VERSION="main"
export MOLE_EDGE_INSTALL="true"
version_token="$token"
unset 'args[$i]'
;;
[0-9]* | V[0-9]* | v[0-9]*)
export MOLE_VERSION="$token"
version_token="$token"
unset 'args[$i]'
;;
*)
log_error "Unknown option: $token"
exit 1
;;
esac
done
if [[ ${#args[@]} -gt 0 ]]; then
set -- ${args[@]+"${args[@]}"}
else
set --
fi

while [[ $# -gt 0 ]]; do
case $1 in
--prefix)
if [[ -z "${2:-}" ]]; then
log_error "Missing value for --prefix"
exit 1
fi
INSTALL_DIR="$2"
shift 2
;;
--config)
if [[ -z "${2:-}" ]]; then
log_error "Missing value for --config"
exit 1
fi
CONFIG_DIR="$2"
shift 2
;;
@@ -184,76 +291,177 @@ parse_args() {
ACTION="update"
shift 1
;;
--uninstall)
uninstall_mole
exit 0
;;
--verbose | -v)
VERBOSE=1
shift 1
;;
--help | -h)
show_help
exit 0
log_error "Unknown option: $1"
exit 1
;;
*)
log_error "Unknown option: $1"
show_help
exit 1
;;
esac
done
}

# Check system requirements
# Environment checks and directory setup
check_requirements() {
# Check if running on macOS
if [[ "$OSTYPE" != "darwin"* ]]; then
log_error "This tool is designed for macOS only"
exit 1
fi

# Check if already installed via Homebrew
if command -v brew > /dev/null 2>&1 && brew list mole > /dev/null 2>&1; then
if [[ "$ACTION" == "update" ]]; then
return 0
local mole_path
mole_path=$(command -v mole 2> /dev/null || true)
local is_homebrew_binary=false

if [[ -n "$mole_path" && -L "$mole_path" ]]; then
if readlink "$mole_path" | grep -q "Cellar/mole"; then
is_homebrew_binary=true
fi
fi

echo -e "${YELLOW}Mole is installed via Homebrew${NC}"
echo ""
echo "Choose one:"
echo -e " 1. Update via Homebrew: ${GREEN}brew upgrade mole${NC}"
echo -e " 2. Switch to manual: ${GREEN}brew uninstall mole${NC} then re-run this"
echo ""
exit 1
if [[ "$is_homebrew_binary" == "true" ]]; then
if [[ "$ACTION" == "update" ]]; then
return 0
fi

echo -e "${YELLOW}Mole is installed via Homebrew${NC}"
echo ""
echo "Choose one:"
echo -e " 1. Update via Homebrew: ${GREEN}brew upgrade mole${NC}"
echo -e " 2. Switch to manual: ${GREEN}brew uninstall --force mole${NC} then re-run this"
echo ""
exit 1
else
log_warning "Cleaning up stale Homebrew installation..."
brew uninstall --force mole > /dev/null 2>&1 || true
fi
fi

# Check if install directory exists and is writable
if [[ ! -d "$(dirname "$INSTALL_DIR")" ]]; then
log_error "Parent directory $(dirname "$INSTALL_DIR") does not exist"
exit 1
fi
}

# Create installation directories
create_directories() {
# Create install directory if it doesn't exist
if [[ ! -d "$INSTALL_DIR" ]]; then
if [[ "$INSTALL_DIR" == "/usr/local/bin" ]] && [[ ! -w "$(dirname "$INSTALL_DIR")" ]]; then
sudo mkdir -p "$INSTALL_DIR"
else
mkdir -p "$INSTALL_DIR"
fi
maybe_sudo mkdir -p "$INSTALL_DIR"
fi

# Create config directory
mkdir -p "$CONFIG_DIR"
mkdir -p "$CONFIG_DIR/bin"
mkdir -p "$CONFIG_DIR/lib"
if ! mkdir -p "$CONFIG_DIR" "$CONFIG_DIR/bin" "$CONFIG_DIR/lib"; then
log_error "Failed to create config directory: $CONFIG_DIR"
exit 1
fi

}

# Install files
# Binary install helpers
build_binary_from_source() {
local binary_name="$1"
local target_path="$2"
local cmd_dir=""

case "$binary_name" in
analyze)
cmd_dir="cmd/analyze"
;;
status)
cmd_dir="cmd/status"
;;
*)
return 1
;;
esac

if ! command -v go > /dev/null 2>&1; then
return 1
fi

if [[ ! -d "$SOURCE_DIR/$cmd_dir" ]]; then
return 1
fi

if [[ -t 1 ]]; then
start_line_spinner "Building ${binary_name} from source..."
else
echo "Building ${binary_name} from source..."
fi

if (cd "$SOURCE_DIR" && go build -ldflags="-s -w" -o "$target_path" "./$cmd_dir" > /dev/null 2>&1); then
if [[ -t 1 ]]; then stop_line_spinner; fi
chmod +x "$target_path"
log_success "Built ${binary_name} from source"
return 0
fi

if [[ -t 1 ]]; then stop_line_spinner; fi
log_warning "Failed to build ${binary_name} from source"
return 1
}

download_binary() {
local binary_name="$1"
local target_path="$CONFIG_DIR/bin/${binary_name}-go"
local arch
arch=$(uname -m)
local arch_suffix="amd64"
if [[ "$arch" == "arm64" ]]; then
arch_suffix="arm64"
fi

if [[ -f "$SOURCE_DIR/bin/${binary_name}-go" ]]; then
cp "$SOURCE_DIR/bin/${binary_name}-go" "$target_path"
chmod +x "$target_path"
log_success "Installed local ${binary_name} binary"
return 0
elif [[ -f "$SOURCE_DIR/bin/${binary_name}-darwin-${arch_suffix}" ]]; then
cp "$SOURCE_DIR/bin/${binary_name}-darwin-${arch_suffix}" "$target_path"
chmod +x "$target_path"
log_success "Installed local ${binary_name} binary"
return 0
fi

local version
version=$(get_source_version)
if [[ -z "$version" ]]; then
log_warning "Could not determine version for ${binary_name}, trying local build"
if build_binary_from_source "$binary_name" "$target_path"; then
return 0
fi
return 1
fi
local url="https://github.com/tw93/mole/releases/download/V${version}/${binary_name}-darwin-${arch_suffix}"

# Skip preflight network checks to avoid false negatives.

if [[ -t 1 ]]; then
start_line_spinner "Downloading ${binary_name}..."
else
echo "Downloading ${binary_name}..."
fi

if curl -fsSL --connect-timeout 10 --max-time 60 -o "$target_path" "$url"; then
if [[ -t 1 ]]; then stop_line_spinner; fi
chmod +x "$target_path"
log_success "Downloaded ${binary_name} binary"
else
if [[ -t 1 ]]; then stop_line_spinner; fi
log_warning "Could not download ${binary_name} binary (v${version}), trying local build"
if build_binary_from_source "$binary_name" "$target_path"; then
return 0
fi
log_error "Failed to install ${binary_name} binary"
return 1
fi
}

# File installation (bin/lib/scripts + go helpers).
install_files() {

resolve_source_dir
@@ -265,17 +473,13 @@ install_files() {
install_dir_abs="$(cd "$INSTALL_DIR" && pwd)"
config_dir_abs="$(cd "$CONFIG_DIR" && pwd)"

# Copy main executable when destination differs
if [[ -f "$SOURCE_DIR/mole" ]]; then
if [[ "$source_dir_abs" != "$install_dir_abs" ]]; then
if [[ "$INSTALL_DIR" == "/usr/local/bin" ]] && [[ ! -w "$INSTALL_DIR" ]]; then
if needs_sudo; then
log_admin "Admin access required for /usr/local/bin"
sudo cp "$SOURCE_DIR/mole" "$INSTALL_DIR/mole"
sudo chmod +x "$INSTALL_DIR/mole"
else
cp "$SOURCE_DIR/mole" "$INSTALL_DIR/mole"
chmod +x "$INSTALL_DIR/mole"
fi
maybe_sudo cp "$SOURCE_DIR/mole" "$INSTALL_DIR/mole"
maybe_sudo chmod +x "$INSTALL_DIR/mole"
log_success "Installed mole to $INSTALL_DIR"
fi
else
@@ -283,32 +487,30 @@ install_files() {
exit 1
fi

# Install mo alias for Mole if available
if [[ -f "$SOURCE_DIR/mo" ]]; then
if [[ "$source_dir_abs" == "$install_dir_abs" ]]; then
log_success "mo alias already present"
else
if [[ "$INSTALL_DIR" == "/usr/local/bin" ]] && [[ ! -w "$INSTALL_DIR" ]]; then
sudo cp "$SOURCE_DIR/mo" "$INSTALL_DIR/mo"
sudo chmod +x "$INSTALL_DIR/mo"
else
cp "$SOURCE_DIR/mo" "$INSTALL_DIR/mo"
chmod +x "$INSTALL_DIR/mo"
fi
maybe_sudo cp "$SOURCE_DIR/mo" "$INSTALL_DIR/mo"
maybe_sudo chmod +x "$INSTALL_DIR/mo"
log_success "Installed mo alias"
fi
fi

# Copy configuration and modules
if [[ -d "$SOURCE_DIR/bin" ]]; then
local source_bin_abs="$(cd "$SOURCE_DIR/bin" && pwd)"
local config_bin_abs="$(cd "$CONFIG_DIR/bin" && pwd)"
if [[ "$source_bin_abs" == "$config_bin_abs" ]]; then
log_success "Modules already synced"
else
cp -r "$SOURCE_DIR/bin"/* "$CONFIG_DIR/bin/"
chmod +x "$CONFIG_DIR/bin"/*
log_success "Installed modules"
local -a bin_files=("$SOURCE_DIR/bin"/*)
if [[ ${#bin_files[@]} -gt 0 ]]; then
cp -r "${bin_files[@]}" "$CONFIG_DIR/bin/"
for file in "$CONFIG_DIR/bin/"*; do
[[ -e "$file" ]] && chmod +x "$file"
done
log_success "Installed modules"
fi
fi
fi

@@ -318,12 +520,14 @@ install_files() {
if [[ "$source_lib_abs" == "$config_lib_abs" ]]; then
log_success "Libraries already synced"
else
cp -r "$SOURCE_DIR/lib"/* "$CONFIG_DIR/lib/"
log_success "Installed libraries"
local -a lib_files=("$SOURCE_DIR/lib"/*)
if [[ ${#lib_files[@]} -gt 0 ]]; then
cp -r "${lib_files[@]}" "$CONFIG_DIR/lib/"
log_success "Installed libraries"
fi
fi
fi

# Copy other files if they exist and directories differ
if [[ "$config_dir_abs" != "$source_dir_abs" ]]; then
for file in README.md LICENSE install.sh; do
if [[ -f "$SOURCE_DIR/$file" ]]; then
@@ -336,22 +540,23 @@ install_files() {
chmod +x "$CONFIG_DIR/install.sh"
fi

# Update the mole script to use the config directory when installed elsewhere
if [[ "$source_dir_abs" != "$install_dir_abs" ]]; then
if [[ "$INSTALL_DIR" == "/usr/local/bin" ]] && [[ ! -w "$INSTALL_DIR" ]]; then
sudo sed -i '' "s|SCRIPT_DIR=.*|SCRIPT_DIR=\"$CONFIG_DIR\"|" "$INSTALL_DIR/mole"
else
sed -i '' "s|SCRIPT_DIR=.*|SCRIPT_DIR=\"$CONFIG_DIR\"|" "$INSTALL_DIR/mole"
fi
maybe_sudo sed -i '' "s|SCRIPT_DIR=.*|SCRIPT_DIR=\"$CONFIG_DIR\"|" "$INSTALL_DIR/mole"
fi

if ! download_binary "analyze"; then
exit 1
fi
if ! download_binary "status"; then
exit 1
fi
}

# Verify installation
|
||||
# Verification and PATH hint
|
||||
verify_installation() {
|
||||
|
||||
if [[ -x "$INSTALL_DIR/mole" ]] && [[ -f "$CONFIG_DIR/lib/core/common.sh" ]]; then
|
||||
|
||||
# Test if mole command works
|
||||
if "$INSTALL_DIR/mole" --help > /dev/null 2>&1; then
|
||||
return 0
|
||||
else
|
||||
@@ -363,14 +568,11 @@ verify_installation() {
|
||||
fi
|
||||
}
|
||||
|
||||
# Add to PATH if needed
|
||||
setup_path() {
|
||||
# Check if install directory is in PATH
|
||||
if [[ ":$PATH:" == *":$INSTALL_DIR:"* ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
# Only suggest PATH setup for custom directories
|
||||
if [[ "$INSTALL_DIR" != "/usr/local/bin" ]]; then
|
||||
log_warning "$INSTALL_DIR is not in your PATH"
|
||||
echo ""
|
||||
@@ -428,77 +630,7 @@ print_usage_summary() {
|
||||
echo ""
|
||||
}
|
||||
|
||||
# Uninstall function
uninstall_mole() {
log_confirm "Uninstalling Mole"
echo ""

# Remove executable
if [[ -f "$INSTALL_DIR/mole" ]]; then
if [[ "$INSTALL_DIR" == "/usr/local/bin" ]] && [[ ! -w "$INSTALL_DIR" ]]; then
log_admin "Admin access required"
sudo rm -f "$INSTALL_DIR/mole"
else
rm -f "$INSTALL_DIR/mole"
fi
log_success "Removed mole executable"
fi

if [[ -f "$INSTALL_DIR/mo" ]]; then
if [[ "$INSTALL_DIR" == "/usr/local/bin" ]] && [[ ! -w "$INSTALL_DIR" ]]; then
sudo rm -f "$INSTALL_DIR/mo"
else
rm -f "$INSTALL_DIR/mo"
fi
log_success "Removed mo alias"
fi

# SAFETY CHECK: Verify config directory is safe to remove
# Only allow removal of mole-specific directories
local is_safe=0

# Additional safety: never delete system critical paths (check first)
case "$CONFIG_DIR" in
/ | /usr | /usr/local | /usr/local/bin | /usr/local/lib | /usr/local/share | \
/Library | /System | /bin | /sbin | /etc | /var | /opt | "$HOME" | "$HOME/Library" | \
/usr/local/lib/* | /usr/local/share/* | /Library/* | /System/*)
is_safe=0
;;
*)
# Safe patterns: must be in user's home and end with 'mole'
if [[ "$CONFIG_DIR" == "$HOME/.config/mole" ]] ||
[[ "$CONFIG_DIR" == "$HOME"/.*/mole ]]; then
is_safe=1
fi
;;
esac

# Ask before removing config directory
if [[ -d "$CONFIG_DIR" ]]; then
if [[ $is_safe -eq 0 ]]; then
log_warning "Config directory $CONFIG_DIR is not safe to auto-remove"
log_warning "Skipping automatic removal for safety"
echo ""
echo "Please manually review and remove mole-specific files from:"
echo " $CONFIG_DIR"
else
echo ""
read -p "Remove configuration directory $CONFIG_DIR? (y/N): " -n 1 -r
echo ""
if [[ $REPLY =~ ^[Yy]$ ]]; then
rm -rf "$CONFIG_DIR"
log_success "Removed configuration"
else
log_success "Configuration preserved"
fi
fi
fi

echo ""
log_confirm "Mole uninstalled successfully"
}

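The uninstall flow never reaches `rm -rf` until the target survives a deny-list of system paths and then matches an allow-pattern under $HOME. The same two-step guard reduced to a reusable sketch (the patterns here are illustrative, not Mole's full list):

# Sketch: refuse to delete anything outside known-safe locations.
is_safe_to_remove() {
    local dir="$1"
    case "$dir" in
        / | /usr | /usr/local | /etc | /var | "$HOME" | "$HOME/Library") return 1 ;;
    esac
    [[ "$dir" == "$HOME/.config/mole" ]]
}
is_safe_to_remove "$CONFIG_DIR" && rm -rf "$CONFIG_DIR"
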
# Main installation function
# Main install/update flows
perform_install() {
resolve_source_dir
local source_version
@@ -517,6 +649,14 @@ perform_install() {
installed_version="$source_version"
fi

# Edge installs get a suffix to make the version explicit.
if [[ "${MOLE_EDGE_INSTALL:-}" == "true" ]]; then
installed_version="${installed_version}-edge"
echo ""
log_warning "Edge version installed on main branch"
log_info "This is a testing version; use 'mo update' to switch to stable"
fi

print_usage_summary "installed" "$installed_version"
}

@@ -524,51 +664,19 @@ perform_update() {
check_requirements

if command -v brew > /dev/null 2>&1 && brew list mole > /dev/null 2>&1; then
# Try to use shared function if available (when running from installed Mole)
resolve_source_dir 2> /dev/null || true
local current_version
current_version=$(get_installed_version || echo "unknown")
if [[ -f "$SOURCE_DIR/lib/core/common.sh" ]]; then
# shellcheck disable=SC1090,SC1091
source "$SOURCE_DIR/lib/core/common.sh"
update_via_homebrew "$VERSION"
update_via_homebrew "$current_version"
else
# Fallback: inline implementation
if [[ -t 1 ]]; then
start_line_spinner "Updating Homebrew..."
else
echo "Updating Homebrew..."
fi
brew update 2>&1 | grep -Ev "^(==>|Already up-to-date)" || true
if [[ -t 1 ]]; then
stop_line_spinner
fi

if [[ -t 1 ]]; then
start_line_spinner "Upgrading Mole..."
else
echo "Upgrading Mole..."
fi
local upgrade_output
upgrade_output=$(brew upgrade mole 2>&1) || true
if [[ -t 1 ]]; then
stop_line_spinner
fi

if echo "$upgrade_output" | grep -q "already installed"; then
local current_version
current_version=$(brew list --versions mole 2> /dev/null | awk '{print $2}')
echo -e "${GREEN}✓${NC} Already on latest version (${current_version:-$VERSION})"
elif echo "$upgrade_output" | grep -q "Error:"; then
log_error "Homebrew upgrade failed"
echo "$upgrade_output" | grep "Error:" >&2
exit 1
else
echo "$upgrade_output" | grep -Ev "^(==>|Updating Homebrew|Warning:)" || true
local new_version
new_version=$(brew list --versions mole 2> /dev/null | awk '{print $2}')
echo -e "${GREEN}✓${NC} Updated to latest version (${new_version:-$VERSION})"
fi

rm -f "$HOME/.cache/mole/version_check" "$HOME/.cache/mole/update_message"
log_error "Cannot update Homebrew-managed Mole without full installation"
echo ""
echo "Please update via Homebrew:"
echo -e " ${GREEN}brew upgrade mole${NC}"
exit 1
fi
exit 0
fi
@@ -592,11 +700,10 @@ perform_update() {
fi

if [[ "$installed_version" == "$target_version" ]]; then
echo -e "${GREEN}✓${NC} Already on latest version ($installed_version)"
echo -e "${GREEN}${ICON_SUCCESS}${NC} Already on latest version ($installed_version)"
exit 0
fi

# Update with minimal output (suppress info/success, show errors only)
local old_verbose=$VERBOSE
VERBOSE=0
create_directories || {
@@ -624,10 +731,9 @@ perform_update() {
updated_version="$target_version"
fi

echo -e "${GREEN}✓${NC} Updated to latest version ($updated_version)"
echo -e "${GREEN}${ICON_SUCCESS}${NC} Updated to latest version ($updated_version)"
}

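Both branches of perform_update read the installed version back from `brew list --versions`, whose output is of the form `mole 1.2.3`, so the second whitespace-separated field is the version string. The extraction in isolation:

# Sketch: pull the installed version out of `brew list --versions`.
installed=$(brew list --versions mole 2> /dev/null | awk '{print $2}')
echo "installed: ${installed:-none}"
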
# Run requested action
parse_args "$@"

case "$ACTION" in

202
lib/check/all.sh
202
lib/check/all.sh
@@ -35,7 +35,7 @@ check_touchid_sudo() {
# Check if Touch ID is configured for sudo
local pam_file="/etc/pam.d/sudo"
if [[ -f "$pam_file" ]] && grep -q "pam_tid.so" "$pam_file" 2> /dev/null; then
echo -e " ${GREEN}✓${NC} Touch ID Enabled for sudo"
echo -e " ${GREEN}✓${NC} Touch ID Biometric authentication enabled"
else
# Check if Touch ID is supported
local is_supported=false
@@ -48,7 +48,7 @@ check_touchid_sudo() {
fi

if [[ "$is_supported" == "true" ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Touch ID ${YELLOW}Not configured${NC} for sudo"
echo -e " ${YELLOW}${ICON_WARNING}${NC} Touch ID ${YELLOW}Not configured for sudo${NC}"
export TOUCHID_NOT_CONFIGURED=true
fi
fi
@@ -60,9 +60,9 @@ check_rosetta() {
# Check Rosetta 2 (for Apple Silicon Macs)
if [[ "$(uname -m)" == "arm64" ]]; then
if [[ -f "/Library/Apple/usr/share/rosetta/rosetta" ]]; then
echo -e " ${GREEN}✓${NC} Rosetta 2 Installed"
echo -e " ${GREEN}✓${NC} Rosetta 2 Intel app translation ready"
else
echo -e " ${YELLOW}${ICON_WARNING}${NC} Rosetta 2 ${YELLOW}Not installed${NC}"
echo -e " ${YELLOW}${ICON_WARNING}${NC} Rosetta 2 ${YELLOW}Intel app support missing${NC}"
export ROSETTA_NOT_INSTALLED=true
fi
fi
@@ -77,14 +77,15 @@ check_git_config() {
local git_email=$(git config --global user.email 2> /dev/null || echo "")

if [[ -n "$git_name" && -n "$git_email" ]]; then
echo -e " ${GREEN}✓${NC} Git Config Configured"
echo -e " ${GREEN}✓${NC} Git Global identity configured"
else
echo -e " ${YELLOW}${ICON_WARNING}${NC} Git Config ${YELLOW}Not configured${NC}"
echo -e " ${YELLOW}${ICON_WARNING}${NC} Git ${YELLOW}User identity not set${NC}"
fi
fi
}

check_all_config() {
echo -e "${BLUE}${ICON_ARROW}${NC} System Configuration"
check_touchid_sudo
check_rosetta
check_git_config
@@ -101,9 +102,9 @@ check_filevault() {
if command -v fdesetup > /dev/null 2>&1; then
local fv_status=$(fdesetup status 2> /dev/null || echo "")
if echo "$fv_status" | grep -q "FileVault is On"; then
echo -e " ${GREEN}✓${NC} FileVault Enabled"
echo -e " ${GREEN}✓${NC} FileVault Disk encryption active"
else
echo -e " ${RED}✗${NC} FileVault ${RED}Disabled${NC} (Recommend enabling)"
echo -e " ${RED}✗${NC} FileVault ${RED}Disk encryption disabled${NC}"
export FILEVAULT_DISABLED=true
fi
fi
@@ -112,15 +113,13 @@ check_filevault() {
check_firewall() {
# Check whitelist
if command -v is_whitelisted > /dev/null && is_whitelisted "firewall"; then return; fi
# Check firewall status
# Check firewall status using socketfilterfw (more reliable than defaults on modern macOS)
unset FIREWALL_DISABLED
local firewall_status=$(defaults read /Library/Preferences/com.apple.alf globalstate 2> /dev/null || echo "0")
if [[ "$firewall_status" == "1" || "$firewall_status" == "2" ]]; then
echo -e " ${GREEN}✓${NC} Firewall Enabled"
local firewall_output=$(sudo /usr/libexec/ApplicationFirewall/socketfilterfw --getglobalstate 2> /dev/null || echo "")
if [[ "$firewall_output" == *"State = 1"* ]] || [[ "$firewall_output" == *"State = 2"* ]]; then
echo -e " ${GREEN}✓${NC} Firewall Network protection enabled"
else
echo -e " ${YELLOW}${ICON_WARNING}${NC} Firewall ${YELLOW}Disabled${NC} (Consider enabling)"
echo -e " ${GRAY}System Settings → Network → Firewall, or run:${NC}"
echo -e " ${GRAY}sudo defaults write /Library/Preferences/com.apple.alf globalstate -int 1${NC}"
echo -e " ${YELLOW}${ICON_WARNING}${NC} Firewall ${YELLOW}Network protection disabled${NC}"
export FIREWALL_DISABLED=true
fi
}
@@ -132,12 +131,10 @@ check_gatekeeper() {
if command -v spctl > /dev/null 2>&1; then
local gk_status=$(spctl --status 2> /dev/null || echo "")
if echo "$gk_status" | grep -q "enabled"; then
echo -e " ${GREEN}✓${NC} Gatekeeper Active"
echo -e " ${GREEN}✓${NC} Gatekeeper App download protection active"
unset GATEKEEPER_DISABLED
else
echo -e " ${YELLOW}${ICON_WARNING}${NC} Gatekeeper ${YELLOW}Disabled${NC}"
echo -e " ${GRAY}Enable via System Settings → Privacy & Security, or:${NC}"
echo -e " ${GRAY}sudo spctl --master-enable${NC}"
echo -e " ${YELLOW}${ICON_WARNING}${NC} Gatekeeper ${YELLOW}App security disabled${NC}"
export GATEKEEPER_DISABLED=true
fi
fi
@@ -150,15 +147,15 @@ check_sip() {
if command -v csrutil > /dev/null 2>&1; then
local sip_status=$(csrutil status 2> /dev/null || echo "")
if echo "$sip_status" | grep -q "enabled"; then
echo -e " ${GREEN}✓${NC} SIP Enabled"
echo -e " ${GREEN}✓${NC} SIP System integrity protected"
else
echo -e " ${YELLOW}${ICON_WARNING}${NC} SIP ${YELLOW}Disabled${NC}"
echo -e " ${GRAY}Restart into Recovery → Utilities → Terminal → run: csrutil enable${NC}"
echo -e " ${YELLOW}${ICON_WARNING}${NC} SIP ${YELLOW}System protection disabled${NC}"
fi
fi
}

check_all_security() {
echo -e "${BLUE}${ICON_ARROW}${NC} Security Status"
check_filevault
check_firewall
check_gatekeeper
@@ -174,7 +171,7 @@ CACHE_DIR="${HOME}/.cache/mole"
CACHE_TTL=600 # 10 minutes in seconds

# Ensure cache directory exists
mkdir -p "$CACHE_DIR" 2> /dev/null || true
ensure_user_dir "$CACHE_DIR"

clear_cache_file() {
local file="$1"
@@ -207,68 +204,6 @@ is_cache_valid() {
[[ $cache_age -lt $ttl ]]
}

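Only the tail of is_cache_valid is visible in this hunk; its contract is "cache file exists and is younger than the TTL". A minimal sketch consistent with the `[[ $cache_age -lt $ttl ]]` line above, assuming macOS `stat -f %m` for the mtime (a reconstruction, not the repo's exact body):

# Sketch: cache file is valid if it exists and is younger than TTL seconds.
is_cache_valid() {
    local cache_file="$1"
    local ttl="${2:-$CACHE_TTL}"
    [[ -f "$cache_file" ]] || return 1
    local mtime cache_age
    mtime=$(stat -f %m "$cache_file" 2> /dev/null) || return 1
    cache_age=$(( $(date +%s) - mtime ))
    [[ $cache_age -lt $ttl ]]
}
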
check_homebrew_updates() {
# Check whitelist
if command -v is_whitelisted > /dev/null && is_whitelisted "check_brew_updates"; then return; fi
if ! command -v brew > /dev/null 2>&1; then
return
fi

local cache_file="$CACHE_DIR/brew_updates"
local formula_count=0
local cask_count=0

if is_cache_valid "$cache_file"; then
read -r formula_count cask_count < "$cache_file" 2> /dev/null || true
formula_count=${formula_count:-0}
cask_count=${cask_count:-0}
else
# Show spinner while checking
if [[ -t 1 ]]; then
start_inline_spinner "Checking Homebrew..."
fi

local outdated_list=""
outdated_list=$(brew outdated --quiet 2> /dev/null || echo "")
if [[ -n "$outdated_list" ]]; then
formula_count=$(echo "$outdated_list" | wc -l | tr -d ' ')
fi

local cask_list=""
cask_list=$(brew outdated --cask --quiet 2> /dev/null || echo "")
if [[ -n "$cask_list" ]]; then
cask_count=$(echo "$cask_list" | wc -l | tr -d ' ')
fi

echo "$formula_count $cask_count" > "$cache_file" 2> /dev/null || true

# Stop spinner before output
if [[ -t 1 ]]; then
stop_inline_spinner
fi
fi

local total_count=$((formula_count + cask_count))
export BREW_FORMULA_OUTDATED_COUNT=$formula_count
export BREW_CASK_OUTDATED_COUNT=$cask_count
export BREW_OUTDATED_COUNT=$total_count

if [[ $total_count -gt 0 ]]; then
local breakdown=""
if [[ $formula_count -gt 0 && $cask_count -gt 0 ]]; then
breakdown=" (${formula_count} formula, ${cask_count} cask)"
elif [[ $formula_count -gt 0 ]]; then
breakdown=" (${formula_count} formula)"
elif [[ $cask_count -gt 0 ]]; then
breakdown=" (${cask_count} cask)"
fi
echo -e " ${YELLOW}${ICON_WARNING}${NC} Homebrew ${YELLOW}${total_count} updates${NC}${breakdown}"
echo -e " ${GRAY}Run: ${GREEN}brew upgrade${NC} ${GRAY}and/or${NC} ${GREEN}brew upgrade --cask${NC}"
else
echo -e " ${GREEN}✓${NC} Homebrew Up to date"
fi
}

# Cache software update list to avoid calling softwareupdate twice
SOFTWARE_UPDATE_LIST=""

@@ -300,19 +235,56 @@ check_macos_update() {
local updates_available="false"
if [[ $(get_software_updates) == "Updates Available" ]]; then
updates_available="true"

# Verify with softwareupdate using --no-scan to avoid triggering a fresh scan
# which can timeout. We prioritize avoiding false negatives (missing actual updates)
# over false positives, so we only clear the update flag when softwareupdate
# explicitly reports "No new software available"
local sw_output=""
local sw_status=0
local spinner_started=false
if [[ -t 1 ]]; then
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking macOS updates..."
spinner_started=true
fi

local softwareupdate_timeout="${MO_SOFTWAREUPDATE_TIMEOUT:-10}"
if sw_output=$(run_with_timeout "$softwareupdate_timeout" softwareupdate -l --no-scan 2> /dev/null); then
:
else
sw_status=$?
fi

if [[ "$spinner_started" == "true" ]]; then
stop_inline_spinner
fi

# Debug logging for troubleshooting
if [[ -n "${MO_DEBUG:-}" ]]; then
echo "[DEBUG] softwareupdate exit status: $sw_status, output lines: $(echo "$sw_output" | wc -l | tr -d ' ')" >&2
fi

# Prefer avoiding false negatives: if the system indicates updates are pending,
# only clear the flag when softwareupdate returns a list without any update entries.
if [[ $sw_status -eq 0 && -n "$sw_output" ]]; then
if ! echo "$sw_output" | grep -qE '^[[:space:]]*\*'; then
updates_available="false"
fi
fi
fi

export MACOS_UPDATE_AVAILABLE="$updates_available"

if [[ "$updates_available" == "true" ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} macOS ${YELLOW}Update available${NC}"
echo -e " ${GRAY}update available in final step${NC}"
else
echo -e " ${GREEN}✓${NC} macOS Up to date"
echo -e " ${GREEN}✓${NC} macOS System up to date"
fi
}

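`run_with_timeout` comes from the shared library; the behavior check_macos_update depends on is "run the command, kill it after N seconds, report failure on timeout". One way to sketch that shape without GNU `timeout`, purely as an assumption (not Mole's actual implementation):

# Sketch: run a command, kill it if it exceeds the timeout.
run_with_timeout() {
    local secs="$1"; shift
    "$@" &
    local cmd_pid=$!
    # Watchdog; redirected so it cannot hold open a command-substitution pipe.
    ( sleep "$secs" && kill -TERM "$cmd_pid" ) > /dev/null 2>&1 &
    local watchdog_pid=$!
    local status=0
    wait "$cmd_pid" || status=$?
    kill -TERM "$watchdog_pid" 2> /dev/null || true
    return "$status"
}
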
check_mole_update() {
if command -v is_whitelisted > /dev/null && is_whitelisted "check_mole_update"; then return; fi

# Check if Mole has updates
# Auto-detect version from mole main script
local current_version
@@ -333,16 +305,27 @@ check_mole_update() {
else
# Show spinner while checking
if [[ -t 1 ]]; then
start_inline_spinner "Checking Mole version..."
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking Mole version..."
fi

# Try to get latest version from GitHub
if command -v curl > /dev/null 2>&1; then
latest_version=$(curl -fsSL https://api.github.com/repos/tw93/mole/releases/latest 2> /dev/null | grep '"tag_name"' | sed -E 's/.*"v?([^"]+)".*/\1/' || echo "")
# Save to cache
if [[ -n "$latest_version" ]]; then
echo "$latest_version" > "$cache_file" 2> /dev/null || true
# Run in background to allow Ctrl+C to interrupt
local temp_version
temp_version=$(mktemp_file "mole_version_check")
curl -fsSL --connect-timeout 3 --max-time 5 https://api.github.com/repos/tw93/mole/releases/latest 2> /dev/null | grep '"tag_name"' | sed -E 's/.*"v?([^"]+)".*/\1/' > "$temp_version" &
local curl_pid=$!

# Wait for curl to complete (allows Ctrl+C to interrupt)
if wait "$curl_pid" 2> /dev/null; then
latest_version=$(cat "$temp_version" 2> /dev/null || echo "")
# Save to cache
if [[ -n "$latest_version" ]]; then
ensure_user_file "$cache_file"
echo "$latest_version" > "$cache_file" 2> /dev/null || true
fi
fi
rm -f "$temp_version" 2> /dev/null || true
fi

# Stop spinner
@@ -361,13 +344,12 @@ check_mole_update() {
# Compare versions
if [[ "$(printf '%s\n' "$current_version" "$latest_version" | sort -V | head -1)" == "$current_version" ]]; then
export MOLE_UPDATE_AVAILABLE="true"
echo -e " ${YELLOW}${ICON_WARNING}${NC} Mole ${YELLOW}${latest_version} available${NC} (current: ${current_version})"
echo -e " ${GRAY}Run: ${GREEN}mo update${NC}"
echo -e " ${YELLOW}${ICON_WARNING}${NC} Mole ${YELLOW}${latest_version} available${NC} (running ${current_version})"
else
echo -e " ${GREEN}✓${NC} Mole Up to date (${current_version})"
echo -e " ${GREEN}✓${NC} Mole Latest version ${current_version}"
fi
else
echo -e " ${GREEN}✓${NC} Mole Up to date (${current_version})"
echo -e " ${GREEN}✓${NC} Mole Latest version ${current_version}"
fi
}

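The comparison above relies on `sort -V`: when the current version sorts first, the remote one is newer or equal, which also means the branch can fire when the two versions are identical unless an earlier guard filters that case. A stricter sketch with an explicit equality check:

# Sketch: succeed only when $2 is strictly newer than $1.
version_newer() {
    local current="$1" latest="$2"
    [[ "$current" == "$latest" ]] && return 1
    [[ "$(printf '%s\n' "$current" "$latest" | sort -V | head -1)" == "$current" ]]
}
version_newer "1.2.3" "1.10.0" && echo "update available"
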
@@ -375,12 +357,11 @@ check_all_updates() {
# Reset spinner flag for softwareupdate
unset SOFTWAREUPDATE_SPINNER_SHOWN

check_homebrew_updates

# Preload software update data to avoid delays between subsequent checks
# Only redirect stdout, keep stderr for spinner display
get_software_updates > /dev/null

echo -e "${BLUE}${ICON_ARROW}${NC} System Updates"
check_appstore_updates
check_macos_update
check_mole_update
@@ -488,7 +469,7 @@ check_login_items() {
if [[ -t 0 ]]; then
# Show spinner while getting login items
if [[ -t 1 ]]; then
start_inline_spinner "Checking login items..."
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking login items..."
fi

while IFS= read -r login_item; do
@@ -503,16 +484,16 @@ check_login_items() {
fi

if [[ $login_items_count -gt 15 ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Login Items ${YELLOW}${login_items_count} apps${NC} auto-start (High)"
echo -e " ${YELLOW}${ICON_WARNING}${NC} Login Items ${YELLOW}${login_items_count} apps${NC}"
elif [[ $login_items_count -gt 0 ]]; then
echo -e " ${GREEN}✓${NC} Login Items ${login_items_count} apps auto-start"
echo -e " ${GREEN}✓${NC} Login Items ${login_items_count} apps"
else
echo -e " ${GREEN}✓${NC} Login Items None"
return
fi

# Show items in a single line
local preview_limit=5
# Show items in a single line (compact)
local preview_limit=3
((preview_limit > login_items_count)) && preview_limit=$login_items_count

local items_display=""
@@ -526,11 +507,10 @@ check_login_items() {

if ((login_items_count > preview_limit)); then
local remaining=$((login_items_count - preview_limit))
items_display="${items_display}, and ${remaining} more"
items_display="${items_display} +${remaining}"
fi

echo -e " ${GRAY}${items_display}${NC}"
echo -e " ${GRAY}Manage in System Settings → Login Items${NC}"
}

check_cache_size() {
@@ -544,7 +524,7 @@ check_cache_size() {

# Show spinner while calculating cache size
if [[ -t 1 ]]; then
start_inline_spinner "Scanning cache..."
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning cache..."
fi

for cache_path in "${cache_paths[@]}"; do
@@ -581,7 +561,8 @@ check_swap_usage() {
if command -v sysctl > /dev/null 2>&1; then
local swap_info=$(sysctl vm.swapusage 2> /dev/null || echo "")
if [[ -n "$swap_info" ]]; then
local swap_used=$(echo "$swap_info" | grep -o "used = [0-9.]*[GM]" | awk '{print $3}' || echo "0M")
local swap_used=$(echo "$swap_info" | grep -o "used = [0-9.]*[GM]" | awk 'NR==1{print $3}')
swap_used=${swap_used:-0M}
local swap_num="${swap_used//[GM]/}"

if [[ "$swap_used" == *"G"* ]]; then
@@ -601,19 +582,14 @@ check_swap_usage() {
check_brew_health() {
# Check whitelist
if command -v is_whitelisted > /dev/null && is_whitelisted "check_brew_health"; then return; fi
# Check Homebrew status (fast)
if command -v brew > /dev/null 2>&1; then
# Skip slow 'brew doctor' check by default
echo -e " ${GREEN}✓${NC} Homebrew Installed"
fi
}

check_system_health() {
echo -e "${BLUE}${ICON_ARROW}${NC} System Health"
check_disk_space
check_memory_usage
check_swap_usage
check_login_items
check_cache_size
# Time Machine check is optional; skip by default to avoid noise on systems without backups
check_brew_health
}

@@ -24,9 +24,9 @@ get_memory_info() {
vm_output=$(vm_stat 2> /dev/null || echo "")
page_size=4096

active=$(echo "$vm_output" | LC_ALL=C awk '/Pages active:/ {print $NF}' | tr -d '.' 2> /dev/null || echo "0")
wired=$(echo "$vm_output" | LC_ALL=C awk '/Pages wired down:/ {print $NF}' | tr -d '.' 2> /dev/null || echo "0")
compressed=$(echo "$vm_output" | LC_ALL=C awk '/Pages occupied by compressor:/ {print $NF}' | tr -d '.' 2> /dev/null || echo "0")
active=$(echo "$vm_output" | LC_ALL=C awk '/Pages active:/ {print $NF}' | tr -d '.\n' 2> /dev/null)
wired=$(echo "$vm_output" | LC_ALL=C awk '/Pages wired down:/ {print $NF}' | tr -d '.\n' 2> /dev/null)
compressed=$(echo "$vm_output" | LC_ALL=C awk '/Pages occupied by compressor:/ {print $NF}' | tr -d '.\n' 2> /dev/null)

active=${active:-0}
wired=${wired:-0}
@@ -47,8 +47,8 @@ get_disk_info() {
df_output=$(command df -k "$home" 2> /dev/null | tail -1)

local total_kb used_kb
total_kb=$(echo "$df_output" | LC_ALL=C awk '{print $2}' 2> /dev/null || echo "0")
used_kb=$(echo "$df_output" | LC_ALL=C awk '{print $3}' 2> /dev/null || echo "0")
total_kb=$(echo "$df_output" | LC_ALL=C awk 'NR==1{print $2}' 2> /dev/null)
used_kb=$(echo "$df_output" | LC_ALL=C awk 'NR==1{print $3}' 2> /dev/null)

total_kb=${total_kb:-0}
used_kb=${used_kb:-0}
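
vm_stat prints page counts with a trailing period (hence the `tr -d '.'`), and the module converts pages to bytes with a pinned page size of 4096; `sysctl -n hw.pagesize` would return the machine's actual value. The arithmetic as a worked sketch:

# Sketch: turn a vm_stat page count into MB using the pinned 4096-byte page.
page_size=4096
active=$(vm_stat | LC_ALL=C awk '/Pages active:/ {print $NF}' | tr -d '.')
active=${active:-0}
echo "active memory: $(( active * page_size / 1024 / 1024 )) MB"
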
@@ -122,16 +122,30 @@ EOF
# Collect all optimization items
local -a items=()

# Always-on items (no size checks - instant)
items+=('system_maintenance|System Maintenance|Rebuild system databases & flush caches|true')
items+=('maintenance_scripts|Maintenance Scripts|Rotate system logs|true')
items+=('recent_items|Recent Items|Clear recent apps/documents/servers lists|true')
items+=('log_cleanup|Diagnostics Cleanup|Purge old diagnostic & crash logs|true')
items+=('mail_downloads|Mail Downloads|Clear old mail attachments (> 30 days)|true')
items+=('swap_cleanup|Swap Refresh|Reset swap files and dynamic pager|true')
items+=('spotlight_cache_cleanup|Spotlight Cache|Clear user-level Spotlight indexes|true')
items+=('developer_cleanup|Developer Cleanup|Clear Xcode DerivedData & DeviceSupport|true')
items+=('network_optimization|Network Optimization|Flush DNS, ARP & reset mDNS|true')
# Core optimizations (safe and valuable)
items+=('system_maintenance|DNS & Spotlight Check|Refresh DNS cache & verify Spotlight status|true')
items+=('cache_refresh|Finder Cache Refresh|Refresh QuickLook thumbnails & icon services cache|true')
items+=('saved_state_cleanup|App State Cleanup|Remove old saved application states (30+ days)|true')
items+=('fix_broken_configs|Broken Config Repair|Fix corrupted preferences files|true')
items+=('network_optimization|Network Cache Refresh|Optimize DNS cache & restart mDNSResponder|true')

# Advanced optimizations (high value, auto-run with safety checks)
items+=('sqlite_vacuum|Database Optimization|Compress SQLite databases for Mail, Safari & Messages (skips if apps are running)|true')
items+=('launch_services_rebuild|LaunchServices Repair|Repair "Open with" menu & file associations|true')
items+=('font_cache_rebuild|Font Cache Rebuild|Rebuild font database to fix rendering issues|true')
items+=('dock_refresh|Dock Refresh|Fix broken icons and visual glitches in the Dock|true')

# System performance optimizations (new)
items+=('memory_pressure_relief|Memory Optimization|Release inactive memory to improve system responsiveness|true')
items+=('network_stack_optimize|Network Stack Refresh|Flush routing table and ARP cache to resolve network issues|true')
items+=('disk_permissions_repair|Permission Repair|Fix user directory permission issues|true')
items+=('bluetooth_reset|Bluetooth Refresh|Restart Bluetooth module to fix connectivity (skips if in use)|true')
items+=('spotlight_index_optimize|Spotlight Optimization|Rebuild index if search is slow (smart detection)|true')

# Removed high-risk optimizations:
# - startup_items_cleanup: Risk of deleting legitimate app helpers
# - system_services_refresh: Risk of data loss when killing system services
# - dyld_cache_update: Low benefit, time-consuming, auto-managed by macOS

# Output items as JSON
local first=true

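Each optimization item above is one pipe-delimited record, `id|Title|Description|enabled`, so consumers can split it with `read` and no external parser. A sketch of reading a record back, assuming that field order holds everywhere:

# Sketch: split an 'id|Title|Description|enabled' record into fields.
item='sqlite_vacuum|Database Optimization|Compress SQLite databases|true'
IFS='|' read -r id title desc enabled <<< "$item"
echo "id=$id enabled=$enabled"
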
@@ -1,29 +1,19 @@
#!/bin/bash
# User GUI Applications Cleanup Module
# Desktop applications, communication tools, media players, games, utilities

# User GUI Applications Cleanup Module (desktop apps, media, utilities).
set -euo pipefail

# Clean Xcode and iOS development tools
# Archives can be significant in size (app packaging files)
# DeviceSupport files for old iOS versions can accumulate
# Note: Skips critical files if Xcode is running
# Xcode and iOS tooling.
clean_xcode_tools() {
# Check if Xcode is running for safer cleanup of critical resources
# Skip DerivedData/Archives while Xcode is running.
local xcode_running=false
if pgrep -x "Xcode" > /dev/null 2>&1; then
xcode_running=true
fi

# Safe to clean regardless of Xcode state
safe_clean ~/Library/Developer/CoreSimulator/Caches/* "Simulator cache"
safe_clean ~/Library/Developer/CoreSimulator/Devices/*/data/tmp/* "Simulator temp files"
safe_clean ~/Library/Caches/com.apple.dt.Xcode/* "Xcode cache"
safe_clean ~/Library/Developer/Xcode/iOS\ Device\ Logs/* "iOS device logs"
safe_clean ~/Library/Developer/Xcode/watchOS\ Device\ Logs/* "watchOS device logs"
safe_clean ~/Library/Developer/Xcode/Products/* "Xcode build products"

# Clean build artifacts only if Xcode is not running
if [[ "$xcode_running" == "false" ]]; then
safe_clean ~/Library/Developer/Xcode/DerivedData/* "Xcode derived data"
safe_clean ~/Library/Developer/Xcode/Archives/* "Xcode archives"
@@ -31,20 +21,18 @@ clean_xcode_tools() {
echo -e " ${YELLOW}${ICON_WARNING}${NC} Xcode is running, skipping DerivedData and Archives cleanup"
fi
}

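`safe_clean` itself lives in the shared library, not in this diff; the calls in this module rely on it to honor DRY_RUN, tolerate unmatched globs, and take a trailing display label. A reduced sketch of that contract (an assumption about the helper, not its actual body):

# Sketch: remove matching paths unless DRY_RUN; last argument is the label.
safe_clean() {
    local label="${*: -1}"
    local -a targets=("${@:1:$#-1}")
    local path
    for path in "${targets[@]}"; do
        [[ -e "$path" ]] || continue   # unmatched globs arrive literal; skip them
        if [[ "${DRY_RUN:-false}" == "true" ]]; then
            echo "would remove: $path ($label)"
        else
            rm -rf "$path" 2> /dev/null || true
        fi
    done
}
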
# Clean code editors (VS Code, Sublime, etc.)
# Code editors.
clean_code_editors() {
safe_clean ~/Library/Application\ Support/Code/logs/* "VS Code logs"
safe_clean ~/Library/Application\ Support/Code/Cache/* "VS Code cache"
safe_clean ~/Library/Application\ Support/Code/CachedExtensions/* "VS Code extension cache"
safe_clean ~/Library/Application\ Support/Code/CachedData/* "VS Code data cache"
# safe_clean ~/Library/Caches/JetBrains/* "JetBrains cache"
safe_clean ~/Library/Caches/com.sublimetext.*/* "Sublime Text cache"
}

# Clean communication apps (Slack, Discord, Zoom, etc.)
# Communication apps.
clean_communication_apps() {
safe_clean ~/Library/Application\ Support/discord/Cache/* "Discord cache"
safe_clean ~/Library/Application\ Support/legcord/Cache/* "Legcord cache"
safe_clean ~/Library/Application\ Support/Slack/Cache/* "Slack cache"
safe_clean ~/Library/Caches/us.zoom.xos/* "Zoom cache"
safe_clean ~/Library/Caches/com.tencent.xinWeChat/* "WeChat cache"
@@ -56,49 +44,43 @@ clean_communication_apps() {
safe_clean ~/Library/Caches/com.tencent.WeWorkMac/* "WeCom cache"
safe_clean ~/Library/Caches/com.feishu.*/* "Feishu cache"
}

# Clean DingTalk
# DingTalk.
clean_dingtalk() {
safe_clean ~/Library/Caches/dd.work.exclusive4aliding/* "DingTalk (iDingTalk) cache"
safe_clean ~/Library/Caches/dd.work.exclusive4aliding/* "DingTalk iDingTalk cache"
safe_clean ~/Library/Caches/com.alibaba.AliLang.osx/* "AliLang security component"
safe_clean ~/Library/Application\ Support/iDingTalk/log/* "DingTalk logs"
safe_clean ~/Library/Application\ Support/iDingTalk/holmeslogs/* "DingTalk holmes logs"
}

# Clean AI assistants
# AI assistants.
clean_ai_apps() {
safe_clean ~/Library/Caches/com.openai.chat/* "ChatGPT cache"
safe_clean ~/Library/Caches/com.anthropic.claudefordesktop/* "Claude desktop cache"
safe_clean ~/Library/Logs/Claude/* "Claude logs"
}

# Clean design and creative tools
# Design and creative tools.
clean_design_tools() {
safe_clean ~/Library/Caches/com.bohemiancoding.sketch3/* "Sketch cache"
safe_clean ~/Library/Application\ Support/com.bohemiancoding.sketch3/cache/* "Sketch app cache"
safe_clean ~/Library/Caches/Adobe/* "Adobe cache"
safe_clean ~/Library/Caches/com.adobe.*/* "Adobe app caches"
safe_clean ~/Library/Caches/com.figma.Desktop/* "Figma cache"
safe_clean ~/Library/Caches/com.raycast.macos/* "Raycast cache"
# Raycast cache is protected (clipboard history, images).
}

# Clean video editing tools
# Video editing tools.
clean_video_tools() {
safe_clean ~/Library/Caches/net.telestream.screenflow10/* "ScreenFlow cache"
safe_clean ~/Library/Caches/com.apple.FinalCut/* "Final Cut Pro cache"
safe_clean ~/Library/Caches/com.blackmagic-design.DaVinciResolve/* "DaVinci Resolve cache"
safe_clean ~/Library/Caches/com.adobe.PremierePro.*/* "Premiere Pro cache"
}

# Clean 3D and CAD tools
# 3D and CAD tools.
clean_3d_tools() {
safe_clean ~/Library/Caches/org.blenderfoundation.blender/* "Blender cache"
safe_clean ~/Library/Caches/com.maxon.cinema4d/* "Cinema 4D cache"
safe_clean ~/Library/Caches/com.autodesk.*/* "Autodesk cache"
safe_clean ~/Library/Caches/com.sketchup.*/* "SketchUp cache"
}

# Clean productivity apps
# Productivity apps.
clean_productivity_apps() {
safe_clean ~/Library/Caches/com.tw93.MiaoYan/* "MiaoYan cache"
safe_clean ~/Library/Caches/com.klee.desktop/* "Klee cache"
@@ -107,31 +89,24 @@ clean_productivity_apps() {
safe_clean ~/Library/Caches/com.filo.client/* "Filo cache"
safe_clean ~/Library/Caches/com.flomoapp.mac/* "Flomo cache"
}

# Clean music and media players
# Note: Spotify cache is protected by default (may contain offline music)
# Users can override via whitelist settings
# Music/media players (protect Spotify offline music).
clean_media_players() {
# Spotify cache protection: check for offline music indicators
local spotify_cache="$HOME/Library/Caches/com.spotify.client"
local spotify_data="$HOME/Library/Application Support/Spotify"
local has_offline_music=false

# Check for offline music database or large cache (>500MB)
# Heuristics: offline DB or large cache.
if [[ -f "$spotify_data/PersistentCache/Storage/offline.bnk" ]] ||
[[ -d "$spotify_data/PersistentCache/Storage" && -n "$(find "$spotify_data/PersistentCache/Storage" -type f -name "*.file" 2> /dev/null | head -1)" ]]; then
has_offline_music=true
elif [[ -d "$spotify_cache" ]]; then
local cache_size_kb
cache_size_kb=$(get_path_size_kb "$spotify_cache")
# Large cache (>500MB) likely contains offline music
if [[ $cache_size_kb -ge 512000 ]]; then
has_offline_music=true
fi
fi

if [[ "$has_offline_music" == "true" ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Spotify cache protected (offline music detected)"
echo -e " ${YELLOW}${ICON_WARNING}${NC} Spotify cache protected · offline music detected"
note_activity
else
safe_clean ~/Library/Caches/com.spotify.client/* "Spotify cache"
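
The offline-music heuristic needs a directory size that cannot hang the run, which is what `get_path_size_kb` provides elsewhere in the codebase. A plausible sketch under that assumption, pairing `du -sk` with the repo's timeout wrapper:

# Sketch: directory size in KB, guarded so du cannot stall on bad paths.
get_path_size_kb() {
    local kb
    kb=$(run_with_timeout 5 du -sk "$1" 2> /dev/null | awk '{print $1}')
    echo "${kb:-0}"
}
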
@@ -145,8 +120,7 @@ clean_media_players() {
safe_clean ~/Library/Caches/com.kugou.mac/* "Kugou Music cache"
safe_clean ~/Library/Caches/com.kuwo.mac/* "Kuwo Music cache"
}

# Clean video players
# Video players.
clean_video_players() {
safe_clean ~/Library/Caches/com.colliderli.iina "IINA cache"
safe_clean ~/Library/Caches/org.videolan.vlc "VLC cache"
@@ -157,8 +131,7 @@ clean_video_players() {
safe_clean ~/Library/Caches/com.douyu.*/* "Douyu cache"
safe_clean ~/Library/Caches/com.huya.*/* "Huya cache"
}

# Clean download managers
# Download managers.
clean_download_managers() {
safe_clean ~/Library/Caches/net.xmac.aria2gui "Aria2 cache"
safe_clean ~/Library/Caches/org.m0k.transmission "Transmission cache"
@@ -167,8 +140,7 @@ clean_download_managers() {
safe_clean ~/Library/Caches/com.folx.*/* "Folx cache"
safe_clean ~/Library/Caches/com.charlessoft.pacifist/* "Pacifist cache"
}

# Clean gaming platforms
# Gaming platforms.
clean_gaming_platforms() {
safe_clean ~/Library/Caches/com.valvesoftware.steam/* "Steam cache"
safe_clean ~/Library/Application\ Support/Steam/htmlcache/* "Steam web cache"
@@ -179,48 +151,41 @@ clean_gaming_platforms() {
safe_clean ~/Library/Caches/com.gog.galaxy/* "GOG Galaxy cache"
safe_clean ~/Library/Caches/com.riotgames.*/* "Riot Games cache"
}

# Clean translation and dictionary apps
# Translation/dictionary apps.
clean_translation_apps() {
safe_clean ~/Library/Caches/com.youdao.YoudaoDict "Youdao Dictionary cache"
safe_clean ~/Library/Caches/com.eudic.* "Eudict cache"
safe_clean ~/Library/Caches/com.bob-build.Bob "Bob Translation cache"
}

# Clean screenshot and screen recording tools
# Screenshot/recording tools.
clean_screenshot_tools() {
safe_clean ~/Library/Caches/com.cleanshot.* "CleanShot cache"
safe_clean ~/Library/Caches/com.reincubate.camo "Camo cache"
safe_clean ~/Library/Caches/com.xnipapp.xnip "Xnip cache"
}

# Clean email clients
# Email clients.
clean_email_clients() {
safe_clean ~/Library/Caches/com.readdle.smartemail-Mac "Spark cache"
safe_clean ~/Library/Caches/com.airmail.* "Airmail cache"
}

# Clean task management apps
# Task management apps.
clean_task_apps() {
safe_clean ~/Library/Caches/com.todoist.mac.Todoist "Todoist cache"
safe_clean ~/Library/Caches/com.any.do.* "Any.do cache"
}

# Clean shell and terminal utilities
# Shell/terminal utilities.
clean_shell_utils() {
safe_clean ~/.zcompdump* "Zsh completion cache"
safe_clean ~/.lesshst "less history"
safe_clean ~/.viminfo.tmp "Vim temporary files"
safe_clean ~/.wget-hsts "wget HSTS cache"
}

# Clean input method and system utilities
# Input methods and system utilities.
clean_system_utils() {
safe_clean ~/Library/Caches/com.runjuu.Input-Source-Pro/* "Input Source Pro cache"
safe_clean ~/Library/Caches/macos-wakatime.WakaTime/* "WakaTime cache"
}

# Clean note-taking apps
# Note-taking apps.
clean_note_apps() {
safe_clean ~/Library/Caches/notion.id/* "Notion cache"
safe_clean ~/Library/Caches/md.obsidian/* "Obsidian cache"
@@ -229,23 +194,21 @@ clean_note_apps() {
safe_clean ~/Library/Caches/com.evernote.*/* "Evernote cache"
safe_clean ~/Library/Caches/com.yinxiang.*/* "Yinxiang Note cache"
}

# Clean launcher and automation tools
# Launchers and automation tools.
clean_launcher_apps() {
safe_clean ~/Library/Caches/com.runningwithcrayons.Alfred/* "Alfred cache"
safe_clean ~/Library/Caches/cx.c3.theunarchiver/* "The Unarchiver cache"
}

# Clean remote desktop tools
# Remote desktop tools.
clean_remote_desktop() {
safe_clean ~/Library/Caches/com.teamviewer.*/* "TeamViewer cache"
safe_clean ~/Library/Caches/com.anydesk.*/* "AnyDesk cache"
safe_clean ~/Library/Caches/com.todesk.*/* "ToDesk cache"
safe_clean ~/Library/Caches/com.sunlogin.*/* "Sunlogin cache"
}

# Main function to clean all user GUI applications
# Main entry for GUI app cleanup.
clean_user_gui_applications() {
stop_section_spinner
clean_xcode_tools
clean_code_editors
clean_communication_apps

@@ -1,27 +1,19 @@
#!/bin/bash
# Application Data Cleanup Module

set -euo pipefail

# Clean .DS_Store (Finder metadata), home uses maxdepth 5, excludes slow paths, max 500 files
# Args: $1=target_dir, $2=label
clean_ds_store_tree() {
local target="$1"
local label="$2"

[[ -d "$target" ]] || return 0

local file_count=0
local total_bytes=0
local spinner_active="false"

if [[ -t 1 ]]; then
MOLE_SPINNER_PREFIX=" "
start_inline_spinner "Cleaning Finder metadata..."
spinner_active="true"
fi

# Build exclusion paths for find (skip common slow/large directories)
local -a exclude_paths=(
-path "*/Library/Application Support/MobileSync" -prune -o
-path "*/Library/Developer" -prune -o
@@ -30,15 +22,11 @@ clean_ds_store_tree() {
-path "*/.git" -prune -o
-path "*/Library/Caches" -prune -o
)

# Build find command to avoid unbound array expansion with set -u
local -a find_cmd=("command" "find" "$target")
if [[ "$target" == "$HOME" ]]; then
find_cmd+=("-maxdepth" "5")
fi
find_cmd+=("${exclude_paths[@]}" "-type" "f" "-name" ".DS_Store" "-print0")

# Find .DS_Store files with exclusions and depth limit
while IFS= read -r -d '' ds_file; do
local size
size=$(get_file_size "$ds_file")
@@ -47,27 +35,21 @@ clean_ds_store_tree() {
if [[ "$DRY_RUN" != "true" ]]; then
rm -f "$ds_file" 2> /dev/null || true
fi

# Stop after 500 files to avoid hanging
if [[ $file_count -ge 500 ]]; then
if [[ $file_count -ge $MOLE_MAX_DS_STORE_FILES ]]; then
break
fi
done < <("${find_cmd[@]}" 2> /dev/null || true)

if [[ "$spinner_active" == "true" ]]; then
stop_inline_spinner
echo -ne "\r\033[K"
stop_section_spinner
fi

if [[ $file_count -gt 0 ]]; then
local size_human
size_human=$(bytes_to_human "$total_bytes")
if [[ "$DRY_RUN" == "true" ]]; then
echo -e " ${YELLOW}→${NC} $label ${YELLOW}($file_count files, $size_human dry)${NC}"
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $label ${YELLOW}($file_count files, $size_human dry)${NC}"
else
echo -e " ${GREEN}${ICON_SUCCESS}${NC} $label ${GREEN}($file_count files, $size_human)${NC}"
fi

local size_kb=$(((total_bytes + 1023) / 1024))
((files_cleaned += file_count))
((total_size_cleaned += size_kb))
@@ -75,188 +57,132 @@
note_activity
fi
}

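`bytes_to_human` is another shared helper assumed by this module; all it needs to do is format a raw byte count for the summary line. A minimal awk-based sketch:

# Sketch: format a byte count as B/KB/MB/GB/TB for display.
bytes_to_human() {
    awk -v b="$1" 'BEGIN {
        split("B KB MB GB TB", u, " ")
        i = 1
        while (b >= 1024 && i < 5) { b /= 1024; i++ }
        printf "%.1f%s", b, u[i]
    }'
}
bytes_to_human 1536000   # prints 1.5MB
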
# Clean data for uninstalled apps (caches/logs/states older than 60 days)
# Protects system apps, major vendors, scans /Applications+running processes
# Max 100 items/pattern, 2s du timeout. Env: ORPHAN_AGE_THRESHOLD, DRY_RUN
# Scan system for installed application bundle IDs
# Orphaned app data (60+ days inactive). Env: ORPHAN_AGE_THRESHOLD, DRY_RUN
# Usage: scan_installed_apps "output_file"
scan_installed_apps() {
local installed_bundles="$1"

# Scan all Applications directories
# Cache installed app scan briefly to speed repeated runs.
local cache_file="$HOME/.cache/mole/installed_apps_cache"
local cache_age_seconds=300 # 5 minutes
if [[ -f "$cache_file" ]]; then
local cache_mtime=$(get_file_mtime "$cache_file")
local current_time=$(date +%s)
local age=$((current_time - cache_mtime))
if [[ $age -lt $cache_age_seconds ]]; then
debug_log "Using cached app list (age: ${age}s)"
if [[ -r "$cache_file" ]] && [[ -s "$cache_file" ]]; then
if cat "$cache_file" > "$installed_bundles" 2> /dev/null; then
return 0
else
debug_log "Warning: Failed to read cache, rebuilding"
fi
else
debug_log "Warning: Cache file empty or unreadable, rebuilding"
fi
fi
fi
debug_log "Scanning installed applications (cache expired or missing)"
local -a app_dirs=(
"/Applications"
"/System/Applications"
"$HOME/Applications"
)

# Create a temp dir for parallel results to avoid write contention
# Temp dir avoids write contention across parallel scans.
local scan_tmp_dir=$(create_temp_dir)

# Start progress indicator with real-time count
local progress_count_file="$scan_tmp_dir/progress_count"
echo "0" > "$progress_count_file"

# Background spinner that shows live progress
(
trap 'exit 0' TERM INT EXIT
local spinner_chars="|/-\\"
local i=0
while true; do
local count=$(cat "$progress_count_file" 2> /dev/null || echo "0")
local c="${spinner_chars:$((i % 4)):1}"
echo -ne "\r\033[K $c Scanning installed apps... $count found" >&2
((i++))
sleep 0.1
done
) &
local spinner_pid=$!

# Parallel scan for applications
local pids=()
local dir_idx=0
for app_dir in "${app_dirs[@]}"; do
[[ -d "$app_dir" ]] || continue
(
# Quickly find all .app bundles first
local -a app_paths=()
while IFS= read -r app_path; do
[[ -n "$app_path" ]] && app_paths+=("$app_path")
done < <(find "$app_dir" -name '*.app' -maxdepth 3 -type d 2> /dev/null)

# Read bundle IDs with PlistBuddy
local count=0
for app_path in "${app_paths[@]:-}"; do
local plist_path="$app_path/Contents/Info.plist"
[[ ! -f "$plist_path" ]] && continue

local bundle_id=$(/usr/libexec/PlistBuddy -c "Print :CFBundleIdentifier" "$plist_path" 2> /dev/null || echo "")

if [[ -n "$bundle_id" ]]; then
echo "$bundle_id"
((count++))

# Batch update progress every 10 apps to reduce I/O
if [[ $((count % 10)) -eq 0 ]]; then
local current=$(cat "$progress_count_file" 2> /dev/null || echo "0")
echo "$((current + 10))" > "$progress_count_file"
fi
fi
done

# Final progress update
if [[ $((count % 10)) -ne 0 ]]; then
local current=$(cat "$progress_count_file" 2> /dev/null || echo "0")
echo "$((current + count % 10))" > "$progress_count_file"
fi
) > "$scan_tmp_dir/apps_${dir_idx}.txt" &
pids+=($!)
((dir_idx++))
done

# Get running applications and LaunchAgents in parallel
# Collect running apps and LaunchAgents to avoid false orphan cleanup.
(
local running_apps=$(run_with_timeout 5 osascript -e 'tell application "System Events" to get bundle identifier of every application process' 2> /dev/null || echo "")
echo "$running_apps" | tr ',' '\n' | sed -e 's/^ *//;s/ *$//' -e '/^$/d' > "$scan_tmp_dir/running.txt"
) &
pids+=($!)

(
run_with_timeout 5 find ~/Library/LaunchAgents /Library/LaunchAgents \
-name "*.plist" -type f 2> /dev/null |
xargs -I {} basename {} .plist > "$scan_tmp_dir/agents.txt" 2> /dev/null || true
) &
pids+=($!)

# Wait for all background scans to complete
debug_log "Waiting for ${#pids[@]} background processes: ${pids[*]}"
for pid in "${pids[@]}"; do
wait "$pid" 2> /dev/null || true
done

# Stop the spinner
kill -TERM "$spinner_pid" 2> /dev/null || true
wait "$spinner_pid" 2> /dev/null || true
echo -ne "\r\033[K" >&2

# Merge all results
debug_log "All background processes completed"
cat "$scan_tmp_dir"/*.txt >> "$installed_bundles" 2> /dev/null || true
safe_remove "$scan_tmp_dir" true

# Deduplicate
sort -u "$installed_bundles" -o "$installed_bundles"

ensure_user_dir "$(dirname "$cache_file")"
cp "$installed_bundles" "$cache_file" 2> /dev/null || true
local app_count=$(wc -l < "$installed_bundles" 2> /dev/null | tr -d ' ')
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Found $app_count active/installed apps"
debug_log "Scanned $app_count unique applications"
}

# Check if bundle is orphaned
# Usage: is_bundle_orphaned "bundle_id" "directory_path" "installed_bundles_file"
is_bundle_orphaned() {
local bundle_id="$1"
local directory_path="$2"
local installed_bundles="$3"

# Skip system-critical and protected apps
if should_protect_data "$bundle_id"; then
return 1
fi

# Check if app exists in our scan
if grep -Fxq "$bundle_id" "$installed_bundles" 2> /dev/null; then
return 1
fi

# Check against centralized protected patterns (app_protection.sh)
if should_protect_data "$bundle_id"; then
return 1
fi

# Extra check for specific system bundles not covered by patterns
case "$bundle_id" in
loginwindow | dock | systempreferences | finder | safari)
loginwindow | dock | systempreferences | systemsettings | settings | controlcenter | finder | safari)
return 1
;;
esac

# Check file age - only clean if 60+ days inactive
# Use existing logic
if [[ -e "$directory_path" ]]; then
local last_modified_epoch=$(get_file_mtime "$directory_path")
local current_epoch=$(date +%s)
local days_since_modified=$(((current_epoch - last_modified_epoch) / 86400))

if [[ $days_since_modified -lt ${ORPHAN_AGE_THRESHOLD:-60} ]]; then
return 1
fi
fi

return 0
}

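The 60-day age test hinges on `get_file_mtime` returning an epoch timestamp. On macOS that maps to BSD `stat -f %m`; the GNU fallback below is an assumption added for portability, since Mole itself targets macOS:

# Sketch: file modification time as epoch seconds.
get_file_mtime() {
    stat -f %m "$1" 2> /dev/null || stat -c %Y "$1" 2> /dev/null || echo 0
}
days_old=$(( ( $(date +%s) - $(get_file_mtime "$HOME/Library/Caches") ) / 86400 ))
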
# Clean data for uninstalled apps (caches/logs/states older than 60 days)
# Protects system apps, major vendors, scans /Applications+running processes
# Max 100 items/pattern, 2s du timeout. Env: ORPHAN_AGE_THRESHOLD, DRY_RUN
# Orphaned app data sweep.
clean_orphaned_app_data() {
# Quick permission check - if we can't access Library folders, skip
if ! ls "$HOME/Library/Caches" > /dev/null 2>&1; then
stop_section_spinner
echo -e " ${YELLOW}${ICON_WARNING}${NC} Skipped: No permission to access Library folders"
return 0
fi

# Build list of installed/active apps
start_section_spinner "Scanning installed apps..."
local installed_bundles=$(create_temp_file)
scan_installed_apps "$installed_bundles"

# Track statistics
stop_section_spinner
local app_count=$(wc -l < "$installed_bundles" 2> /dev/null | tr -d ' ')
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Found $app_count active/installed apps"
local orphaned_count=0
local total_orphaned_kb=0

# Unified orphaned resource scanner (caches, logs, states, webkit, HTTP, cookies)
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning orphaned app resources..."

# Define resource types to scan
# CRITICAL: NEVER add LaunchAgents or LaunchDaemons (breaks login items/startup apps)
start_section_spinner "Scanning orphaned app resources..."
# CRITICAL: NEVER add LaunchAgents or LaunchDaemons (breaks login items/startup apps).
local -a resource_types=(
"$HOME/Library/Caches|Caches|com.*:org.*:net.*:io.*"
"$HOME/Library/Logs|Logs|com.*:org.*:net.*:io.*"
@@ -265,52 +191,32 @@ clean_orphaned_app_data() {
"$HOME/Library/HTTPStorages|HTTP|com.*:org.*:net.*:io.*"
"$HOME/Library/Cookies|Cookies|*.binarycookies"
)

orphaned_count=0

for resource_type in "${resource_types[@]}"; do
IFS='|' read -r base_path label patterns <<< "$resource_type"

# Check both existence and permission to avoid hanging
if [[ ! -d "$base_path" ]]; then
continue
fi

# Quick permission check - if we can't ls the directory, skip it
if ! ls "$base_path" > /dev/null 2>&1; then
continue
fi

# Build file pattern array
local -a file_patterns=()
IFS=':' read -ra pattern_arr <<< "$patterns"
for pat in "${pattern_arr[@]}"; do
file_patterns+=("$base_path/$pat")
done

# Scan and clean orphaned items
for item_path in "${file_patterns[@]}"; do
# Use shell glob (no ls needed)
# Limit iterations to prevent hanging on directories with too many files
local iteration_count=0
local max_iterations=100

for match in $item_path; do
[[ -e "$match" ]] || continue

# Safety: limit iterations to prevent infinite loops on massive directories
((iteration_count++))
if [[ $iteration_count -gt $max_iterations ]]; then
if [[ $iteration_count -gt $MOLE_MAX_ORPHAN_ITERATIONS ]]; then
break
fi

# Extract bundle ID from filename
local bundle_id=$(basename "$match")
bundle_id="${bundle_id%.savedState}"
bundle_id="${bundle_id%.binarycookies}"

if is_bundle_orphaned "$bundle_id" "$match" "$installed_bundles"; then
# Use timeout to prevent du from hanging on network mounts or problematic paths
local size_kb
size_kb=$(get_path_size_kb "$match")
if [[ -z "$size_kb" || "$size_kb" == "0" ]]; then
@@ -323,14 +229,11 @@ clean_orphaned_app_data() {
done
done
done

stop_inline_spinner

stop_section_spinner
if [[ $orphaned_count -gt 0 ]]; then
local orphaned_mb=$(echo "$total_orphaned_kb" | awk '{printf "%.1f", $1/1024}')
echo " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $orphaned_count items (~${orphaned_mb}MB)"
note_activity
fi

rm -f "$installed_bundles"
}

@@ -1,22 +1,17 @@
#!/bin/bash

# Clean Homebrew caches and remove orphaned dependencies
# Skips if run within 2 days, runs cleanup/autoremove in parallel with 120s timeout
# Env: MO_BREW_TIMEOUT, DRY_RUN
# Skips if run within 7 days, runs cleanup/autoremove in parallel with 120s timeout
clean_homebrew() {
command -v brew > /dev/null 2>&1 || return 0

# Dry run mode - just indicate what would happen
if [[ "${DRY_RUN:-false}" == "true" ]]; then
echo -e " ${YELLOW}→${NC} Homebrew (would cleanup and autoremove)"
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Homebrew · would cleanup and autoremove"
return 0
fi

# Smart caching: check if brew cleanup was run recently (within 2 days)
# Skip if cleaned recently to avoid repeated heavy operations.
local brew_cache_file="${HOME}/.cache/mole/brew_last_cleanup"
local cache_valid_days=2
local cache_valid_days=7
local should_skip=false

if [[ -f "$brew_cache_file" ]]; then
local last_cleanup
last_cleanup=$(cat "$brew_cache_file" 2> /dev/null || echo "0")
@@ -24,71 +19,80 @@ clean_homebrew() {
current_time=$(date +%s)
local time_diff=$((current_time - last_cleanup))
local days_diff=$((time_diff / 86400))

if [[ $days_diff -lt $cache_valid_days ]]; then
should_skip=true
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew (cleaned ${days_diff}d ago, skipped)"
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew · cleaned ${days_diff}d ago, skipped"
fi
fi

[[ "$should_skip" == "true" ]] && return 0

if [[ -t 1 ]]; then
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Homebrew cleanup and autoremove..."
# Skip cleanup if cache is small; still run autoremove.
local skip_cleanup=false
local brew_cache_size=0
if [[ -d ~/Library/Caches/Homebrew ]]; then
brew_cache_size=$(run_with_timeout 3 du -sk ~/Library/Caches/Homebrew 2> /dev/null | awk '{print $1}')
local du_exit=$?
if [[ $du_exit -eq 0 && -n "$brew_cache_size" && "$brew_cache_size" -lt 51200 ]]; then
skip_cleanup=true
fi
fi

# Spinner reflects whether cleanup is skipped.
if [[ -t 1 ]]; then
if [[ "$skip_cleanup" == "true" ]]; then
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Homebrew autoremove (cleanup skipped)..."
else
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Homebrew cleanup and autoremove..."
fi
fi
# Run cleanup/autoremove in parallel with a timeout guard.
local timeout_seconds=${MO_BREW_TIMEOUT:-120}

# Run brew cleanup and autoremove in parallel for performance
local brew_tmp_file autoremove_tmp_file
brew_tmp_file=$(create_temp_file)
local brew_pid autoremove_pid
if [[ "$skip_cleanup" == "false" ]]; then
brew_tmp_file=$(create_temp_file)
(brew cleanup > "$brew_tmp_file" 2>&1) &
brew_pid=$!
fi
autoremove_tmp_file=$(create_temp_file)

(brew cleanup > "$brew_tmp_file" 2>&1) &
local brew_pid=$!

(brew autoremove > "$autoremove_tmp_file" 2>&1) &
local autoremove_pid=$!

autoremove_pid=$!
local elapsed=0
local brew_done=false
local autoremove_done=false

# Wait for both to complete or timeout
[[ "$skip_cleanup" == "true" ]] && brew_done=true
while [[ "$brew_done" == "false" ]] || [[ "$autoremove_done" == "false" ]]; do
if [[ $elapsed -ge $timeout_seconds ]]; then
kill -TERM $brew_pid $autoremove_pid 2> /dev/null || true
[[ -n "$brew_pid" ]] && kill -TERM $brew_pid 2> /dev/null || true
kill -TERM $autoremove_pid 2> /dev/null || true
break
fi

kill -0 $brew_pid 2> /dev/null || brew_done=true
[[ -n "$brew_pid" ]] && { kill -0 $brew_pid 2> /dev/null || brew_done=true; }
kill -0 $autoremove_pid 2> /dev/null || autoremove_done=true

sleep 1
((elapsed++))
done

# Wait for processes to finish
local brew_success=false
if wait $brew_pid 2> /dev/null; then
brew_success=true
if [[ "$skip_cleanup" == "false" && -n "$brew_pid" ]]; then
if wait $brew_pid 2> /dev/null; then
brew_success=true
fi
fi

local autoremove_success=false
if wait $autoremove_pid 2> /dev/null; then
autoremove_success=true
fi

if [[ -t 1 ]]; then stop_inline_spinner; fi

# Process cleanup output and extract metrics
if [[ "$brew_success" == "true" && -f "$brew_tmp_file" ]]; then
# Summarize cleanup results.
if [[ "$skip_cleanup" == "true" ]]; then
# Cleanup was skipped due to small cache size
local size_mb=$((brew_cache_size / 1024))
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew cleanup · cache ${size_mb}MB, skipped"
elif [[ "$brew_success" == "true" && -f "$brew_tmp_file" ]]; then
local brew_output
brew_output=$(cat "$brew_tmp_file" 2> /dev/null || echo "")
local removed_count freed_space
removed_count=$(printf '%s\n' "$brew_output" | grep -c "Removing:" 2> /dev/null || true)
freed_space=$(printf '%s\n' "$brew_output" | grep -o "[0-9.]*[KMGT]B freed" 2> /dev/null | tail -1 || true)

if [[ $removed_count -gt 0 ]] || [[ -n "$freed_space" ]]; then
if [[ -n "$freed_space" ]]; then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew cleanup ${GREEN}($freed_space)${NC}"
@@ -97,26 +101,26 @@ clean_homebrew() {
fi
fi
elif [[ $elapsed -ge $timeout_seconds ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Homebrew cleanup timed out (run ${GRAY}brew cleanup${NC} manually)"
echo -e " ${YELLOW}${ICON_WARNING}${NC} Homebrew cleanup timed out · run ${GRAY}brew cleanup${NC} manually"
fi

# Process autoremove output - only show if packages were removed
# Only surface autoremove output when packages were removed.
if [[ "$autoremove_success" == "true" && -f "$autoremove_tmp_file" ]]; then
local autoremove_output
autoremove_output=$(cat "$autoremove_tmp_file" 2> /dev/null || echo "")
local removed_packages
removed_packages=$(printf '%s\n' "$autoremove_output" | grep -c "^Uninstalling" 2> /dev/null || true)

if [[ $removed_packages -gt 0 ]]; then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Removed orphaned dependencies (${removed_packages} packages)"
fi
elif [[ $elapsed -ge $timeout_seconds ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Autoremove timed out (run ${GRAY}brew autoremove${NC} manually)"
echo -e " ${YELLOW}${ICON_WARNING}${NC} Autoremove timed out · run ${GRAY}brew autoremove${NC} manually"
fi

# Update cache timestamp on successful completion
if [[ "$brew_success" == "true" || "$autoremove_success" == "true" ]]; then
mkdir -p "$(dirname "$brew_cache_file")"
# Update cache timestamp on successful completion or when cleanup was intelligently skipped
# This prevents repeated cache size checks within the 7-day window
|
||||
# Update cache timestamp when any work succeeded or was intentionally skipped.
|
||||
if [[ "$skip_cleanup" == "true" ]] || [[ "$brew_success" == "true" ]] || [[ "$autoremove_success" == "true" ]]; then
|
||||
ensure_user_file "$brew_cache_file"
|
||||
date +%s > "$brew_cache_file"
|
||||
fi
|
||||
}
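
# Usage sketch (illustrative, not called here): the function reads only the
# MO_BREW_TIMEOUT and DRY_RUN environment knobs documented above, so a cautious
# first run can preview the work before cleaning for real:
#
#   DRY_RUN=true clean_homebrew          # report what would happen, change nothing
#   MO_BREW_TIMEOUT=300 clean_homebrew   # give slow disks 5 minutes instead of 120s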

@@ -1,20 +1,11 @@
#!/bin/bash
# Cache Cleanup Module

set -euo pipefail

# Trigger all TCC permission dialogs upfront to avoid random interruptions
# Only runs once (uses ~/.cache/mole/permissions_granted flag)
# Preflight TCC prompts once to avoid mid-run interruptions.
check_tcc_permissions() {
    # Only check in interactive mode
    [[ -t 1 ]] || return 0

    local permission_flag="$HOME/.cache/mole/permissions_granted"

    # Skip if permissions were already granted
    [[ -f "$permission_flag" ]] && return 0

    # Key protected directories that require TCC approval
    local -a tcc_dirs=(
        "$HOME/Library/Caches"
        "$HOME/Library/Logs"
@@ -22,14 +13,11 @@ check_tcc_permissions() {
        "$HOME/Library/Containers"
        "$HOME/.cache"
    )

    # Quick permission test - if first directory is accessible, likely others are too
    # Use simple ls test instead of find to avoid triggering permission dialogs prematurely
    # Quick permission probe (avoid deep scans).
    local needs_permission_check=false
    if ! ls "$HOME/Library/Caches" > /dev/null 2>&1; then
        needs_permission_check=true
    fi

    if [[ "$needs_permission_check" == "true" ]]; then
        echo ""
        echo -e "${BLUE}First-time setup${NC}"
@@ -38,46 +26,31 @@ check_tcc_permissions() {
        echo ""
        echo -ne "${PURPLE}${ICON_ARROW}${NC} Press ${GREEN}Enter${NC} to continue: "
        read -r

        MOLE_SPINNER_PREFIX="" start_inline_spinner "Requesting permissions..."

        # Trigger all TCC prompts upfront by accessing each directory
        # Using find -maxdepth 1 ensures we touch the directory without deep scanning
        # Touch each directory to trigger prompts without deep scanning.
        for dir in "${tcc_dirs[@]}"; do
            [[ -d "$dir" ]] && command find "$dir" -maxdepth 1 -type d > /dev/null 2>&1
        done

        stop_inline_spinner
        echo ""
    fi

    # Mark permissions as granted (won't prompt again)
    mkdir -p "$(dirname "$permission_flag")" 2> /dev/null || true
    touch "$permission_flag" 2> /dev/null || true
    # Mark as granted to avoid repeat prompts.
    ensure_user_file "$permission_flag"
    return 0
}
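
# The run-once pattern above in miniature (assumption: plain coreutils, no Mole
# helpers): probe one protected path, prompt only when blocked, then persist a
# flag so later runs skip the prompt.
#
#   flag="$HOME/.cache/mole/permissions_granted"
#   if [[ ! -f "$flag" ]] && ! ls "$HOME/Library/Caches" > /dev/null 2>&1; then
#       find "$HOME/Library/Caches" -maxdepth 1 -type d > /dev/null 2>&1  # triggers the TCC dialog
#   fi
#   mkdir -p "$(dirname "$flag")" && touch "$flag"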

# Clean browser Service Worker cache, protecting web editing tools (capcut, photopea, pixlr)
# Args: $1=browser_name, $2=cache_path
# Clean Service Worker cache while protecting critical web editors.
clean_service_worker_cache() {
    local browser_name="$1"
    local cache_path="$2"

    [[ ! -d "$cache_path" ]] && return 0

    local cleaned_size=0
    local protected_count=0

    # Find all cache directories and calculate sizes with timeout protection
    while IFS= read -r cache_dir; do
        [[ ! -d "$cache_dir" ]] && continue

        # Extract domain from path using regex
        # Pattern matches: letters/numbers, hyphens, then dot, then TLD
        # Example: "abc123_https_example.com_0" → "example.com"
        # Extract a best-effort domain name from cache folder.
        local domain=$(basename "$cache_dir" | grep -oE '[a-zA-Z0-9][-a-zA-Z0-9]*\.[a-zA-Z]{2,}' | head -1 || echo "")
        local size=$(run_with_timeout 5 get_path_size_kb "$cache_dir")

        # Check if domain is protected
        local is_protected=false
        for protected_domain in "${PROTECTED_SW_DOMAINS[@]}"; do
            if [[ "$domain" == *"$protected_domain"* ]]; then
@@ -86,8 +59,6 @@ clean_service_worker_cache() {
                break
            fi
        done

        # Clean if not protected
        if [[ "$is_protected" == "false" ]]; then
            if [[ "$DRY_RUN" != "true" ]]; then
                safe_remove "$cache_dir" true || true
@@ -95,15 +66,12 @@ clean_service_worker_cache() {
            cleaned_size=$((cleaned_size + size))
        fi
    done < <(run_with_timeout 10 sh -c "find '$cache_path' -type d -depth 2 2> /dev/null || true")

    if [[ $cleaned_size -gt 0 ]]; then
        # Temporarily stop spinner for clean output
        local spinner_was_running=false
        if [[ -t 1 && -n "${INLINE_SPINNER_PID:-}" ]]; then
            stop_inline_spinner
            spinner_was_running=true
        fi

        local cleaned_mb=$((cleaned_size / 1024))
        if [[ "$DRY_RUN" != "true" ]]; then
            if [[ $protected_count -gt 0 ]]; then
@@ -112,32 +80,84 @@ clean_service_worker_cache() {
                echo -e " ${GREEN}${ICON_SUCCESS}${NC} $browser_name Service Worker (${cleaned_mb}MB)"
            fi
        else
            echo -e " ${YELLOW}→${NC} $browser_name Service Worker (would clean ${cleaned_mb}MB, ${protected_count} protected)"
            echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $browser_name Service Worker (would clean ${cleaned_mb}MB, ${protected_count} protected)"
        fi
        note_activity

        # Restart spinner if it was running
        if [[ "$spinner_was_running" == "true" ]]; then
            MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning browser Service Worker caches..."
        fi
    fi
}
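
# Example call (the Chrome profile path below is illustrative, not a path this
# module hard-codes; callers supply whatever browser/path pair they manage):
#
#   clean_service_worker_cache "Chrome" \
#       "$HOME/Library/Application Support/Google/Chrome/Default/Service Worker/CacheStorage"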

# Clean Next.js (.next/cache) and Python (__pycache__) build caches
# Uses maxdepth 3, excludes Library/.Trash/node_modules, 10s timeout per scan
# Next.js/Python project caches with tight scan bounds and timeouts.
clean_project_caches() {
    stop_inline_spinner 2> /dev/null || true
    # Fast pre-check before scanning the whole home dir.
    local has_dev_projects=false
    local -a common_dev_dirs=(
        "$HOME/Code"
        "$HOME/Projects"
        "$HOME/workspace"
        "$HOME/github"
        "$HOME/dev"
        "$HOME/work"
        "$HOME/src"
        "$HOME/repos"
        "$HOME/Development"
        "$HOME/www"
        "$HOME/golang"
        "$HOME/go"
        "$HOME/rust"
        "$HOME/python"
        "$HOME/ruby"
        "$HOME/java"
        "$HOME/dotnet"
        "$HOME/node"
    )
    for dir in "${common_dev_dirs[@]}"; do
        if [[ -d "$dir" ]]; then
            has_dev_projects=true
            break
        fi
    done
    # Fallback: look for project markers near $HOME.
    if [[ "$has_dev_projects" == "false" ]]; then
        local -a project_markers=(
            "node_modules"
            ".git"
            "target"
            "go.mod"
            "Cargo.toml"
            "package.json"
            "pom.xml"
            "build.gradle"
        )
        local spinner_active=false
        if [[ -t 1 ]]; then
            MOLE_SPINNER_PREFIX=" "
            start_inline_spinner "Detecting dev projects..."
            spinner_active=true
        fi
        for marker in "${project_markers[@]}"; do
            if run_with_timeout 3 sh -c "find '$HOME' -maxdepth 2 -name '$marker' -not -path '*/Library/*' -not -path '*/.Trash/*' 2>/dev/null | head -1" | grep -q .; then
                has_dev_projects=true
                break
            fi
        done
        if [[ "$spinner_active" == "true" ]]; then
            stop_inline_spinner 2> /dev/null || true
        fi
        [[ "$has_dev_projects" == "false" ]] && return 0
    fi
    if [[ -t 1 ]]; then
        MOLE_SPINNER_PREFIX=" "
        start_inline_spinner "Searching project caches..."
    fi

    local nextjs_tmp_file
    nextjs_tmp_file=$(create_temp_file)
    local pycache_tmp_file
    pycache_tmp_file=$(create_temp_file)
    local find_timeout=10

    # 1. Start Next.js search
    # Parallel scans (Next.js and __pycache__).
    (
        command find "$HOME" -P -mount -type d -name ".next" -maxdepth 3 \
            -not -path "*/Library/*" \
@@ -147,8 +167,6 @@ clean_project_caches() {
            2> /dev/null || true
    ) > "$nextjs_tmp_file" 2>&1 &
    local next_pid=$!

    # 2. Start Python search
    (
        command find "$HOME" -P -mount -type d -name "__pycache__" -maxdepth 3 \
            -not -path "*/Library/*" \
@@ -158,90 +176,42 @@ clean_project_caches() {
            2> /dev/null || true
    ) > "$pycache_tmp_file" 2>&1 &
    local py_pid=$!

    # 3. Wait for both with timeout
    local elapsed=0
    while [[ $elapsed -lt $find_timeout ]]; do
    local check_interval=0.2 # Check every 200ms instead of 1s for smoother experience
    # awk sees the "<"/"+" token as field 2, so the numbers are fields 1 and 3.
    while [[ $(echo "$elapsed < $find_timeout" | awk '{print ($1 < $3)}') -eq 1 ]]; do
        if ! kill -0 $next_pid 2> /dev/null && ! kill -0 $py_pid 2> /dev/null; then
            break
        fi
        sleep 1
        ((elapsed++))
        sleep $check_interval
        elapsed=$(echo "$elapsed + $check_interval" | awk '{print $1 + $3}')
    done

    # 4. Clean up any stuck processes
    # Kill stuck scans after timeout.
    for pid in $next_pid $py_pid; do
        if kill -0 "$pid" 2> /dev/null; then
            kill -TERM "$pid" 2> /dev/null || true
            local grace_period=0
            while [[ $grace_period -lt 20 ]]; do
                if ! kill -0 "$pid" 2> /dev/null; then
                    break
                fi
                sleep 0.1
                ((grace_period++))
            done
            if kill -0 "$pid" 2> /dev/null; then
                kill -KILL "$pid" 2> /dev/null || true
            fi
            wait "$pid" 2> /dev/null || true
        else
            wait "$pid" 2> /dev/null || true
        fi
    done

    if [[ -t 1 ]]; then
        stop_inline_spinner
    fi

    # 5. Process Next.js results
    while IFS= read -r next_dir; do
        [[ -d "$next_dir/cache" ]] && safe_clean "$next_dir/cache"/* "Next.js build cache" || true
    done < "$nextjs_tmp_file"

    # 6. Process Python results
    while IFS= read -r pycache; do
        [[ -d "$pycache" ]] && safe_clean "$pycache"/* "Python bytecode cache" || true
    done < "$pycache_tmp_file"
}
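
# Why awk appears in the wait loop above: bash arithmetic is integer-only, so a
# 0.2s poll interval needs an external comparison. A standalone sketch of the
# same idiom (the numbers are awk fields 1 and 3; field 2 is the operator token):
#
#   elapsed=0
#   while [[ $(echo "$elapsed < 10" | awk '{print ($1 < $3)}') -eq 1 ]]; do
#       sleep 0.2
#       elapsed=$(echo "$elapsed + 0.2" | awk '{print $1 + $3}')
#   done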

# Clean Spotlight user caches
clean_spotlight_caches() {
    local cleaned_size=0
    local cleaned_count=0

    # CoreSpotlight user cache (can grow very large, safe to delete)
    local spotlight_cache="$HOME/Library/Metadata/CoreSpotlight"
    if [[ -d "$spotlight_cache" ]]; then
        local size_kb=$(get_path_size_kb "$spotlight_cache")
        if [[ "$size_kb" -gt 0 ]]; then
            if [[ "$DRY_RUN" != "true" ]]; then
                safe_remove "$spotlight_cache" true && {
                    ((cleaned_size += size_kb))
                    ((cleaned_count++))
                    echo -e " ${GREEN}${ICON_SUCCESS}${NC} Spotlight cache ($(bytes_to_human $((size_kb * 1024))))"
                    note_activity
                }
            else
                ((cleaned_size += size_kb))
                echo -e " ${YELLOW}→${NC} Spotlight cache (would clean $(bytes_to_human $((size_kb * 1024))))"
                note_activity
            fi
        fi
    fi

    # Spotlight saved application state
    local spotlight_state="$HOME/Library/Saved Application State/com.apple.spotlight.Spotlight.savedState"
    if [[ -d "$spotlight_state" ]]; then
        local size_kb=$(get_path_size_kb "$spotlight_state")
        if [[ "$size_kb" -gt 0 ]]; then
            if [[ "$DRY_RUN" != "true" ]]; then
                safe_remove "$spotlight_state" true && {
                    ((cleaned_size += size_kb))
                    ((cleaned_count++))
                    echo -e " ${GREEN}${ICON_SUCCESS}${NC} Spotlight state ($(bytes_to_human $((size_kb * 1024))))"
                    note_activity
                }
            else
                ((cleaned_size += size_kb))
                echo -e " ${YELLOW}→${NC} Spotlight state (would clean $(bytes_to_human $((size_kb * 1024))))"
                note_activity
            fi
        fi
    fi

    if [[ $cleaned_size -gt 0 ]]; then
        ((files_cleaned += cleaned_count))
        ((total_size_cleaned += cleaned_size))
        ((total_items++))
    fi
}

170
lib/clean/dev.sh
170
lib/clean/dev.sh
@@ -1,52 +1,53 @@
#!/bin/bash
# Developer Tools Cleanup Module

set -euo pipefail

# Helper function to clean tool caches using their built-in commands
# Args: $1 - description, $@ - command to execute
# Env: DRY_RUN
# Tool cache helper (respects DRY_RUN).
clean_tool_cache() {
    local description="$1"
    shift

    if [[ "$DRY_RUN" != "true" ]]; then
        if "$@" > /dev/null 2>&1; then
            echo -e " ${GREEN}${ICON_SUCCESS}${NC} $description"
        fi
    else
        echo -e " ${YELLOW}→${NC} $description (would clean)"
        echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $description · would clean"
    fi
    return 0
}
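
# Example calls in the shape used throughout this module: a display label plus
# the command to run, all sharing one DRY_RUN-aware code path.
#
#   clean_tool_cache "npm cache" npm cache clean --force
#   clean_tool_cache "pip cache" bash -c 'pip3 cache purge >/dev/null 2>&1 || true'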

# Clean npm cache (command + directories)
# npm cache clean clears official npm cache, safe_clean handles alternative package managers
# Env: DRY_RUN
# npm/pnpm/yarn/bun caches.
clean_dev_npm() {
    if command -v npm > /dev/null 2>&1; then
        # clean_tool_cache now calculates size before cleanup for better statistics
        clean_tool_cache "npm cache" npm cache clean --force
        note_activity
    fi

    # Clean alternative package manager caches
    # Clean pnpm store cache
    local pnpm_default_store=~/Library/pnpm/store
    # Check if pnpm is actually usable (not just Corepack shim)
    if command -v pnpm > /dev/null 2>&1 && COREPACK_ENABLE_DOWNLOAD_PROMPT=0 pnpm --version > /dev/null 2>&1; then
        COREPACK_ENABLE_DOWNLOAD_PROMPT=0 clean_tool_cache "pnpm cache" pnpm store prune
        local pnpm_store_path
        start_section_spinner "Checking store path..."
        pnpm_store_path=$(COREPACK_ENABLE_DOWNLOAD_PROMPT=0 run_with_timeout 2 pnpm store path 2> /dev/null) || pnpm_store_path=""
        stop_section_spinner
        if [[ -n "$pnpm_store_path" && "$pnpm_store_path" != "$pnpm_default_store" ]]; then
            safe_clean "$pnpm_default_store"/* "Orphaned pnpm store"
        fi
    else
        # pnpm not installed or not usable, just clean the default store directory
        safe_clean "$pnpm_default_store"/* "pnpm store"
    fi
    note_activity
    safe_clean ~/.tnpm/_cacache/* "tnpm cache directory"
    safe_clean ~/.tnpm/_logs/* "tnpm logs"
    safe_clean ~/.yarn/cache/* "Yarn cache"
    safe_clean ~/.bun/install/cache/* "Bun cache"
}

# Clean Python/pip cache (command + directories)
# pip cache purge clears official pip cache, safe_clean handles other Python tools
# Env: DRY_RUN
# Python/pip ecosystem caches.
clean_dev_python() {
    if command -v pip3 > /dev/null 2>&1; then
        # clean_tool_cache now calculates size before cleanup for better statistics
        clean_tool_cache "pip cache" bash -c 'pip3 cache purge >/dev/null 2>&1 || true'
        note_activity
    fi

    # Clean Python ecosystem caches
    safe_clean ~/.pyenv/cache/* "pyenv cache"
    safe_clean ~/.cache/poetry/* "Poetry cache"
    safe_clean ~/.cache/uv/* "uv cache"
@@ -61,60 +62,53 @@ clean_dev_python() {
    safe_clean ~/anaconda3/pkgs/* "Anaconda packages cache"
    safe_clean ~/.cache/wandb/* "Weights & Biases cache"
}

# Clean Go cache (command + directories)
# go clean handles build and module caches comprehensively
# Env: DRY_RUN
# Go build/module caches.
clean_dev_go() {
    if command -v go > /dev/null 2>&1; then
        # clean_tool_cache now calculates size before cleanup for better statistics
        clean_tool_cache "Go cache" bash -c 'go clean -modcache >/dev/null 2>&1 || true; go clean -cache >/dev/null 2>&1 || true'
        note_activity
    fi
}

# Clean Rust/cargo cache directories
# Rust/cargo caches.
clean_dev_rust() {
    safe_clean ~/.cargo/registry/cache/* "Rust cargo cache"
    safe_clean ~/.cargo/git/* "Cargo git cache"
    safe_clean ~/.rustup/downloads/* "Rust downloads cache"
}

# Clean Docker cache (command + directories)
# Env: DRY_RUN
# Docker caches (guarded by daemon check).
clean_dev_docker() {
    if command -v docker > /dev/null 2>&1; then
        if [[ "$DRY_RUN" != "true" ]]; then
            # Check if Docker daemon is running (with timeout to prevent hanging)
            start_section_spinner "Checking Docker daemon..."
            local docker_running=false
            if run_with_timeout 3 docker info > /dev/null 2>&1; then
                docker_running=true
            fi
            stop_section_spinner
            if [[ "$docker_running" == "true" ]]; then
                clean_tool_cache "Docker build cache" docker builder prune -af
            else
                note_activity
                echo -e " ${GRAY}${ICON_SUCCESS}${NC} Docker build cache (daemon not running)"
                debug_log "Docker daemon not running, skipping Docker cache cleanup"
            fi
        else
            note_activity
            echo -e " ${YELLOW}→${NC} Docker build cache (would clean)"
            echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Docker build cache · would clean"
        fi
    fi

    safe_clean ~/.docker/buildx/cache/* "Docker BuildX cache"
}

# Clean Nix package manager
# Env: DRY_RUN
# Nix garbage collection.
clean_dev_nix() {
    if command -v nix-collect-garbage > /dev/null 2>&1; then
        if [[ "$DRY_RUN" != "true" ]]; then
            clean_tool_cache "Nix garbage collection" nix-collect-garbage --delete-older-than 30d
        else
            echo -e " ${YELLOW}→${NC} Nix garbage collection (would clean)"
            echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Nix garbage collection · would clean"
        fi
        note_activity
    fi
}

# Clean cloud CLI tools cache
# Cloud CLI caches.
clean_dev_cloud() {
    safe_clean ~/.kube/cache/* "Kubernetes cache"
    safe_clean ~/.local/share/containers/storage/tmp/* "Container storage temp"
@@ -122,11 +116,8 @@ clean_dev_cloud() {
    safe_clean ~/.config/gcloud/logs/* "Google Cloud logs"
    safe_clean ~/.azure/logs/* "Azure CLI logs"
}

# Clean frontend build tool caches
# Frontend build caches.
clean_dev_frontend() {
    safe_clean ~/.pnpm-store/* "pnpm store cache"
    safe_clean ~/.local/share/pnpm/store/* "pnpm global store"
    safe_clean ~/.cache/typescript/* "TypeScript cache"
    safe_clean ~/.cache/electron/* "Electron cache"
    safe_clean ~/.cache/node-gyp/* "node-gyp cache"
@@ -139,49 +130,30 @@ clean_dev_frontend() {
    safe_clean ~/.cache/eslint/* "ESLint cache"
    safe_clean ~/.cache/prettier/* "Prettier cache"
}

# Clean mobile development tools
# iOS simulator cleanup can free significant space (70GB+ in some cases)
# DeviceSupport files accumulate for each iOS version connected
# Simulator runtime caches can grow large over time
# Mobile dev caches (can be large).
clean_dev_mobile() {
    # Clean Xcode unavailable simulators
    # Removes old and unused local iOS simulator data from old unused runtimes
    # Can free up significant space (70GB+ in some cases)
    if command -v xcrun > /dev/null 2>&1; then
        debug_log "Checking for unavailable Xcode simulators"

        if [[ "$DRY_RUN" == "true" ]]; then
            clean_tool_cache "Xcode unavailable simulators" xcrun simctl delete unavailable
        else
            if [[ -t 1 ]]; then
                MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking unavailable simulators..."
            fi

            # Run command manually to control UI output order
            start_section_spinner "Checking unavailable simulators..."
            if xcrun simctl delete unavailable > /dev/null 2>&1; then
                if [[ -t 1 ]]; then stop_inline_spinner; fi
                stop_section_spinner
                echo -e " ${GREEN}${ICON_SUCCESS}${NC} Xcode unavailable simulators"
            else
                if [[ -t 1 ]]; then stop_inline_spinner; fi
                # Silently fail or log error if needed, matching clean_tool_cache behavior
                stop_section_spinner
            fi
        fi
        note_activity
    fi

    # Clean iOS DeviceSupport - more comprehensive cleanup
    # DeviceSupport directories store debug symbols for each iOS version
    # Safe to clean caches and logs, but preserve device support files themselves
    # DeviceSupport caches/logs (preserve core support files).
    safe_clean ~/Library/Developer/Xcode/iOS\ DeviceSupport/*/Symbols/System/Library/Caches/* "iOS device symbol cache"
    safe_clean ~/Library/Developer/Xcode/iOS\ DeviceSupport/*.log "iOS device support logs"
    safe_clean ~/Library/Developer/Xcode/watchOS\ DeviceSupport/*/Symbols/System/Library/Caches/* "watchOS device symbol cache"
    safe_clean ~/Library/Developer/Xcode/tvOS\ DeviceSupport/*/Symbols/System/Library/Caches/* "tvOS device symbol cache"

    # Clean simulator runtime caches
    # RuntimeRoot caches can accumulate system library caches
    # Simulator runtime caches.
    safe_clean ~/Library/Developer/CoreSimulator/Profiles/Runtimes/*/Contents/Resources/RuntimeRoot/System/Library/Caches/* "Simulator runtime cache"

    safe_clean ~/Library/Caches/Google/AndroidStudio*/* "Android Studio cache"
    safe_clean ~/Library/Caches/CocoaPods/* "CocoaPods cache"
    safe_clean ~/.cache/flutter/* "Flutter cache"
@@ -190,16 +162,14 @@ clean_dev_mobile() {
    safe_clean ~/Library/Developer/Xcode/UserData/IB\ Support/* "Xcode Interface Builder cache"
    safe_clean ~/.cache/swift-package-manager/* "Swift package manager cache"
}

# Clean JVM ecosystem tools
# JVM ecosystem caches.
clean_dev_jvm() {
    safe_clean ~/.gradle/caches/* "Gradle caches"
    safe_clean ~/.gradle/daemon/* "Gradle daemon logs"
    safe_clean ~/.sbt/* "SBT cache"
    safe_clean ~/.ivy2/cache/* "Ivy cache"
}

# Clean other language tools
# Other language tool caches.
clean_dev_other_langs() {
    safe_clean ~/.bundle/cache/* "Ruby Bundler cache"
    safe_clean ~/.composer/cache/* "PHP Composer cache"
@@ -209,8 +179,7 @@ clean_dev_other_langs() {
    safe_clean ~/.cache/zig/* "Zig cache"
    safe_clean ~/Library/Caches/deno/* "Deno cache"
}

# Clean CI/CD and DevOps tools
# CI/CD and DevOps caches.
clean_dev_cicd() {
    safe_clean ~/.cache/terraform/* "Terraform cache"
    safe_clean ~/.grafana/cache/* "Grafana cache"
@@ -221,8 +190,7 @@ clean_dev_cicd() {
    safe_clean ~/.circleci/cache/* "CircleCI cache"
    safe_clean ~/.sonar/* "SonarQube cache"
}

# Clean database tools
# Database tool caches.
clean_dev_database() {
    safe_clean ~/Library/Caches/com.sequel-ace.sequel-ace/* "Sequel Ace cache"
    safe_clean ~/Library/Caches/com.eggerapps.Sequel-Pro/* "Sequel Pro cache"
@@ -231,8 +199,7 @@ clean_dev_database() {
    safe_clean ~/Library/Caches/com.dbeaver.* "DBeaver cache"
    safe_clean ~/Library/Caches/com.redis.RedisInsight "Redis Insight cache"
}

# Clean API/network debugging tools
# API/debugging tool caches.
clean_dev_api_tools() {
    safe_clean ~/Library/Caches/com.postmanlabs.mac/* "Postman cache"
    safe_clean ~/Library/Caches/com.konghq.insomnia/* "Insomnia cache"
@@ -241,11 +208,9 @@ clean_dev_api_tools() {
    safe_clean ~/Library/Caches/com.charlesproxy.charles/* "Charles Proxy cache"
    safe_clean ~/Library/Caches/com.proxyman.NSProxy/* "Proxyman cache"
}

# Clean misc dev tools
# Misc dev tool caches.
clean_dev_misc() {
    safe_clean ~/Library/Caches/com.unity3d.*/* "Unity cache"
    # safe_clean ~/Library/Caches/com.jetbrains.toolbox/* "JetBrains Toolbox cache"
    safe_clean ~/Library/Caches/com.mongodb.compass/* "MongoDB Compass cache"
    safe_clean ~/Library/Caches/com.figma.Desktop/* "Figma cache"
    safe_clean ~/Library/Caches/com.github.GitHubDesktop/* "GitHub Desktop cache"
@@ -253,8 +218,7 @@ clean_dev_misc() {
    safe_clean ~/Library/Caches/KSCrash/* "KSCrash reports"
    safe_clean ~/Library/Caches/com.crashlytics.data/* "Crashlytics data"
}

# Clean shell and version control
# Shell and VCS leftovers.
clean_dev_shell() {
    safe_clean ~/.gitconfig.lock "Git config lock"
    safe_clean ~/.gitconfig.bak* "Git config backup"
@@ -264,19 +228,21 @@ clean_dev_shell() {
    safe_clean ~/.zsh_history.bak* "Zsh history backup"
    safe_clean ~/.cache/pre-commit/* "pre-commit cache"
}

# Clean network utilities
# Network tool caches.
clean_dev_network() {
    safe_clean ~/.cache/curl/* "curl cache"
    safe_clean ~/.cache/wget/* "wget cache"
    safe_clean ~/Library/Caches/curl/* "curl cache (macOS)"
    safe_clean ~/Library/Caches/wget/* "wget cache (macOS)"
    safe_clean ~/Library/Caches/curl/* "macOS curl cache"
    safe_clean ~/Library/Caches/wget/* "macOS wget cache"
}

# Main developer tools cleanup function
# Calls all specialized cleanup functions
# Env: DRY_RUN
# Orphaned SQLite temp files (-shm/-wal). Disabled due to low ROI.
clean_sqlite_temp_files() {
    return 0
}
# Main developer tools cleanup sequence.
clean_developer_tools() {
    stop_section_spinner
    clean_sqlite_temp_files
    clean_dev_npm
    clean_dev_python
    clean_dev_go
@@ -286,10 +252,7 @@ clean_developer_tools() {
    clean_dev_nix
    clean_dev_shell
    clean_dev_frontend

    # Project build caches (delegated to clean_caches module)
    clean_project_caches

    clean_dev_mobile
    clean_dev_jvm
    clean_dev_other_langs
@@ -298,29 +261,20 @@ clean_developer_tools() {
    clean_dev_api_tools
    clean_dev_network
    clean_dev_misc

    # Homebrew caches and cleanup (delegated to clean_brew module)
    safe_clean ~/Library/Caches/Homebrew/* "Homebrew cache"

    # Clean Homebrew locks intelligently (avoid repeated sudo prompts)
    # Clean Homebrew locks without repeated sudo prompts.
    local brew_lock_dirs=(
        "/opt/homebrew/var/homebrew/locks"
        "/usr/local/var/homebrew/locks"
    )

    for lock_dir in "${brew_lock_dirs[@]}"; do
        if [[ -d "$lock_dir" && -w "$lock_dir" ]]; then
            # User can write, safe to clean
            safe_clean "$lock_dir"/* "Homebrew lock files"
        elif [[ -d "$lock_dir" ]]; then
            # Directory exists but not writable. Check if empty to avoid noise.
            if [[ -n "$(ls -A "$lock_dir" 2> /dev/null)" ]]; then
            # Only try sudo ONCE if we really need to, or just skip to avoid spam
            # Decision: Skip strict system/root owned locks to avoid nag.
            if find "$lock_dir" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then
                debug_log "Skipping read-only Homebrew locks in $lock_dir"
            fi
        fi
    done

    clean_homebrew
}

891
lib/clean/project.sh
Normal file
891
lib/clean/project.sh
Normal file
@@ -0,0 +1,891 @@
#!/bin/bash
# Project Purge Module (mo purge).
# Removes heavy project build artifacts and dependencies.
set -euo pipefail

PROJECT_LIB_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CORE_LIB_DIR="$(cd "$PROJECT_LIB_DIR/../core" && pwd)"
if ! command -v ensure_user_dir > /dev/null 2>&1; then
    # shellcheck disable=SC1090
    source "$CORE_LIB_DIR/common.sh"
fi

# Targets to look for (heavy build artifacts).
readonly PURGE_TARGETS=(
    "node_modules"
    "target"        # Rust, Maven
    "build"         # Gradle, various
    "dist"          # JS builds
    "venv"          # Python
    ".venv"         # Python
    ".gradle"       # Gradle local
    "__pycache__"   # Python
    ".next"         # Next.js
    ".nuxt"         # Nuxt.js
    ".output"       # Nuxt.js
    "vendor"        # PHP Composer
    "obj"           # C# / Unity
    ".turbo"        # Turborepo cache
    ".parcel-cache" # Parcel bundler
    ".dart_tool"    # Flutter/Dart build cache
    ".zig-cache"    # Zig
    "zig-out"       # Zig
)
# Minimum age in days before considering for cleanup.
readonly MIN_AGE_DAYS=7
# Scan depth defaults (relative to search root).
readonly PURGE_MIN_DEPTH_DEFAULT=2
readonly PURGE_MAX_DEPTH_DEFAULT=8
# Search paths (default, can be overridden via config file).
readonly DEFAULT_PURGE_SEARCH_PATHS=(
    "$HOME/www"
    "$HOME/dev"
    "$HOME/Projects"
    "$HOME/GitHub"
    "$HOME/Code"
    "$HOME/Workspace"
    "$HOME/Repos"
    "$HOME/Development"
)

# Config file for custom purge paths.
readonly PURGE_CONFIG_FILE="$HOME/.config/mole/purge_paths"

# Resolved search paths.
PURGE_SEARCH_PATHS=()

# Project indicators for container detection.
readonly PROJECT_INDICATORS=(
    "package.json"
    "Cargo.toml"
    "go.mod"
    "pyproject.toml"
    "requirements.txt"
    "pom.xml"
    "build.gradle"
    "Gemfile"
    "composer.json"
    "pubspec.yaml"
    "Makefile"
    "build.zig"
    "build.zig.zon"
    ".git"
)

# Check if a directory contains projects (directly or in subdirectories).
is_project_container() {
    local dir="$1"
    local max_depth="${2:-2}"

    # Skip hidden/system directories.
    local basename
    basename=$(basename "$dir")
    [[ "$basename" == .* ]] && return 1
    [[ "$basename" == "Library" ]] && return 1
    [[ "$basename" == "Applications" ]] && return 1
    [[ "$basename" == "Movies" ]] && return 1
    [[ "$basename" == "Music" ]] && return 1
    [[ "$basename" == "Pictures" ]] && return 1
    [[ "$basename" == "Public" ]] && return 1

    # Single find expression for indicators.
    local -a find_args=("$dir" "-maxdepth" "$max_depth" "(")
    local first=true
    for indicator in "${PROJECT_INDICATORS[@]}"; do
        if [[ "$first" == "true" ]]; then
            first=false
        else
            find_args+=("-o")
        fi
        find_args+=("-name" "$indicator")
    done
    find_args+=(")" "-print" "-quit")

    if find "${find_args[@]}" 2> /dev/null | grep -q .; then
        return 0
    fi

    return 1
}
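
# The loop above assembles one find invocation instead of one per indicator.
# For two indicators it expands to roughly (shell-quoted for illustration):
#
#   find "$dir" -maxdepth 2 \( -name "package.json" -o -name "Cargo.toml" \) -print -quit
#
# -print -quit stops at the first match, keeping the probe cheap on big trees.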

# Discover project directories in $HOME.
discover_project_dirs() {
    local -a discovered=()

    for path in "${DEFAULT_PURGE_SEARCH_PATHS[@]}"; do
        if [[ -d "$path" ]]; then
            discovered+=("$path")
        fi
    done

    # Scan $HOME for other containers (depth 1).
    local dir
    for dir in "$HOME"/*/; do
        [[ ! -d "$dir" ]] && continue
        dir="${dir%/}" # Remove trailing slash

        local already_found=false
        for existing in "${DEFAULT_PURGE_SEARCH_PATHS[@]}"; do
            if [[ "$dir" == "$existing" ]]; then
                already_found=true
                break
            fi
        done
        [[ "$already_found" == "true" ]] && continue

        if is_project_container "$dir" 2; then
            discovered+=("$dir")
        fi
    done

    printf '%s\n' "${discovered[@]}" | sort -u
}

# Save discovered paths to config.
save_discovered_paths() {
    local -a paths=("$@")

    ensure_user_dir "$(dirname "$PURGE_CONFIG_FILE")"

    cat > "$PURGE_CONFIG_FILE" << 'EOF'
# Mole Purge Paths - Auto-discovered project directories
# Edit this file to customize, or run: mo purge --paths
# Add one path per line (supports ~ for home directory)
EOF

    printf '\n' >> "$PURGE_CONFIG_FILE"
    for path in "${paths[@]}"; do
        # Convert $HOME to ~ for portability
        path="${path/#$HOME/~}"
        echo "$path" >> "$PURGE_CONFIG_FILE"
    done
}
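
# The config written above is plain text, one path per line. An example file
# (contents illustrative) that load_purge_config below accepts:
#
#   # Mole Purge Paths - Auto-discovered project directories
#   # Edit this file to customize, or run: mo purge --paths
#   # Add one path per line (supports ~ for home directory)
#   ~/Code
#   ~/Projects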

# Load purge paths from config or auto-discover
load_purge_config() {
    PURGE_SEARCH_PATHS=()

    if [[ -f "$PURGE_CONFIG_FILE" ]]; then
        while IFS= read -r line; do
            line="${line#"${line%%[![:space:]]*}"}"
            line="${line%"${line##*[![:space:]]}"}"

            [[ -z "$line" || "$line" =~ ^# ]] && continue

            line="${line/#\~/$HOME}"

            PURGE_SEARCH_PATHS+=("$line")
        done < "$PURGE_CONFIG_FILE"
    fi

    if [[ ${#PURGE_SEARCH_PATHS[@]} -eq 0 ]]; then
        if [[ -t 1 ]] && [[ -z "${_PURGE_DISCOVERY_SILENT:-}" ]]; then
            echo -e "${GRAY}First run: discovering project directories...${NC}" >&2
        fi

        local -a discovered=()
        while IFS= read -r path; do
            [[ -n "$path" ]] && discovered+=("$path")
        done < <(discover_project_dirs)

        if [[ ${#discovered[@]} -gt 0 ]]; then
            PURGE_SEARCH_PATHS=("${discovered[@]}")
            save_discovered_paths "${discovered[@]}"

            if [[ -t 1 ]] && [[ -z "${_PURGE_DISCOVERY_SILENT:-}" ]]; then
                echo -e "${GRAY}Found ${#discovered[@]} project directories, saved to config${NC}" >&2
            fi
        else
            PURGE_SEARCH_PATHS=("${DEFAULT_PURGE_SEARCH_PATHS[@]}")
        fi
    fi
}

# Initialize paths on script load.
load_purge_config

# Args: $1 - path to check
# Safe cleanup requires the path be inside a project directory.
is_safe_project_artifact() {
    local path="$1"
    local search_path="$2"
    if [[ "$path" != /* ]]; then
        return 1
    fi
    # Must not be a direct child of the search root.
    local relative_path="${path#"$search_path"/}"
    local depth=$(echo "$relative_path" | tr -cd '/' | wc -c)
    if [[ $depth -lt 1 ]]; then
        return 1
    fi
    return 0
}
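
# The depth check illustrated (paths hypothetical), with search root ~/Code:
#
#   ~/Code/app/node_modules  -> relative path "app/node_modules", one "/", accepted
#   ~/Code/node_modules      -> relative path "node_modules", no "/", rejected
#
# so an artifact sitting directly in a search root is never treated as purgeable.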

# Detect if directory is a Rails project root
is_rails_project_root() {
    local dir="$1"
    [[ -f "$dir/config/application.rb" ]] || return 1
    [[ -f "$dir/Gemfile" ]] || return 1
    [[ -f "$dir/bin/rails" || -f "$dir/config/environment.rb" ]]
}

# Detect if directory is a Go project root
is_go_project_root() {
    local dir="$1"
    [[ -f "$dir/go.mod" ]]
}

# Detect if directory is a PHP Composer project root
is_php_project_root() {
    local dir="$1"
    [[ -f "$dir/composer.json" ]]
}

# Check if a vendor directory should be protected from purge
# Expects path to be a vendor directory (basename == vendor)
# Strategy: Only clean PHP Composer vendor, protect all others
is_protected_vendor_dir() {
    local path="$1"
    local base
    base=$(basename "$path")
    [[ "$base" == "vendor" ]] || return 1
    local parent_dir
    parent_dir=$(dirname "$path")

    # PHP Composer vendor can be safely regenerated with 'composer install'
    # Do NOT protect it (return 1 = not protected = can be cleaned)
    if is_php_project_root "$parent_dir"; then
        return 1
    fi

    # Rails vendor (importmap dependencies) - should be protected
    if is_rails_project_root "$parent_dir"; then
        return 0
    fi

    # Go vendor (optional vendoring) - protect to avoid accidental deletion
    if is_go_project_root "$parent_dir"; then
        return 0
    fi

    # Unknown vendor type - protect by default (conservative approach)
    return 0
}
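
# Decision table implied by the branches above, for a path ending in /vendor:
#
#   parent has composer.json  -> not protected (composer install regenerates it)
#   parent is a Rails root    -> protected (importmap vendor files)
#   parent has go.mod         -> protected (vendored Go modules)
#   anything else             -> protected (conservative default)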

# Check if an artifact should be protected from purge
is_protected_purge_artifact() {
    local path="$1"
    local base
    base=$(basename "$path")

    case "$base" in
        vendor)
            is_protected_vendor_dir "$path"
            return $?
            ;;
    esac

    return 1
}

# Scan purge targets using fd (fast) or pruned find.
scan_purge_targets() {
    local search_path="$1"
    local output_file="$2"
    local min_depth="${MOLE_PURGE_MIN_DEPTH:-$PURGE_MIN_DEPTH_DEFAULT}"
    local max_depth="${MOLE_PURGE_MAX_DEPTH:-$PURGE_MAX_DEPTH_DEFAULT}"
    if [[ ! "$min_depth" =~ ^[0-9]+$ ]]; then
        min_depth="$PURGE_MIN_DEPTH_DEFAULT"
    fi
    if [[ ! "$max_depth" =~ ^[0-9]+$ ]]; then
        max_depth="$PURGE_MAX_DEPTH_DEFAULT"
    fi
    if [[ "$max_depth" -lt "$min_depth" ]]; then
        max_depth="$min_depth"
    fi
    if [[ ! -d "$search_path" ]]; then
        return
    fi
    if command -v fd > /dev/null 2>&1; then
        # Escape regex special characters in target names for fd patterns
        local escaped_targets=()
        for target in "${PURGE_TARGETS[@]}"; do
            escaped_targets+=("$(printf '%s' "$target" | sed -e 's/[][(){}.^$*+?|\\]/\\&/g')")
        done
        local pattern="($(
            IFS='|'
            echo "${escaped_targets[*]}"
        ))"
        local fd_args=(
            "--absolute-path"
            "--hidden"
            "--no-ignore"
            "--type" "d"
            "--min-depth" "$min_depth"
            "--max-depth" "$max_depth"
            "--threads" "4"
            "--exclude" ".git"
            "--exclude" "Library"
            "--exclude" ".Trash"
            "--exclude" "Applications"
        )
        fd "${fd_args[@]}" "$pattern" "$search_path" 2> /dev/null | while IFS= read -r item; do
            if is_safe_project_artifact "$item" "$search_path"; then
                echo "$item"
            fi
        done | filter_nested_artifacts | filter_protected_artifacts > "$output_file"
    else
        # Pruned find avoids descending into heavy directories.
        local prune_dirs=(".git" "Library" ".Trash" "Applications")
        local find_expr=()
        for dir in "${prune_dirs[@]}"; do
            find_expr+=("-name" "$dir" "-prune" "-o")
        done
        local i=0
        for target in "${PURGE_TARGETS[@]}"; do
            find_expr+=("-name" "$target" "-print" "-prune")
            if [[ $i -lt $((${#PURGE_TARGETS[@]} - 1)) ]]; then
                find_expr+=("-o")
            fi
            ((i++))
        done
        command find "$search_path" -mindepth "$min_depth" -maxdepth "$max_depth" -type d \
            \( "${find_expr[@]}" \) 2> /dev/null | while IFS= read -r item; do
            if is_safe_project_artifact "$item" "$search_path"; then
                echo "$item"
            fi
        done | filter_nested_artifacts | filter_protected_artifacts > "$output_file"
    fi
}
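
# With the default PURGE_TARGETS the fd branch above searches one alternation
# regex rather than one pass per target; abridged, the generated pattern is:
#
#   (node_modules|target|build|dist|venv|\.venv|...)
#
# Escaping matters because fd patterns are regexes: an unescaped ".venv" would
# also match names like "xvenv".
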
# Filter out nested artifacts (e.g. node_modules inside node_modules).
filter_nested_artifacts() {
    while IFS= read -r item; do
        local parent_dir=$(dirname "$item")
        local is_nested=false
        for target in "${PURGE_TARGETS[@]}"; do
            if [[ "$parent_dir" == *"/$target/"* || "$parent_dir" == *"/$target" ]]; then
                is_nested=true
                break
            fi
        done
        if [[ "$is_nested" == "false" ]]; then
            echo "$item"
        fi
    done
}

filter_protected_artifacts() {
    while IFS= read -r item; do
        if ! is_protected_purge_artifact "$item"; then
            echo "$item"
        fi
    done
}
# Args: $1 - path
# Check if a path was modified recently (safety check).
is_recently_modified() {
    local path="$1"
    local age_days=$MIN_AGE_DAYS
    if [[ ! -e "$path" ]]; then
        return 1
    fi
    local mod_time
    mod_time=$(get_file_mtime "$path")
    local current_time=$(date +%s)
    local age_seconds=$((current_time - mod_time))
    local age_in_days=$((age_seconds / 86400))
    if [[ $age_in_days -lt $age_days ]]; then
        return 0 # Recently modified
    else
        return 1 # Old enough to clean
    fi
}
# Args: $1 - path
# Get directory size in KB.
get_dir_size_kb() {
    local path="$1"
    if [[ -d "$path" ]]; then
        du -sk "$path" 2> /dev/null | awk '{print $1}' || echo "0"
    else
        echo "0"
    fi
}
# Purge category selector.
select_purge_categories() {
    local -a categories=("$@")
    local total_items=${#categories[@]}
    local clear_line=$'\r\033[2K'
    if [[ $total_items -eq 0 ]]; then
        return 1
    fi

    # Calculate items per page based on terminal height.
    _get_items_per_page() {
        local term_height=24
        if [[ -t 0 ]] || [[ -t 2 ]]; then
            term_height=$(stty size < /dev/tty 2> /dev/null | awk '{print $1}')
        fi
        if [[ -z "$term_height" || $term_height -le 0 ]]; then
            if command -v tput > /dev/null 2>&1; then
                term_height=$(tput lines 2> /dev/null || echo "24")
            else
                term_height=24
            fi
        fi
        local reserved=6
        local available=$((term_height - reserved))
        if [[ $available -lt 3 ]]; then
            echo 3
        elif [[ $available -gt 50 ]]; then
            echo 50
        else
            echo "$available"
        fi
    }

    local items_per_page=$(_get_items_per_page)
    local cursor_pos=0
    local top_index=0

    # Initialize selection (all selected by default, except recent ones)
    local -a selected=()
    IFS=',' read -r -a recent_flags <<< "${PURGE_RECENT_CATEGORIES:-}"
    for ((i = 0; i < total_items; i++)); do
        # Default unselected if category has recent items
        if [[ ${recent_flags[i]:-false} == "true" ]]; then
            selected[i]=false
        else
            selected[i]=true
        fi
    done
    local original_stty=""
    if [[ -t 0 ]] && command -v stty > /dev/null 2>&1; then
        original_stty=$(stty -g 2> /dev/null || echo "")
    fi
    # Terminal control functions
    restore_terminal() {
        trap - EXIT INT TERM
        show_cursor
        if [[ -n "${original_stty:-}" ]]; then
            stty "${original_stty}" 2> /dev/null || stty sane 2> /dev/null || true
        fi
    }
    # shellcheck disable=SC2329
    handle_interrupt() {
        restore_terminal
        exit 130
    }
    draw_menu() {
        # Recalculate items_per_page dynamically to handle window resize
        items_per_page=$(_get_items_per_page)

        # Clamp pagination state to avoid cursor drifting out of view
        local max_top_index=0
        if [[ $total_items -gt $items_per_page ]]; then
            max_top_index=$((total_items - items_per_page))
        fi
        if [[ $top_index -gt $max_top_index ]]; then
            top_index=$max_top_index
        fi
        if [[ $top_index -lt 0 ]]; then
            top_index=0
        fi

        local visible_count=$((total_items - top_index))
        [[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
        if [[ $cursor_pos -gt $((visible_count - 1)) ]]; then
            cursor_pos=$((visible_count - 1))
        fi
        if [[ $cursor_pos -lt 0 ]]; then
            cursor_pos=0
        fi

        printf "\033[H"
        # Calculate total size of selected items for header
        local selected_size=0
        local selected_count=0
        IFS=',' read -r -a sizes <<< "${PURGE_CATEGORY_SIZES:-}"
        for ((i = 0; i < total_items; i++)); do
            if [[ ${selected[i]} == true ]]; then
                selected_size=$((selected_size + ${sizes[i]:-0}))
                ((selected_count++))
            fi
        done
        local selected_gb
        selected_gb=$(printf "%.1f" "$(echo "scale=2; $selected_size/1024/1024" | bc)")

        # Show position indicator if scrolling is needed
        local scroll_indicator=""
        if [[ $total_items -gt $items_per_page ]]; then
            local current_pos=$((top_index + cursor_pos + 1))
            scroll_indicator=" ${GRAY}[${current_pos}/${total_items}]${NC}"
        fi

        printf "%s\n" "$clear_line"
        printf "%s${PURPLE_BOLD}Select Categories to Clean${NC}%s ${GRAY}- ${selected_gb}GB ($selected_count selected)${NC}\n" "$clear_line" "$scroll_indicator"
        printf "%s\n" "$clear_line"

        IFS=',' read -r -a recent_flags <<< "${PURGE_RECENT_CATEGORIES:-}"

        # Calculate visible range
        local end_index=$((top_index + visible_count))

        # Draw only visible items
        for ((i = top_index; i < end_index; i++)); do
            local checkbox="$ICON_EMPTY"
            [[ ${selected[i]} == true ]] && checkbox="$ICON_SOLID"
            local recent_marker=""
            [[ ${recent_flags[i]:-false} == "true" ]] && recent_marker=" ${GRAY}| Recent${NC}"
            local rel_pos=$((i - top_index))
            if [[ $rel_pos -eq $cursor_pos ]]; then
                printf "%s${CYAN}${ICON_ARROW} %s %s%s${NC}\n" "$clear_line" "$checkbox" "${categories[i]}" "$recent_marker"
            else
                printf "%s  %s %s%s\n" "$clear_line" "$checkbox" "${categories[i]}" "$recent_marker"
            fi
        done

        # Fill empty slots to clear previous content
        local items_shown=$visible_count
        for ((i = items_shown; i < items_per_page; i++)); do
            printf "%s\n" "$clear_line"
        done

        printf "%s\n" "$clear_line"

        printf "%s${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN} | Space Select | Enter Confirm | A All | I Invert | Q Quit${NC}\n" "$clear_line"
    }
    trap restore_terminal EXIT
    trap handle_interrupt INT TERM
    # Preserve interrupt character for Ctrl-C
    stty -echo -icanon intr ^C 2> /dev/null || true
    hide_cursor
    if [[ -t 1 ]]; then
        clear_screen
    fi
    # Main loop
    while true; do
        draw_menu
        # Read key
        IFS= read -r -s -n1 key || key=""
        case "$key" in
            $'\x1b')
                # Arrow keys or ESC
                # Read next 2 chars with timeout (bash 3.2 needs integer)
                IFS= read -r -s -n1 -t 1 key2 || key2=""
                if [[ "$key2" == "[" ]]; then
                    IFS= read -r -s -n1 -t 1 key3 || key3=""
                    case "$key3" in
                        A) # Up arrow
                            if [[ $cursor_pos -gt 0 ]]; then
                                ((cursor_pos--))
                            elif [[ $top_index -gt 0 ]]; then
                                ((top_index--))
                            fi
                            ;;
                        B) # Down arrow
                            local absolute_index=$((top_index + cursor_pos))
                            local last_index=$((total_items - 1))
                            if [[ $absolute_index -lt $last_index ]]; then
                                local visible_count=$((total_items - top_index))
                                [[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
                                if [[ $cursor_pos -lt $((visible_count - 1)) ]]; then
                                    ((cursor_pos++))
                                elif [[ $((top_index + visible_count)) -lt $total_items ]]; then
                                    ((top_index++))
                                fi
                            fi
                            ;;
                    esac
                else
                    # ESC alone (no following chars)
                    restore_terminal
                    return 1
                fi
                ;;
            " ") # Space - toggle current item
                local idx=$((top_index + cursor_pos))
                if [[ ${selected[idx]} == true ]]; then
                    selected[idx]=false
                else
                    selected[idx]=true
                fi
                ;;
            "a" | "A") # Select all
                for ((i = 0; i < total_items; i++)); do
                    selected[i]=true
                done
                ;;
            "i" | "I") # Invert selection
                for ((i = 0; i < total_items; i++)); do
                    if [[ ${selected[i]} == true ]]; then
                        selected[i]=false
                    else
                        selected[i]=true
                    fi
                done
                ;;
            "q" | "Q" | $'\x03') # Quit or Ctrl-C
                restore_terminal
                return 1
                ;;
            "" | $'\n' | $'\r') # Enter - confirm
                # Build result
                PURGE_SELECTION_RESULT=""
                for ((i = 0; i < total_items; i++)); do
                    if [[ ${selected[i]} == true ]]; then
                        [[ -n "$PURGE_SELECTION_RESULT" ]] && PURGE_SELECTION_RESULT+=","
                        PURGE_SELECTION_RESULT+="$i"
                    fi
                done
                restore_terminal
                return 0
                ;;
        esac
    done
}
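
# Calling convention (as used by clean_project_artifacts below): per-item sizes
# and "recent" flags travel in comma-separated environment variables, and the
# chosen indices come back in PURGE_SELECTION_RESULT. Values illustrative:
#
#   PURGE_CATEGORY_SIZES="1024,2048" PURGE_RECENT_CATEGORIES="false,true" \
#       select_purge_categories "projA 1MB | node_modules" "projB 2MB | target"
#   echo "$PURGE_SELECTION_RESULT"   # e.g. "0" - recent items start unselected
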
# Main cleanup function - scans and prompts user to select artifacts to clean
clean_project_artifacts() {
    local -a all_found_items=()
    local -a safe_to_clean=()
    local -a recently_modified=()
    # Set up cleanup on interrupt
    # Note: Declared without 'local' so cleanup_scan trap can access them
    scan_pids=()
    scan_temps=()
    # shellcheck disable=SC2329
    cleanup_scan() {
        # Kill all background scans
        for pid in "${scan_pids[@]+"${scan_pids[@]}"}"; do
            kill "$pid" 2> /dev/null || true
        done
        # Clean up temp files
        for temp in "${scan_temps[@]+"${scan_temps[@]}"}"; do
            rm -f "$temp" 2> /dev/null || true
        done
        if [[ -t 1 ]]; then
            stop_inline_spinner
        fi
        echo ""
        exit 130
    }
    trap cleanup_scan INT TERM
    # Start parallel scanning of all paths at once
    if [[ -t 1 ]]; then
        start_inline_spinner "Scanning projects..."
    fi
    # Launch all scans in parallel
    for path in "${PURGE_SEARCH_PATHS[@]}"; do
        if [[ -d "$path" ]]; then
            local scan_output
            scan_output=$(mktemp)
            scan_temps+=("$scan_output")
            # Launch scan in background for true parallelism
            scan_purge_targets "$path" "$scan_output" &
            local scan_pid=$!
            scan_pids+=("$scan_pid")
        fi
    done
    # Wait for all scans to complete
    for pid in "${scan_pids[@]+"${scan_pids[@]}"}"; do
        wait "$pid" 2> /dev/null || true
    done
    if [[ -t 1 ]]; then
        stop_inline_spinner
    fi
    # Collect all results
    for scan_output in "${scan_temps[@]+"${scan_temps[@]}"}"; do
        if [[ -f "$scan_output" ]]; then
            while IFS= read -r item; do
                if [[ -n "$item" ]]; then
                    all_found_items+=("$item")
                fi
            done < "$scan_output"
            rm -f "$scan_output"
        fi
    done
    # Clean up trap
    trap - INT TERM
    if [[ ${#all_found_items[@]} -eq 0 ]]; then
        echo ""
        echo -e "${GREEN}${ICON_SUCCESS}${NC} Great! No old project artifacts to clean"
        printf '\n'
        return 2 # Special code: nothing to clean
    fi
    # Mark recently modified items (for default selection state)
    for item in "${all_found_items[@]}"; do
        if is_recently_modified "$item"; then
            recently_modified+=("$item")
        fi
        # Add all items to safe_to_clean, let user choose
        safe_to_clean+=("$item")
    done
    # Build menu options - one per artifact
    if [[ -t 1 ]]; then
        start_inline_spinner "Calculating sizes..."
    fi
    local -a menu_options=()
    local -a item_paths=()
    local -a item_sizes=()
    local -a item_recent_flags=()
    # Helper to get project name from path
    # For ~/www/pake/src-tauri/target -> returns "pake"
    # For ~/work/code/MyProject/node_modules -> returns "MyProject"
    # Strategy: Find the nearest ancestor directory containing a project indicator file
    get_project_name() {
        local path="$1"
        local artifact_name
        artifact_name=$(basename "$path")

        # Start from the parent of the artifact and walk up
        local current_dir
        current_dir=$(dirname "$path")

        while [[ "$current_dir" != "/" && "$current_dir" != "$HOME" && -n "$current_dir" ]]; do
            # Check if current directory contains any project indicator
            for indicator in "${PROJECT_INDICATORS[@]}"; do
                if [[ -e "$current_dir/$indicator" ]]; then
                    # Found a project root, return its name
                    basename "$current_dir"
                    return 0
                fi
            done
            # Move up one level
            current_dir=$(dirname "$current_dir")
        done

        # Fallback: try the old logic (first directory under search root)
        local search_roots=()
        if [[ ${#PURGE_SEARCH_PATHS[@]} -gt 0 ]]; then
            search_roots=("${PURGE_SEARCH_PATHS[@]}")
        else
            search_roots=("$HOME/www" "$HOME/dev" "$HOME/Projects")
        fi
        for root in "${search_roots[@]}"; do
            root="${root%/}"
            if [[ -n "$root" && "$path" == "$root/"* ]]; then
                local relative_path="${path#"$root"/}"
                echo "$relative_path" | cut -d'/' -f1
                return 0
            fi
        done

        # Final fallback: use grandparent directory
        dirname "$(dirname "$path")" | xargs basename
    }
# Format display with alignment (like app_selector)
format_purge_display() {
    local project_name="$1"
    local artifact_type="$2"
    local size_str="$3"
    # Terminal width for alignment
    local terminal_width=$(tput cols 2> /dev/null || echo 80)
    local fixed_width=28 # Reserve for type and size
    local available_width=$((terminal_width - fixed_width))
    # Bounds: 24-35 chars for project name
    [[ $available_width -lt 24 ]] && available_width=24
    [[ $available_width -gt 35 ]] && available_width=35
    # Truncate project name if needed
    local truncated_name=$(truncate_by_display_width "$project_name" "$available_width")
    local current_width=$(get_display_width "$truncated_name")
    local char_count=${#truncated_name}
    local padding=$((available_width - current_width))
    local printf_width=$((char_count + padding))
    # Format: "project_name size | artifact_type"
    printf "%-*s %9s | %-13s" "$printf_width" "$truncated_name" "$size_str" "$artifact_type"
}
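# Illustrative sketch (hypothetical values): why printf gets char_count plus
# padding rather than available_width. For a name containing wide glyphs,
# character count and display width differ; assuming get_display_width counts
# a CJK glyph as 2 columns:
#   name="项目A"         # 3 characters, 5 display columns
#   available_width=24   # padding = 24 - 5 = 19, printf_width = 3 + 19 = 22
# printf "%-22s" pads the 3-character string to 22 characters, which renders
# as exactly 24 columns on screen.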
# Build menu options - one line per artifact
for item in "${safe_to_clean[@]}"; do
    local project_name=$(get_project_name "$item")
    local artifact_type=$(basename "$item")
    local size_kb=$(get_dir_size_kb "$item")
    local size_human=$(bytes_to_human "$((size_kb * 1024))")
    # Check if recent
    local is_recent=false
    for recent_item in "${recently_modified[@]+"${recently_modified[@]}"}"; do
        if [[ "$item" == "$recent_item" ]]; then
            is_recent=true
            break
        fi
    done
    menu_options+=("$(format_purge_display "$project_name" "$artifact_type" "$size_human")")
    item_paths+=("$item")
    item_sizes+=("$size_kb")
    item_recent_flags+=("$is_recent")
done
if [[ -t 1 ]]; then
    stop_inline_spinner
fi
# Set global vars for selector
export PURGE_CATEGORY_SIZES=$(
    IFS=,
    echo "${item_sizes[*]}"
)
export PURGE_RECENT_CATEGORIES=$(
    IFS=,
    echo "${item_recent_flags[*]}"
)
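# Illustrative sketch (assumed selector contract, hypothetical values): both
# variables are comma-joined and index-aligned with menu_options, e.g.
#   PURGE_CATEGORY_SIZES="102400,51200,2048"
#   PURGE_RECENT_CATEGORIES="false,true,false"
# so entry i of each list describes menu option i.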
# Interactive selection (only if terminal is available)
PURGE_SELECTION_RESULT=""
if [[ -t 0 ]]; then
    if ! select_purge_categories "${menu_options[@]}"; then
        unset PURGE_CATEGORY_SIZES PURGE_RECENT_CATEGORIES PURGE_SELECTION_RESULT
        return 1
    fi
else
    # Non-interactive: select all non-recent items
    for ((i = 0; i < ${#menu_options[@]}; i++)); do
        if [[ ${item_recent_flags[i]} != "true" ]]; then
            [[ -n "$PURGE_SELECTION_RESULT" ]] && PURGE_SELECTION_RESULT+=","
            PURGE_SELECTION_RESULT+="$i"
        fi
    done
fi
if [[ -z "$PURGE_SELECTION_RESULT" ]]; then
    echo ""
    echo -e "${GRAY}No items selected${NC}"
    printf '\n'
    unset PURGE_CATEGORY_SIZES PURGE_RECENT_CATEGORIES PURGE_SELECTION_RESULT
    return 0
fi
# Clean selected items
echo ""
IFS=',' read -r -a selected_indices <<< "$PURGE_SELECTION_RESULT"
local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"
local cleaned_count=0
for idx in "${selected_indices[@]}"; do
    local item_path="${item_paths[idx]}"
    local artifact_type=$(basename "$item_path")
    local project_name=$(get_project_name "$item_path")
    local size_kb="${item_sizes[idx]}"
    local size_human=$(bytes_to_human "$((size_kb * 1024))")
    # Safety checks
    if [[ -z "$item_path" || "$item_path" == "/" || "$item_path" == "$HOME" || "$item_path" != "$HOME/"* ]]; then
        continue
    fi
    if [[ -t 1 ]]; then
        start_inline_spinner "Cleaning $project_name/$artifact_type..."
    fi
    if [[ -e "$item_path" ]]; then
        safe_remove "$item_path" true
        if [[ ! -e "$item_path" ]]; then
            local current_total=$(cat "$stats_dir/purge_stats" 2> /dev/null || echo "0")
            echo "$((current_total + size_kb))" > "$stats_dir/purge_stats"
            ((cleaned_count++))
        fi
    fi
    if [[ -t 1 ]]; then
        stop_inline_spinner
        echo -e "${GREEN}${ICON_SUCCESS}${NC} $project_name - $artifact_type ${GREEN}($size_human)${NC}"
    fi
done
# Update count
echo "$cleaned_count" > "$stats_dir/purge_count"
unset PURGE_CATEGORY_SIZES PURGE_RECENT_CATEGORIES PURGE_SELECTION_RESULT
}
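# Illustrative sketch (hypothetical values): purge_stats accumulates KB across
# runs while purge_count is per-run, so starting from an empty stats file, a
# run that removes a 100 MB target and a 50 MB node_modules leaves:
#   cat "${XDG_CACHE_HOME:-$HOME/.cache}/mole/purge_stats"   # -> 153600
#   cat "${XDG_CACHE_HOME:-$HOME/.cache}/mole/purge_count"   # -> 2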
@@ -1,49 +1,36 @@
#!/bin/bash
# System-Level Cleanup Module
# Deep system cleanup (requires sudo) and Time Machine failed backups

# System-Level Cleanup Module (requires sudo).
set -euo pipefail

# Deep system cleanup (requires sudo)
# System caches, logs, and temp files.
clean_deep_system() {
    # Clean old system caches
    safe_sudo_find_delete "/Library/Caches" "*.cache" "$MOLE_TEMP_FILE_AGE_DAYS" "f" || true
    safe_sudo_find_delete "/Library/Caches" "*.tmp" "$MOLE_TEMP_FILE_AGE_DAYS" "f" || true
    safe_sudo_find_delete "/Library/Caches" "*.log" "$MOLE_LOG_AGE_DAYS" "f" || true

    # Clean temp files - use real paths (macOS /tmp is a symlink to /private/tmp)
    stop_section_spinner
    local cache_cleaned=0
    safe_sudo_find_delete "/Library/Caches" "*.cache" "$MOLE_TEMP_FILE_AGE_DAYS" "f" && cache_cleaned=1 || true
    safe_sudo_find_delete "/Library/Caches" "*.tmp" "$MOLE_TEMP_FILE_AGE_DAYS" "f" && cache_cleaned=1 || true
    safe_sudo_find_delete "/Library/Caches" "*.log" "$MOLE_LOG_AGE_DAYS" "f" && cache_cleaned=1 || true
    [[ $cache_cleaned -eq 1 ]] && log_success "System caches"
    local tmp_cleaned=0
    safe_sudo_find_delete "/private/tmp" "*" "${MOLE_TEMP_FILE_AGE_DAYS}" "f" && tmp_cleaned=1 || true
    safe_sudo_find_delete "/private/var/tmp" "*" "${MOLE_TEMP_FILE_AGE_DAYS}" "f" && tmp_cleaned=1 || true
    [[ $tmp_cleaned -eq 1 ]] && log_success "System temp files"

    # Clean crash reports
    safe_sudo_find_delete "/Library/Logs/DiagnosticReports" "*" "$MOLE_CRASH_REPORT_AGE_DAYS" "f" || true
    log_success "System crash reports"

    # Clean system logs - use real path (macOS /var is a symlink to /private/var)
    safe_sudo_find_delete "/private/var/log" "*.log" "$MOLE_LOG_AGE_DAYS" "f" || true
    safe_sudo_find_delete "/private/var/log" "*.gz" "$MOLE_LOG_AGE_DAYS" "f" || true
    log_success "System logs"

    # Clean Library Updates safely - skip if SIP is enabled to avoid error messages
    # SIP-protected files in /Library/Updates cannot be deleted even with sudo
    if [[ -d "/Library/Updates" && ! -L "/Library/Updates" ]]; then
        if is_sip_enabled; then
            # SIP is enabled, skip /Library/Updates entirely to avoid error messages
            # These files are system-protected and cannot be removed
            : # No-op, silently skip
        else
            # SIP is disabled, attempt cleanup with restricted-flag check
            if ! is_sip_enabled; then
                local updates_cleaned=0
                while IFS= read -r -d '' item; do
                    # Skip system-protected files (restricted flag)
                    if [[ -z "$item" ]] || [[ ! "$item" =~ ^/Library/Updates/[^/]+$ ]]; then
                        debug_log "Skipping malformed path: $item"
                        continue
                    fi
                    local item_flags
                    item_flags=$(command stat -f%Sf "$item" 2> /dev/null || echo "")
                    item_flags=$($STAT_BSD -f%Sf "$item" 2> /dev/null || echo "")
                    if [[ "$item_flags" == *"restricted"* ]]; then
                        continue
                    fi

                    if safe_sudo_remove "$item"; then
                        ((updates_cleaned++))
                    fi
@@ -51,21 +38,15 @@ clean_deep_system() {
                [[ $updates_cleaned -gt 0 ]] && log_success "System library updates"
            fi
        fi

    # Clean macOS Install Data (system upgrade leftovers)
    # Only remove if older than 30 days to ensure system stability
    if [[ -d "/macOS Install Data" ]]; then
        local mtime=$(get_file_mtime "/macOS Install Data")
        local age_days=$((($(date +%s) - mtime) / 86400))

        debug_log "Found macOS Install Data (age: ${age_days} days)"

        if [[ $age_days -ge 30 ]]; then
            local size_kb=$(get_path_size_kb "/macOS Install Data")
            if [[ -n "$size_kb" && "$size_kb" -gt 0 ]]; then
                local size_human=$(bytes_to_human "$((size_kb * 1024))")
                debug_log "Cleaning macOS Install Data: $size_human (${age_days} days old)"

                if safe_sudo_remove "/macOS Install Data"; then
                    log_success "macOS Install Data ($size_human)"
                fi
@@ -74,172 +55,175 @@ clean_deep_system() {
            debug_log "Keeping macOS Install Data (only ${age_days} days old, needs 30+)"
        fi
    fi

    # Clean browser code signature caches
    # These are regenerated automatically when needed
    if [[ -t 1 ]]; then
        MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning system caches..."
    fi
    start_section_spinner "Scanning system caches..."
    local code_sign_cleaned=0
    local found_count=0
    local last_update_time=$(date +%s)
    local update_interval=2
    while IFS= read -r -d '' cache_dir; do
        debug_log "Found code sign cache: $cache_dir"
        if safe_remove "$cache_dir" true; then
            ((code_sign_cleaned++))
        fi
    done < <(find /private/var/folders -type d -name "*.code_sign_clone" -path "*/X/*" -print0 2> /dev/null || true)

    if [[ -t 1 ]]; then stop_inline_spinner; fi

        ((found_count++))
        local current_time=$(date +%s)
        if [[ $((current_time - last_update_time)) -ge $update_interval ]]; then
            start_section_spinner "Scanning system caches... ($found_count found)"
            last_update_time=$current_time
        fi
    done < <(run_with_timeout 5 command find /private/var/folders -type d -name "*.code_sign_clone" -path "*/X/*" -print0 2> /dev/null || true)
    stop_section_spinner
    [[ $code_sign_cleaned -gt 0 ]] && log_success "Browser code signature caches ($code_sign_cleaned items)"

    # Clean system diagnostic logs
    safe_sudo_find_delete "/private/var/db/diagnostics/Special" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
    safe_sudo_find_delete "/private/var/db/diagnostics/Persist" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
    safe_sudo_find_delete "/private/var/db/DiagnosticPipeline" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
    log_success "System diagnostic logs"

    # Clean power logs
    safe_sudo_find_delete "/private/var/db/powerlog" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
    log_success "Power logs"
    safe_sudo_find_delete "/private/var/db/reportmemoryexception/MemoryLimitViolations" "*" "30" "f" || true
    log_success "Memory exception reports"
    start_section_spinner "Cleaning diagnostic trace logs..."
    local diag_logs_cleaned=0
    safe_sudo_find_delete "/private/var/db/diagnostics/Persist" "*.tracev3" "30" "f" && diag_logs_cleaned=1 || true
    safe_sudo_find_delete "/private/var/db/diagnostics/Special" "*.tracev3" "30" "f" && diag_logs_cleaned=1 || true
    stop_section_spinner
    [[ $diag_logs_cleaned -eq 1 ]] && log_success "System diagnostic trace logs"
}
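# Illustrative sketch (hypothetical timestamps): the 30-day guard above is
# plain epoch arithmetic, e.g.
#   mtime=1700000000; now=1702592000
#   age_days=$(((now - mtime) / 86400))   # -> 30, so cleanup proceeds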
# Clean Time Machine failed backups
# Incomplete Time Machine backups.
clean_time_machine_failed_backups() {
    local tm_cleaned=0

    # Check if Time Machine is configured
    if command -v tmutil > /dev/null 2>&1; then
        if tmutil destinationinfo 2>&1 | grep -q "No destinations configured"; then
            echo -e " ${GREEN}${ICON_SUCCESS}${NC} No failed Time Machine backups found"
            return 0
        fi
    fi

    if [[ ! -d "/Volumes" ]]; then
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} No failed Time Machine backups found"
    if ! command -v tmutil > /dev/null 2>&1; then
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
        return 0
    fi

    # Skip if a backup is running
    if pgrep -x "backupd" > /dev/null 2>&1; then
    start_section_spinner "Checking Time Machine configuration..."
    local spinner_active=true
    local tm_info
    tm_info=$(run_with_timeout 2 tmutil destinationinfo 2>&1 || echo "failed")
    if [[ "$tm_info" == *"No destinations configured"* || "$tm_info" == "failed" ]]; then
        if [[ "$spinner_active" == "true" ]]; then
            stop_section_spinner
        fi
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
        return 0
    fi
    if [[ ! -d "/Volumes" ]]; then
        if [[ "$spinner_active" == "true" ]]; then
            stop_section_spinner
        fi
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
        return 0
    fi
    if tmutil status 2> /dev/null | grep -q "Running = 1"; then
        if [[ "$spinner_active" == "true" ]]; then
            stop_section_spinner
        fi
        echo -e " ${YELLOW}!${NC} Time Machine backup in progress, skipping cleanup"
        return 0
    fi

    if [[ "$spinner_active" == "true" ]]; then
        start_section_spinner "Checking backup volumes..."
    fi
    # Fast pre-scan for backup volumes to avoid slow tmutil checks.
    local -a backup_volumes=()
    for volume in /Volumes/*; do
        [[ -d "$volume" ]] || continue

        # Skip system and network volumes
        [[ "$volume" == "/Volumes/Macintosh HD" || "$volume" == "/" ]] && continue

        if [[ -t 1 ]]; then
            MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning backup volumes..."
        fi

        # Skip if volume is a symlink (security check)
        [[ -L "$volume" ]] && continue

        # Check if this is a Time Machine destination
        if command -v tmutil > /dev/null 2>&1; then
            if ! tmutil destinationinfo 2> /dev/null | grep -q "$(basename "$volume")"; then
                continue
            fi
        if [[ -d "$volume/Backups.backupdb" ]] || [[ -d "$volume/.MobileBackups" ]]; then
            backup_volumes+=("$volume")
        fi

        local fs_type=$(command df -T "$volume" 2> /dev/null | tail -1 | awk '{print $2}')
    done
    if [[ ${#backup_volumes[@]} -eq 0 ]]; then
        if [[ "$spinner_active" == "true" ]]; then
            stop_section_spinner
        fi
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
        return 0
    fi
    if [[ "$spinner_active" == "true" ]]; then
        start_section_spinner "Scanning backup volumes..."
    fi
    for volume in "${backup_volumes[@]}"; do
        local fs_type
        fs_type=$(run_with_timeout 1 command df -T "$volume" 2> /dev/null | tail -1 | awk '{print $2}' || echo "unknown")
        case "$fs_type" in
            nfs | smbfs | afpfs | cifs | webdav) continue ;;
            nfs | smbfs | afpfs | cifs | webdav | unknown) continue ;;
        esac

        # HFS+ style backups (Backups.backupdb)
        local backupdb_dir="$volume/Backups.backupdb"
        if [[ -d "$backupdb_dir" ]]; then
            while IFS= read -r inprogress_file; do
                [[ -d "$inprogress_file" ]] || continue

                # Only delete old failed backups (safety window)
                # Only delete old incomplete backups (safety window).
                local file_mtime=$(get_file_mtime "$inprogress_file")
                local current_time=$(date +%s)
                local hours_old=$(((current_time - file_mtime) / 3600))

                if [[ $hours_old -lt $MOLE_TM_BACKUP_SAFE_HOURS ]]; then
                    continue
                fi

                local size_kb=$(get_path_size_kb "$inprogress_file")
                [[ "$size_kb" -le 0 ]] && continue

                if [[ "$spinner_active" == "true" ]]; then
                    stop_section_spinner
                    spinner_active=false
                fi
                local backup_name=$(basename "$inprogress_file")
                local size_human=$(bytes_to_human "$((size_kb * 1024))")

                if [[ "$DRY_RUN" == "true" ]]; then
                    echo -e " ${YELLOW}→${NC} Failed backup: $backup_name ${YELLOW}($size_human dry)${NC}"
                    echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Incomplete backup: $backup_name ${YELLOW}($size_human dry)${NC}"
                    ((tm_cleaned++))
                    note_activity
                    continue
                fi

                # Real deletion
                if ! command -v tmutil > /dev/null 2>&1; then
                    echo -e " ${YELLOW}!${NC} tmutil not available, skipping: $backup_name"
                    continue
                fi

                if tmutil delete "$inprogress_file" 2> /dev/null; then
                    echo -e " ${GREEN}${ICON_SUCCESS}${NC} Failed backup: $backup_name ${GREEN}($size_human)${NC}"
                    echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete backup: $backup_name ${GREEN}($size_human)${NC}"
                    ((tm_cleaned++))
                    ((files_cleaned++))
                    ((total_size_cleaned += size_kb))
                    ((total_items++))
                    note_activity
                else
                    echo -e " ${YELLOW}!${NC} Could not delete: $backup_name (try manually with sudo)"
                    echo -e " ${YELLOW}!${NC} Could not delete: $backup_name · try manually with sudo"
                fi
            done < <(run_with_timeout 15 find "$backupdb_dir" -maxdepth 3 -type d \( -name "*.inProgress" -o -name "*.inprogress" \) 2> /dev/null || true)
        fi
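        # Illustrative sketch (assumed semantics of the project helper):
        # run_with_timeout runs a command and gives up after N seconds, so a
        # wedged network mount cannot hang the scan, e.g.
        #   fs_type=$(run_with_timeout 1 command df -T "$volume" | tail -1 | awk '{print $2}' || echo "unknown")
        # falls back to "unknown", which the case statement above treats as "skip".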
        # APFS style backups (.backupbundle or .sparsebundle)
        # APFS bundles.
        for bundle in "$volume"/*.backupbundle "$volume"/*.sparsebundle; do
            [[ -e "$bundle" ]] || continue
            [[ -d "$bundle" ]] || continue

            # Check if bundle is mounted
            local bundle_name=$(basename "$bundle")
            local mounted_path=$(hdiutil info 2> /dev/null | grep -A 5 "image-path.*$bundle_name" | grep "/Volumes/" | awk '{print $1}' | head -1 || echo "")

            if [[ -n "$mounted_path" && -d "$mounted_path" ]]; then
                while IFS= read -r inprogress_file; do
                    [[ -d "$inprogress_file" ]] || continue

                    # Only delete old failed backups (safety window)
                    local file_mtime=$(get_file_mtime "$inprogress_file")
                    local current_time=$(date +%s)
                    local hours_old=$(((current_time - file_mtime) / 3600))

                    if [[ $hours_old -lt $MOLE_TM_BACKUP_SAFE_HOURS ]]; then
                        continue
                    fi

                    local size_kb=$(get_path_size_kb "$inprogress_file")
                    [[ "$size_kb" -le 0 ]] && continue

                    if [[ "$spinner_active" == "true" ]]; then
                        stop_section_spinner
                        spinner_active=false
                    fi
                    local backup_name=$(basename "$inprogress_file")
                    local size_human=$(bytes_to_human "$((size_kb * 1024))")

                    if [[ "$DRY_RUN" == "true" ]]; then
                        echo -e " ${YELLOW}→${NC} Failed APFS backup in $bundle_name: $backup_name ${YELLOW}($size_human dry)${NC}"
                        echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Incomplete APFS backup in $bundle_name: $backup_name ${YELLOW}($size_human dry)${NC}"
                        ((tm_cleaned++))
                        note_activity
                        continue
                    fi

                    # Real deletion
                    if ! command -v tmutil > /dev/null 2>&1; then
                        continue
                    fi

                    if tmutil delete "$inprogress_file" 2> /dev/null; then
                        echo -e " ${GREEN}${ICON_SUCCESS}${NC} Failed APFS backup in $bundle_name: $backup_name ${GREEN}($size_human)${NC}"
                        echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete APFS backup in $bundle_name: $backup_name ${GREEN}($size_human)${NC}"
                        ((tm_cleaned++))
                        ((files_cleaned++))
                        ((total_size_cleaned += size_kb))
@@ -251,61 +235,86 @@ clean_time_machine_failed_backups() {
                done < <(run_with_timeout 15 find "$mounted_path" -maxdepth 3 -type d \( -name "*.inProgress" -o -name "*.inprogress" \) 2> /dev/null || true)
            fi
        done
        if [[ -t 1 ]]; then stop_inline_spinner; fi
    done

    if [[ "$spinner_active" == "true" ]]; then
        stop_section_spinner
    fi
    if [[ $tm_cleaned -eq 0 ]]; then
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} No failed Time Machine backups found"
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
    fi
}

# Clean local APFS snapshots (older than 24h)
# Local APFS snapshots (keep the most recent).
clean_local_snapshots() {
    # Check if tmutil is available
    if ! command -v tmutil > /dev/null 2>&1; then
        return 0
    fi

    if [[ -t 1 ]]; then
        MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking local snapshots..."
    fi

    # Check for local snapshots
    start_section_spinner "Checking local snapshots..."
    local snapshot_list
    snapshot_list=$(tmutil listlocalsnapshots / 2> /dev/null)

    if [[ -t 1 ]]; then stop_inline_spinner; fi

    stop_section_spinner
    [[ -z "$snapshot_list" ]] && return 0

    # Parse and clean snapshots
    local cleaned_count=0
    local total_cleaned_size=0 # Size estimation not possible without tmutil thinning

    # Get current time
    local current_ts=$(date +%s)
    local one_day_ago=$((current_ts - 86400))

    local newest_ts=0
    local newest_name=""
    local -a snapshots=()
    while IFS= read -r line; do
        # Format: com.apple.TimeMachine.2023-10-25-120000
        if [[ "$line" =~ com\.apple\.TimeMachine\.([0-9]{4})-([0-9]{2})-([0-9]{2})-([0-9]{6}) ]]; then
            local snap_name="${BASH_REMATCH[0]}"
            snapshots+=("$snap_name")
            local date_str="${BASH_REMATCH[1]}-${BASH_REMATCH[2]}-${BASH_REMATCH[3]} ${BASH_REMATCH[4]:0:2}:${BASH_REMATCH[4]:2:2}:${BASH_REMATCH[4]:4:2}"
            local snap_ts=$(date -j -f "%Y-%m-%d %H:%M:%S" "$date_str" "+%s" 2> /dev/null || echo "0")

            # Skip if parsing failed
            [[ "$snap_ts" == "0" ]] && continue
            if [[ "$snap_ts" -gt "$newest_ts" ]]; then
                newest_ts="$snap_ts"
                newest_name="$snap_name"
            fi
        fi
    done <<< "$snapshot_list"
    # If snapshot is older than 24 hours
    if [[ $snap_ts -lt $one_day_ago ]]; then
        local snap_name="${BASH_REMATCH[0]}"
    [[ ${#snapshots[@]} -eq 0 ]] && return 0
    [[ -z "$newest_name" ]] && return 0

    local deletable_count=$((${#snapshots[@]} - 1))
    [[ $deletable_count -le 0 ]] && return 0

    if [[ "$DRY_RUN" != "true" ]]; then
        if [[ ! -t 0 ]]; then
            echo -e " ${YELLOW}!${NC} ${#snapshots[@]} local snapshot(s) found, skipping non-interactive mode"
            echo -e " ${YELLOW}${ICON_WARNING}${NC} ${GRAY}Tip: Snapshots may cause Disk Utility to show different 'Available' values${NC}"
            return 0
        fi
        echo -e " ${YELLOW}!${NC} Time Machine local snapshots found"
        echo -e " ${GRAY}macOS can recreate them if needed.${NC}"
        echo -e " ${GRAY}The most recent snapshot will be kept.${NC}"
        echo -ne " ${PURPLE}${ICON_ARROW}${NC} Remove all local snapshots except the most recent one? ${GREEN}Enter${NC} continue, ${GRAY}Space${NC} skip: "
        local choice
        if type read_key > /dev/null 2>&1; then
            choice=$(read_key)
        else
            IFS= read -r -s -n 1 choice || choice=""
            if [[ -z "$choice" || "$choice" == $'\n' || "$choice" == $'\r' ]]; then
                choice="ENTER"
            fi
        fi
        if [[ "$choice" == "ENTER" ]]; then
            printf "\r\033[K" # Clear the prompt line
        else
            echo -e " ${GRAY}Skipped${NC}"
            return 0
        fi
    fi

    local snap_name
    for snap_name in "${snapshots[@]}"; do
        if [[ "$snap_name" =~ com\.apple\.TimeMachine\.([0-9]{4})-([0-9]{2})-([0-9]{2})-([0-9]{6}) ]]; then
            if [[ "${BASH_REMATCH[0]}" != "$newest_name" ]]; then
                if [[ "$DRY_RUN" == "true" ]]; then
                    echo -e " ${YELLOW}→${NC} Old local snapshot: $snap_name ${YELLOW}(dry)${NC}"
                    echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Local snapshot: $snap_name ${YELLOW}dry-run${NC}"
                    ((cleaned_count++))
                    note_activity
                else
                    # Secure removal
                    if safe_sudo tmutil deletelocalsnapshots "${BASH_REMATCH[1]}-${BASH_REMATCH[2]}-${BASH_REMATCH[3]}-${BASH_REMATCH[4]}" > /dev/null 2>&1; then
                    if sudo tmutil deletelocalsnapshots "${BASH_REMATCH[1]}-${BASH_REMATCH[2]}-${BASH_REMATCH[3]}-${BASH_REMATCH[4]}" > /dev/null 2>&1; then
                        echo -e " ${GREEN}${ICON_SUCCESS}${NC} Removed snapshot: $snap_name"
                        ((cleaned_count++))
                        note_activity
@@ -315,9 +324,8 @@ clean_local_snapshots() {
                fi
            fi
        fi
    done <<< "$snapshot_list"

    done
    if [[ $cleaned_count -gt 0 && "$DRY_RUN" != "true" ]]; then
        log_success "Cleaned $cleaned_count old local snapshots"
        log_success "Cleaned $cleaned_count local snapshots, kept latest"
    fi
}
@@ -1,160 +1,92 @@
#!/bin/bash
# User Data Cleanup Module

set -euo pipefail

# Clean user essentials (caches, logs, trash, crash reports)
clean_user_essentials() {
    start_section_spinner "Scanning caches..."
    safe_clean ~/Library/Caches/* "User app cache"
    stop_section_spinner
    safe_clean ~/Library/Logs/* "User app logs"
    safe_clean ~/.Trash/* "Trash"

    # Empty trash on mounted volumes
    if [[ -d "/Volumes" && "$DRY_RUN" != "true" ]]; then
        if [[ -t 1 ]]; then
            MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning external volumes..."
        fi
        for volume in /Volumes/*; do
            [[ -d "$volume" && -d "$volume/.Trashes" && -w "$volume" ]] || continue

            # Skip network volumes
            local fs_type=$(command df -T "$volume" 2> /dev/null | tail -1 | awk '{print $2}')
            case "$fs_type" in
                nfs | smbfs | afpfs | cifs | webdav) continue ;;
            esac

            # Verify volume is mounted and not a symlink
            mount | grep -q "on $volume " || continue
            [[ -L "$volume/.Trashes" ]] && continue

            # Safely iterate and remove each item
            while IFS= read -r -d '' item; do
                safe_remove "$item" true || true
            done < <(command find "$volume/.Trashes" -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true)
        done
        if [[ -t 1 ]]; then stop_inline_spinner; fi
    fi

    safe_clean ~/Library/DiagnosticReports/* "Diagnostic reports"
    safe_clean ~/Library/Caches/com.apple.QuickLook.thumbnailcache "QuickLook thumbnails"
    safe_clean ~/Library/Caches/Quick\ Look/* "QuickLook cache"
    safe_clean ~/Library/Caches/com.apple.iconservices* "Icon services cache"
    safe_clean ~/Library/Caches/CloudKit/* "CloudKit cache"

    # Clean incomplete downloads
    safe_clean ~/Downloads/*.download "Incomplete downloads (Safari)"
    safe_clean ~/Downloads/*.crdownload "Incomplete downloads (Chrome)"
    safe_clean ~/Downloads/*.part "Incomplete downloads (partial)"

    # Additional user-level caches
    safe_clean ~/Library/Autosave\ Information/* "Autosave information"
    safe_clean ~/Library/IdentityCaches/* "Identity caches"
    safe_clean ~/Library/Suggestions/* "Suggestions cache (Siri)"
    safe_clean ~/Library/Calendars/Calendar\ Cache "Calendar cache"
    safe_clean ~/Library/Application\ Support/AddressBook/Sources/*/Photos.cache "Address Book photo cache"
}

# Clean Finder metadata (.DS_Store files)
clean_finder_metadata() {
    if [[ "$PROTECT_FINDER_METADATA" == "true" ]]; then
        if is_path_whitelisted "$HOME/.Trash"; then
            note_activity
            echo -e " ${GRAY}${ICON_SUCCESS}${NC} Finder metadata (whitelisted)"
            echo -e " ${GREEN}${ICON_EMPTY}${NC} Trash · whitelist protected"
        else
            clean_ds_store_tree "$HOME" "Home directory (.DS_Store)"

            if [[ -d "/Volumes" ]]; then
                for volume in /Volumes/*; do
                    [[ -d "$volume" && -w "$volume" ]] || continue

                    local fs_type=""
                    fs_type=$(command df -T "$volume" 2> /dev/null | tail -1 | awk '{print $2}')
                    case "$fs_type" in
                        nfs | smbfs | afpfs | cifs | webdav) continue ;;
                    esac

                    clean_ds_store_tree "$volume" "$(basename "$volume") volume (.DS_Store)"
                done
            fi
        safe_clean ~/.Trash/* "Trash"
    fi
}
# Clean macOS system caches
clean_macos_system_caches() {
    safe_clean ~/Library/Saved\ Application\ State/* "Saved application states"
    safe_clean ~/Library/Caches/com.apple.spotlight "Spotlight cache"
# Remove old Google Chrome versions while keeping Current.
clean_chrome_old_versions() {
    local -a app_paths=(
        "/Applications/Google Chrome.app"
        "$HOME/Applications/Google Chrome.app"
    )

    # MOVED: Spotlight cache cleanup moved to optimize command

    safe_clean ~/Library/Caches/com.apple.photoanalysisd "Photo analysis cache"
    safe_clean ~/Library/Caches/com.apple.akd "Apple ID cache"
    safe_clean ~/Library/Caches/com.apple.Safari/Webpage\ Previews/* "Safari webpage previews"
    safe_clean ~/Library/Application\ Support/CloudDocs/session/db/* "iCloud session cache"
    safe_clean ~/Library/Caches/com.apple.Safari/fsCachedData/* "Safari cached data"
    safe_clean ~/Library/Caches/com.apple.WebKit.WebContent/* "WebKit content cache"
    safe_clean ~/Library/Caches/com.apple.WebKit.Networking/* "WebKit network cache"
}

# Clean sandboxed app caches
clean_sandboxed_app_caches() {
    safe_clean ~/Library/Containers/com.apple.wallpaper.agent/Data/Library/Caches/* "Wallpaper agent cache"
    safe_clean ~/Library/Containers/com.apple.mediaanalysisd/Data/Library/Caches/* "Media analysis cache"
    safe_clean ~/Library/Containers/com.apple.AppStore/Data/Library/Caches/* "App Store cache"
    safe_clean ~/Library/Containers/com.apple.configurator.xpc.InternetService/Data/tmp/* "Apple Configurator temp files"

    # Clean sandboxed app caches - iterate quietly to avoid UI flashing
    local containers_dir="$HOME/Library/Containers"
    [[ ! -d "$containers_dir" ]] && return 0

    if [[ -t 1 ]]; then
        MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning sandboxed apps..."
    # Use -f to match Chrome Helper processes as well
    if pgrep -f "Google Chrome" > /dev/null 2>&1; then
        echo -e " ${YELLOW}${ICON_WARNING}${NC} Google Chrome running · old versions cleanup skipped"
        return 0
    fi

    local total_size=0
    local cleaned_count=0
    local found_any=false
    local total_size=0
    local cleaned_any=false

    for container_dir in "$containers_dir"/*; do
        [[ -d "$container_dir" ]] || continue
    for app_path in "${app_paths[@]}"; do
        [[ -d "$app_path" ]] || continue

        # Extract bundle ID and check protection status early
        local bundle_id=$(basename "$container_dir")
        if should_protect_data "$bundle_id"; then
        local versions_dir="$app_path/Contents/Frameworks/Google Chrome Framework.framework/Versions"
        [[ -d "$versions_dir" ]] || continue

        local current_link="$versions_dir/Current"
        [[ -L "$current_link" ]] || continue

        local current_version
        current_version=$(readlink "$current_link" 2> /dev/null || true)
        current_version="${current_version##*/}"
        [[ -n "$current_version" ]] || continue

        local -a old_versions=()
        local dir name
        for dir in "$versions_dir"/*; do
            [[ -d "$dir" ]] || continue
            name=$(basename "$dir")
            [[ "$name" == "Current" ]] && continue
            [[ "$name" == "$current_version" ]] && continue
            if is_path_whitelisted "$dir"; then
                continue
            fi
            old_versions+=("$dir")
        done

        if [[ ${#old_versions[@]} -eq 0 ]]; then
            continue
        fi

        local cache_dir="$container_dir/Data/Library/Caches"
        # Check if dir exists and has content
        if [[ -d "$cache_dir" ]]; then
            # Fast check if empty (avoid expensive size calc on empty dirs)
            if [[ -n "$(ls -A "$cache_dir" 2> /dev/null)" ]]; then
                # Get size
                local size=$(get_path_size_kb "$cache_dir")
                ((total_size += size))
                found_any=true
                ((cleaned_count++))

                if [[ "$DRY_RUN" != "true" ]]; then
                    # Clean contents safely
                    # We know this is a user cache path, so rm -rf is acceptable here
                    # provided we keep the Cache directory itself
                    for item in "${cache_dir:?}"/*; do
                        safe_remove "$item" true || true
                    done
        for dir in "${old_versions[@]}"; do
            local size_kb
            size_kb=$(get_path_size_kb "$dir" || echo 0)
            size_kb="${size_kb:-0}"
            total_size=$((total_size + size_kb))
            ((cleaned_count++))
            cleaned_any=true
            if [[ "$DRY_RUN" != "true" ]]; then
                if has_sudo_session; then
                    safe_sudo_remove "$dir" > /dev/null 2>&1 || true
                else
                    safe_remove "$dir" true > /dev/null 2>&1 || true
                fi
            fi
        fi
        done
    done

    if [[ -t 1 ]]; then stop_inline_spinner; fi

    if [[ "$found_any" == "true" ]]; then
        local size_human=$(bytes_to_human "$((total_size * 1024))")
    if [[ "$cleaned_any" == "true" ]]; then
        local size_human
        size_human=$(bytes_to_human "$((total_size * 1024))")
        if [[ "$DRY_RUN" == "true" ]]; then
            echo -e " ${YELLOW}→${NC} Sandboxed app caches ${YELLOW}($size_human dry)${NC}"
            echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Chrome old versions ${YELLOW}(${cleaned_count} dirs, $size_human dry)${NC}"
        else
            echo -e " ${GREEN}${ICON_SUCCESS}${NC} Sandboxed app caches ${GREEN}($size_human)${NC}"
            echo -e " ${GREEN}${ICON_SUCCESS}${NC} Chrome old versions ${GREEN}(${cleaned_count} dirs, $size_human)${NC}"
        fi
        # Update global counters
        ((files_cleaned += cleaned_count))
        ((total_size_cleaned += total_size))
        ((total_items++))
@@ -162,16 +94,305 @@ clean_sandboxed_app_caches() {
    fi
}
# Clean browser caches (Safari, Chrome, Edge, Firefox, etc.)
clean_browsers() {
    safe_clean ~/Library/Caches/com.apple.Safari/* "Safari cache"
# Remove old Microsoft Edge versions while keeping Current.
clean_edge_old_versions() {
    local -a app_paths=(
        "/Applications/Microsoft Edge.app"
        "$HOME/Applications/Microsoft Edge.app"
    )

    # Chrome/Chromium
    # Use -f to match Edge Helper processes as well
    if pgrep -f "Microsoft Edge" > /dev/null 2>&1; then
        echo -e " ${YELLOW}${ICON_WARNING}${NC} Microsoft Edge running · old versions cleanup skipped"
        return 0
    fi

    local cleaned_count=0
    local total_size=0
    local cleaned_any=false

    for app_path in "${app_paths[@]}"; do
        [[ -d "$app_path" ]] || continue

        local versions_dir="$app_path/Contents/Frameworks/Microsoft Edge Framework.framework/Versions"
        [[ -d "$versions_dir" ]] || continue

        local current_link="$versions_dir/Current"
        [[ -L "$current_link" ]] || continue

        local current_version
        current_version=$(readlink "$current_link" 2> /dev/null || true)
        current_version="${current_version##*/}"
        [[ -n "$current_version" ]] || continue

        local -a old_versions=()
        local dir name
        for dir in "$versions_dir"/*; do
            [[ -d "$dir" ]] || continue
            name=$(basename "$dir")
            [[ "$name" == "Current" ]] && continue
            [[ "$name" == "$current_version" ]] && continue
            if is_path_whitelisted "$dir"; then
                continue
            fi
            old_versions+=("$dir")
        done

        if [[ ${#old_versions[@]} -eq 0 ]]; then
            continue
        fi

        for dir in "${old_versions[@]}"; do
            local size_kb
            size_kb=$(get_path_size_kb "$dir" || echo 0)
            size_kb="${size_kb:-0}"
            total_size=$((total_size + size_kb))
            ((cleaned_count++))
            cleaned_any=true
            if [[ "$DRY_RUN" != "true" ]]; then
                if has_sudo_session; then
                    safe_sudo_remove "$dir" > /dev/null 2>&1 || true
                else
                    safe_remove "$dir" true > /dev/null 2>&1 || true
                fi
            fi
        done
    done

    if [[ "$cleaned_any" == "true" ]]; then
        local size_human
        size_human=$(bytes_to_human "$((total_size * 1024))")
        if [[ "$DRY_RUN" == "true" ]]; then
            echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Edge old versions ${YELLOW}(${cleaned_count} dirs, $size_human dry)${NC}"
        else
            echo -e " ${GREEN}${ICON_SUCCESS}${NC} Edge old versions ${GREEN}(${cleaned_count} dirs, $size_human)${NC}"
        fi
        ((files_cleaned += cleaned_count))
        ((total_size_cleaned += total_size))
        ((total_items++))
        note_activity
    fi
}
scan_external_volumes() {
    [[ -d "/Volumes" ]] || return 0
    local -a candidate_volumes=()
    local -a network_volumes=()
    for volume in /Volumes/*; do
        [[ -d "$volume" && -w "$volume" && ! -L "$volume" ]] || continue
        [[ "$volume" == "/" || "$volume" == "/Volumes/Macintosh HD" ]] && continue
        local protocol=""
        protocol=$(run_with_timeout 1 command diskutil info "$volume" 2> /dev/null | grep -i "Protocol:" | awk '{print $2}' || echo "")
        case "$protocol" in
            SMB | NFS | AFP | CIFS | WebDAV)
                network_volumes+=("$volume")
                continue
                ;;
        esac
        local fs_type=""
        fs_type=$(run_with_timeout 1 command df -T "$volume" 2> /dev/null | tail -1 | awk '{print $2}' || echo "")
        case "$fs_type" in
            nfs | smbfs | afpfs | cifs | webdav)
                network_volumes+=("$volume")
                continue
                ;;
        esac
        candidate_volumes+=("$volume")
    done
    local volume_count=${#candidate_volumes[@]}
    local network_count=${#network_volumes[@]}
    if [[ $volume_count -eq 0 ]]; then
        if [[ $network_count -gt 0 ]]; then
            echo -e " ${GRAY}${ICON_LIST}${NC} External volumes (${network_count} network volume(s) skipped)"
            note_activity
        fi
        return 0
    fi
    start_section_spinner "Scanning $volume_count external volume(s)..."
    for volume in "${candidate_volumes[@]}"; do
        [[ -d "$volume" && -r "$volume" ]] || continue
        local volume_trash="$volume/.Trashes"
        if [[ -d "$volume_trash" && "$DRY_RUN" != "true" ]] && ! is_path_whitelisted "$volume_trash"; then
            while IFS= read -r -d '' item; do
                safe_remove "$item" true || true
            done < <(command find "$volume_trash" -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true)
        fi
        if [[ "$PROTECT_FINDER_METADATA" != "true" ]]; then
            clean_ds_store_tree "$volume" "$(basename "$volume") volume (.DS_Store)"
        fi
    done
    stop_section_spinner
}
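# Illustrative sketch (hypothetical volume, assumed command output): the
# two-step network check above first asks diskutil for the mount protocol,
# then falls back to the second field of the df -T output that this module
# treats as the filesystem type, e.g.
#   diskutil info /Volumes/NAS | grep -i "Protocol:"   # -> "Protocol: SMB"
#   df -T /Volumes/NAS | tail -1 | awk '{print $2}'    # -> "smbfs"
# either answer marks the volume as network-backed and excludes it.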
# Finder metadata (.DS_Store).
clean_finder_metadata() {
    stop_section_spinner
    if [[ "$PROTECT_FINDER_METADATA" == "true" ]]; then
        note_activity
        echo -e " ${GREEN}${ICON_EMPTY}${NC} Finder metadata · whitelist protected"
        return
    fi
    clean_ds_store_tree "$HOME" "Home directory (.DS_Store)"
}
# macOS system caches and user-level leftovers.
clean_macos_system_caches() {
    stop_section_spinner
    # safe_clean already checks protected paths.
    safe_clean ~/Library/Saved\ Application\ State/* "Saved application states" || true
    safe_clean ~/Library/Caches/com.apple.photoanalysisd "Photo analysis cache" || true
    safe_clean ~/Library/Caches/com.apple.akd "Apple ID cache" || true
    safe_clean ~/Library/Caches/com.apple.WebKit.Networking/* "WebKit network cache" || true
    safe_clean ~/Library/DiagnosticReports/* "Diagnostic reports" || true
    safe_clean ~/Library/Caches/com.apple.QuickLook.thumbnailcache "QuickLook thumbnails" || true
    safe_clean ~/Library/Caches/Quick\ Look/* "QuickLook cache" || true
    safe_clean ~/Library/Caches/com.apple.iconservices* "Icon services cache" || true
    safe_clean ~/Downloads/*.download "Safari incomplete downloads" || true
    safe_clean ~/Downloads/*.crdownload "Chrome incomplete downloads" || true
    safe_clean ~/Downloads/*.part "Partial incomplete downloads" || true
    safe_clean ~/Library/Autosave\ Information/* "Autosave information" || true
    safe_clean ~/Library/IdentityCaches/* "Identity caches" || true
    safe_clean ~/Library/Suggestions/* "Siri suggestions cache" || true
    safe_clean ~/Library/Calendars/Calendar\ Cache "Calendar cache" || true
    safe_clean ~/Library/Application\ Support/AddressBook/Sources/*/Photos.cache "Address Book photo cache" || true
}
clean_recent_items() {
    stop_section_spinner
    local shared_dir="$HOME/Library/Application Support/com.apple.sharedfilelist"
    local -a recent_lists=(
        "$shared_dir/com.apple.LSSharedFileList.RecentApplications.sfl2"
        "$shared_dir/com.apple.LSSharedFileList.RecentDocuments.sfl2"
        "$shared_dir/com.apple.LSSharedFileList.RecentServers.sfl2"
        "$shared_dir/com.apple.LSSharedFileList.RecentHosts.sfl2"
        "$shared_dir/com.apple.LSSharedFileList.RecentApplications.sfl"
        "$shared_dir/com.apple.LSSharedFileList.RecentDocuments.sfl"
        "$shared_dir/com.apple.LSSharedFileList.RecentServers.sfl"
        "$shared_dir/com.apple.LSSharedFileList.RecentHosts.sfl"
    )
    if [[ -d "$shared_dir" ]]; then
        for sfl_file in "${recent_lists[@]}"; do
            [[ -e "$sfl_file" ]] && safe_clean "$sfl_file" "Recent items list" || true
        done
    fi
    safe_clean ~/Library/Preferences/com.apple.recentitems.plist "Recent items preferences" || true
}
clean_mail_downloads() {
    stop_section_spinner
    local mail_age_days=${MOLE_MAIL_AGE_DAYS:-30}
    if ! [[ "$mail_age_days" =~ ^[0-9]+$ ]]; then
        mail_age_days=30
    fi
    local -a mail_dirs=(
        "$HOME/Library/Mail Downloads"
        "$HOME/Library/Containers/com.apple.mail/Data/Library/Mail Downloads"
    )
    local count=0
    local cleaned_kb=0
    for target_path in "${mail_dirs[@]}"; do
        if [[ -d "$target_path" ]]; then
            local dir_size_kb=0
            dir_size_kb=$(get_path_size_kb "$target_path")
            if ! [[ "$dir_size_kb" =~ ^[0-9]+$ ]]; then
                dir_size_kb=0
            fi
            local min_kb="${MOLE_MAIL_DOWNLOADS_MIN_KB:-5120}"
            if ! [[ "$min_kb" =~ ^[0-9]+$ ]]; then
                min_kb=5120
            fi
            if [[ "$dir_size_kb" -lt "$min_kb" ]]; then
                continue
            fi
            while IFS= read -r -d '' file_path; do
                if [[ -f "$file_path" ]]; then
                    local file_size_kb=$(get_path_size_kb "$file_path")
                    if safe_remove "$file_path" true; then
                        ((count++))
                        ((cleaned_kb += file_size_kb))
                    fi
                fi
            done < <(command find "$target_path" -type f -mtime +"$mail_age_days" -print0 2> /dev/null || true)
        fi
    done
    if [[ $count -gt 0 ]]; then
        local cleaned_mb=$(echo "$cleaned_kb" | awk '{printf "%.1f", $1/1024}' || echo "0.0")
        echo " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $count mail attachments (~${cleaned_mb}MB)"
        note_activity
    fi
}
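# Illustrative sketch (knobs documented above, hypothetical values): both
# thresholds are environment-tunable before a run, e.g.
#   export MOLE_MAIL_AGE_DAYS=90              # only touch attachments older than 90 days
#   export MOLE_MAIL_DOWNLOADS_MIN_KB=10240   # skip until the folder exceeds 10 MB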
# Sandboxed app caches.
clean_sandboxed_app_caches() {
    stop_section_spinner
    safe_clean ~/Library/Containers/com.apple.wallpaper.agent/Data/Library/Caches/* "Wallpaper agent cache"
    safe_clean ~/Library/Containers/com.apple.mediaanalysisd/Data/Library/Caches/* "Media analysis cache"
    safe_clean ~/Library/Containers/com.apple.AppStore/Data/Library/Caches/* "App Store cache"
    safe_clean ~/Library/Containers/com.apple.configurator.xpc.InternetService/Data/tmp/* "Apple Configurator temp files"
    local containers_dir="$HOME/Library/Containers"
    [[ ! -d "$containers_dir" ]] && return 0
    start_section_spinner "Scanning sandboxed apps..."
    local total_size=0
    local cleaned_count=0
    local found_any=false
    # Use nullglob so unmatched globs expand to nothing instead of literal patterns.
    local _ng_state
    _ng_state=$(shopt -p nullglob || true)
    shopt -s nullglob
    for container_dir in "$containers_dir"/*; do
        process_container_cache "$container_dir"
    done
    eval "$_ng_state"
    stop_section_spinner
    if [[ "$found_any" == "true" ]]; then
        local size_human=$(bytes_to_human "$((total_size * 1024))")
        if [[ "$DRY_RUN" == "true" ]]; then
            echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Sandboxed app caches ${YELLOW}($size_human dry)${NC}"
        else
            echo -e " ${GREEN}${ICON_SUCCESS}${NC} Sandboxed app caches ${GREEN}($size_human)${NC}"
        fi
        ((files_cleaned += cleaned_count))
        ((total_size_cleaned += total_size))
        ((total_items++))
        note_activity
    fi
}
# Process a single container cache directory.
process_container_cache() {
    local container_dir="$1"
    [[ -d "$container_dir" ]] || return 0
    local bundle_id=$(basename "$container_dir")
    if is_critical_system_component "$bundle_id"; then
        return 0
    fi
    if should_protect_data "$bundle_id" || should_protect_data "$(echo "$bundle_id" | tr '[:upper:]' '[:lower:]')"; then
        return 0
    fi
    local cache_dir="$container_dir/Data/Library/Caches"
    [[ -d "$cache_dir" ]] || return 0
    # Fast non-empty check.
    if find "$cache_dir" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then
        local size=$(get_path_size_kb "$cache_dir")
        ((total_size += size))
        found_any=true
        ((cleaned_count++))
        if [[ "$DRY_RUN" != "true" ]]; then
            # Clean contents safely with local nullglob.
            local _ng_state
            _ng_state=$(shopt -p nullglob || true)
            shopt -s nullglob
            for item in "$cache_dir"/*; do
                [[ -e "$item" ]] || continue
                safe_remove "$item" true || true
            done
            eval "$_ng_state"
        fi
    fi
}
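# Illustrative sketch of the save/restore idiom used above: shopt -p prints the
# command that reproduces the current setting (and exits nonzero when the
# option is off, hence the `|| true`), so the caller's nullglob state survives
# regardless of what it was:
#   _ng_state=$(shopt -p nullglob || true)   # e.g. -> "shopt -u nullglob"
#   shopt -s nullglob                        # enable for the loop
#   eval "$_ng_state"                        # restore the saved state afterwards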
# Browser caches (Safari/Chrome/Edge/Firefox).
clean_browsers() {
    stop_section_spinner
    safe_clean ~/Library/Caches/com.apple.Safari/* "Safari cache"
    # Chrome/Chromium.
    safe_clean ~/Library/Caches/Google/Chrome/* "Chrome cache"
    safe_clean ~/Library/Application\ Support/Google/Chrome/*/Application\ Cache/* "Chrome app cache"
    safe_clean ~/Library/Application\ Support/Google/Chrome/*/GPUCache/* "Chrome GPU cache"
    safe_clean ~/Library/Caches/Chromium/* "Chromium cache"

    safe_clean ~/Library/Caches/com.microsoft.edgemac/* "Edge cache"
    safe_clean ~/Library/Caches/company.thebrowser.Browser/* "Arc cache"
    safe_clean ~/Library/Caches/company.thebrowser.dia/* "Dia cache"
@@ -183,13 +404,12 @@ clean_browsers() {
    safe_clean ~/Library/Caches/com.kagi.kagimacOS/* "Orion cache"
    safe_clean ~/Library/Caches/zen/* "Zen cache"
    safe_clean ~/Library/Application\ Support/Firefox/Profiles/*/cache2/* "Firefox profile cache"

    # DISABLED: Service Worker CacheStorage scanning (find can hang on large browser profiles)
    # Browser caches are already cleaned by the safe_clean calls above
    clean_chrome_old_versions
    clean_edge_old_versions
}

# Clean cloud storage app caches
# Cloud storage caches.
clean_cloud_storage() {
    stop_section_spinner
    safe_clean ~/Library/Caches/com.dropbox.* "Dropbox cache"
    safe_clean ~/Library/Caches/com.getdropbox.dropbox "Dropbox cache"
    safe_clean ~/Library/Caches/com.google.GoogleDrive "Google Drive cache"
@@ -198,9 +418,9 @@ clean_cloud_storage() {
    safe_clean ~/Library/Caches/com.box.desktop "Box cache"
    safe_clean ~/Library/Caches/com.microsoft.OneDrive "OneDrive cache"
}

# Clean office application caches
# Office app caches.
clean_office_applications() {
    stop_section_spinner
    safe_clean ~/Library/Caches/com.microsoft.Word "Microsoft Word cache"
    safe_clean ~/Library/Caches/com.microsoft.Excel "Microsoft Excel cache"
    safe_clean ~/Library/Caches/com.microsoft.Powerpoint "Microsoft PowerPoint cache"
@@ -210,117 +430,107 @@ clean_office_applications() {
    safe_clean ~/Library/Caches/org.mozilla.thunderbird/* "Thunderbird cache"
    safe_clean ~/Library/Caches/com.apple.mail/* "Apple Mail cache"
}

# Clean virtualization tools
# Virtualization caches.
clean_virtualization_tools() {
    stop_section_spinner
    safe_clean ~/Library/Caches/com.vmware.fusion "VMware Fusion cache"
    safe_clean ~/Library/Caches/com.parallels.* "Parallels cache"
    safe_clean ~/VirtualBox\ VMs/.cache "VirtualBox cache"
    safe_clean ~/.vagrant.d/tmp/* "Vagrant temporary files"
}

# Clean Application Support logs and caches
# Application Support logs/caches.
clean_application_support_logs() {
    stop_section_spinner
    if [[ ! -d "$HOME/Library/Application Support" ]] || ! ls "$HOME/Library/Application Support" > /dev/null 2>&1; then
        note_activity
        echo -e " ${YELLOW}${ICON_WARNING}${NC} Skipped: No permission to access Application Support"
        return 0
    fi

    if [[ -t 1 ]]; then
        MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning Application Support..."
    fi

    start_section_spinner "Scanning Application Support..."
    local total_size=0
    local cleaned_count=0
    local found_any=false

    # Clean log directories and cache patterns
    # Enable nullglob for safe globbing.
    local _ng_state
    _ng_state=$(shopt -p nullglob || true)
    shopt -s nullglob
    for app_dir in ~/Library/Application\ Support/*; do
        [[ -d "$app_dir" ]] || continue

        local app_name=$(basename "$app_dir")
        local app_name_lower=$(echo "$app_name" | tr '[:upper:]' '[:lower:]')
        local is_protected=false

        if should_protect_data "$app_name"; then
            is_protected=true
        elif should_protect_data "$app_name_lower"; then
            is_protected=true
        fi

        if [[ "$is_protected" == "true" ]]; then
            continue
        fi

        if [[ "$app_name" =~ backgroundtaskmanagement || "$app_name" =~ loginitems ]]; then
        if is_critical_system_component "$app_name"; then
            continue
        fi

        local -a start_candidates=("$app_dir/log" "$app_dir/logs" "$app_dir/activitylog" "$app_dir/Cache/Cache_Data" "$app_dir/Crashpad/completed")

        for candidate in "${start_candidates[@]}"; do
            if [[ -d "$candidate" ]]; then
                if [[ -n "$(ls -A "$candidate" 2> /dev/null)" ]]; then
                if find "$candidate" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then
                    local size=$(get_path_size_kb "$candidate")
                    ((total_size += size))
                    ((cleaned_count++))
                    found_any=true

                    if [[ "$DRY_RUN" != "true" ]]; then
                        safe_remove "$candidate"/* true > /dev/null 2>&1 || true
                        for item in "$candidate"/*; do
                            [[ -e "$item" ]] || continue
                            safe_remove "$item" true > /dev/null 2>&1 || true
                        done
                    fi
                fi
            fi
        done
    done

    # Clean Group Containers logs
    # Group Containers logs (explicit allowlist).
    local known_group_containers=(
        "group.com.apple.contentdelivery"
    )

    for container in "${known_group_containers[@]}"; do
        local container_path="$HOME/Library/Group Containers/$container"
        local -a gc_candidates=("$container_path/Logs" "$container_path/Library/Logs")

        for candidate in "${gc_candidates[@]}"; do
            if [[ -d "$candidate" ]]; then
                if [[ -n "$(ls -A "$candidate" 2> /dev/null)" ]]; then
                if find "$candidate" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then
                    local size=$(get_path_size_kb "$candidate")
                    ((total_size += size))
                    ((cleaned_count++))
                    found_any=true

                    if [[ "$DRY_RUN" != "true" ]]; then
                        safe_remove "$candidate"/* true > /dev/null 2>&1 || true
                        for item in "$candidate"/*; do
                            [[ -e "$item" ]] || continue
                            safe_remove "$item" true > /dev/null 2>&1 || true
                        done
                    fi
                fi
            fi
        done
    done

    if [[ -t 1 ]]; then stop_inline_spinner; fi

    eval "$_ng_state"
    stop_section_spinner
    if [[ "$found_any" == "true" ]]; then
        local size_human=$(bytes_to_human "$((total_size * 1024))")
        if [[ "$DRY_RUN" == "true" ]]; then
            echo -e " ${YELLOW}→${NC} Application Support logs/caches ${YELLOW}($size_human dry)${NC}"
            echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Application Support logs/caches ${YELLOW}($size_human dry)${NC}"
        else
            echo -e " ${GREEN}${ICON_SUCCESS}${NC} Application Support logs/caches ${GREEN}($size_human)${NC}"
        fi
        # Update global counters
        ((files_cleaned += cleaned_count))
        ((total_size_cleaned += total_size))
        ((total_items++))
        note_activity
    fi
}

# Check and show iOS device backup info
# iOS device backup info.
check_ios_device_backups() {
    local backup_dir="$HOME/Library/Application Support/MobileSync/Backup"
    # Simplified check without find to avoid hanging
    # Simplified check without find to avoid hanging.
    if [[ -d "$backup_dir" ]]; then
        local backup_kb=$(get_path_size_kb "$backup_dir")
        if [[ -n "${backup_kb:-}" && "$backup_kb" -gt 102400 ]]; then
@@ -332,16 +542,16 @@ check_ios_device_backups() {
            fi
        fi
    fi
    return 0
}

# Clean Apple Silicon specific caches
# Env: IS_M_SERIES
# Apple Silicon specific caches (IS_M_SERIES).
clean_apple_silicon_caches() {
    if [[ "$IS_M_SERIES" != "true" ]]; then
    if [[ "${IS_M_SERIES:-false}" != "true" ]]; then
        return 0
    fi

    start_section "Apple Silicon updates"
    safe_clean /Library/Apple/usr/share/rosetta/rosetta_update_bundle "Rosetta 2 cache"
    safe_clean ~/Library/Caches/com.apple.rosetta.update "Rosetta 2 user cache"
    safe_clean ~/Library/Caches/com.apple.amp.mediasevicesd "Apple Silicon media service cache"
    end_section
}
|
||||
|
||||
@@ -12,11 +12,9 @@ readonly MOLE_APP_PROTECTION_LOADED=1
_MOLE_CORE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
[[ -z "${MOLE_BASE_LOADED:-}" ]] && source "$_MOLE_CORE_DIR/base.sh"

# ============================================================================
# App Management Functions
# ============================================================================
# Application Management

# System critical components that should NEVER be uninstalled
# Critical system components protected from uninstallation
readonly SYSTEM_CRITICAL_BUNDLES=(
"com.apple.*" # System essentials
"loginwindow"
@@ -24,6 +22,9 @@ readonly SYSTEM_CRITICAL_BUNDLES=(
"systempreferences"
"finder"
"safari"
"com.apple.Settings*"
"com.apple.SystemSettings*"
"com.apple.controlcenter*"
"com.apple.backgroundtaskmanagement*"
"com.apple.loginitems*"
"com.apple.sharedfilelist*"
@@ -65,11 +66,9 @@ readonly SYSTEM_CRITICAL_BUNDLES=(
"com.apple.TextInputSwitcher"
)

# Apps with important data/licenses - protect during cleanup but allow uninstall
# Applications with sensitive data; protected during cleanup but removable
readonly DATA_PROTECTED_BUNDLES=(
# ============================================================================
# System Utilities & Cleanup Tools
# ============================================================================
"com.nektony.*" # App Cleaner & Uninstaller
"com.macpaw.*" # CleanMyMac, CleanMaster
"com.freemacsoft.AppCleaner" # AppCleaner
@@ -79,9 +78,7 @@ readonly DATA_PROTECTED_BUNDLES=(
"com.grandperspectiv.*" # GrandPerspective
"com.binaryfruit.*" # FusionCast

# ============================================================================
# Password Managers & Security
# ============================================================================
"com.1password.*" # 1Password
"com.agilebits.*" # 1Password legacy
"com.lastpass.*" # LastPass
@@ -92,9 +89,7 @@ readonly DATA_PROTECTED_BUNDLES=(
"com.authy.*" # Authy
"com.yubico.*" # YubiKey Manager

# ============================================================================
# Development Tools - IDEs & Editors
# ============================================================================
"com.jetbrains.*" # JetBrains IDEs (IntelliJ, DataGrip, etc.)
"JetBrains*" # JetBrains Application Support folders
"com.microsoft.VSCode" # Visual Studio Code
@@ -109,9 +104,7 @@ readonly DATA_PROTECTED_BUNDLES=(
"abnerworks.Typora" # Typora (Markdown editor)
"com.uranusjr.macdown" # MacDown

# ============================================================================
# AI & LLM Tools
# ============================================================================
"com.todesktop.*" # Cursor (often uses generic todesktop ID)
"Cursor" # Cursor App Support
"com.anthropic.claude*" # Claude
@@ -133,9 +126,7 @@ readonly DATA_PROTECTED_BUNDLES=(
"com.quora.poe.electron" # Poe
"chat.openai.com.*" # OpenAI web wrappers

# ============================================================================
# Development Tools - Database Clients
# ============================================================================
"com.sequelpro.*" # Sequel Pro
"com.sequel-ace.*" # Sequel Ace
"com.tinyapp.*" # TablePlus
@@ -148,9 +139,7 @@ readonly DATA_PROTECTED_BUNDLES=(
"com.valentina-db.Valentina-Studio" # Valentina Studio
"com.dbvis.DbVisualizer" # DbVisualizer

# ============================================================================
# Development Tools - API & Network
# ============================================================================
"com.postmanlabs.mac" # Postman
"com.konghq.insomnia" # Insomnia
"com.CharlesProxy.*" # Charles Proxy
@@ -161,16 +150,17 @@ readonly DATA_PROTECTED_BUNDLES=(
"com.telerik.Fiddler" # Fiddler
"com.usebruno.app" # Bruno (API client)

# Network Proxy & VPN Tools (protect all variants)
# Network Proxy & VPN Tools (pattern-based protection)
# Clash variants
"*clash*" # All Clash variants (ClashX, ClashX Pro, Clash Verge, etc)
"*Clash*" # Capitalized variants
"*clash-verge*" # Explicit Clash Verge protection
"*verge*" # Verge variants (lowercase)
"*Verge*" # Verge variants (capitalized)
"com.nssurge.surge-mac" # Surge
"*surge*" # Surge variants
"*Surge*" # Surge variants
"mihomo*" # Mihomo Party and variants
"*openvpn*" # OpenVPN Connect and variants
"*OpenVPN*" # OpenVPN capitalized variants
"net.openvpn.*" # OpenVPN bundle IDs

# Proxy Clients (Shadowsocks, V2Ray, etc)
"*ShadowsocksX-NG*" # ShadowsocksX-NG
@@ -204,11 +194,14 @@ readonly DATA_PROTECTED_BUNDLES=(
"*windscribe*" # Windscribe
"*mullvad*" # Mullvad
"*privateinternetaccess*" # PIA
"net.openvpn.*" # OpenVPN bundle IDs

# ============================================================================
# Screensaver & Dynamic Wallpaper
"*Aerial*" # Aerial screensaver (all case variants)
"*aerial*" # Aerial lowercase
"*Fliqlo*" # Fliqlo screensaver (all case variants)
"*fliqlo*" # Fliqlo lowercase

# Development Tools - Git & Version Control
# ============================================================================
"com.github.GitHubDesktop" # GitHub Desktop
"com.sublimemerge" # Sublime Merge
"com.torusknot.SourceTreeNotMAS" # SourceTree
@@ -218,9 +211,7 @@ readonly DATA_PROTECTED_BUNDLES=(
"com.fork.Fork" # Fork
"com.axosoft.gitkraken" # GitKraken

# ============================================================================
# Development Tools - Terminal & Shell
# ============================================================================
"com.googlecode.iterm2" # iTerm2
"net.kovidgoyal.kitty" # Kitty
"io.alacritty" # Alacritty
@@ -231,9 +222,7 @@ readonly DATA_PROTECTED_BUNDLES=(
"dev.warp.Warp-Stable" # Warp
"com.termius-dmg" # Termius (SSH client)

# ============================================================================
# Development Tools - Docker & Virtualization
# ============================================================================
"com.docker.docker" # Docker Desktop
"com.getutm.UTM" # UTM
"com.vmware.fusion" # VMware Fusion
@@ -242,9 +231,7 @@ readonly DATA_PROTECTED_BUNDLES=(
"com.vagrant.*" # Vagrant
"com.orbstack.OrbStack" # OrbStack

# ============================================================================
# System Monitoring & Performance
# ============================================================================
"com.bjango.istatmenus*" # iStat Menus
"eu.exelban.Stats" # Stats
"com.monitorcontrol.*" # MonitorControl
@@ -253,9 +240,7 @@ readonly DATA_PROTECTED_BUNDLES=(
"com.activity-indicator.app" # Activity Indicator
"net.cindori.sensei" # Sensei

# ============================================================================
# Window Management & Productivity
# ============================================================================
"com.macitbetter.*" # BetterTouchTool, BetterSnapTool
"com.hegenberg.*" # BetterTouchTool legacy
"com.manytricks.*" # Moom, Witch, Name Mangler, Resolutionator
@@ -273,9 +258,7 @@ readonly DATA_PROTECTED_BUNDLES=(
"com.gaosun.eul" # eul (system monitor)
"com.pointum.hazeover" # HazeOver

# ============================================================================
# Launcher & Automation
# ============================================================================
"com.runningwithcrayons.Alfred" # Alfred
"com.raycast.macos" # Raycast
"com.blacktree.Quicksilver" # Quicksilver
@@ -286,9 +269,7 @@ readonly DATA_PROTECTED_BUNDLES=(
"org.pqrs.Karabiner-Elements" # Karabiner-Elements
"com.apple.Automator" # Automator (system, but keep user workflows)

# ============================================================================
# Note-Taking & Documentation
# ============================================================================
"com.bear-writer.*" # Bear
"com.typora.*" # Typora
"com.ulyssesapp.*" # Ulysses
@@ -307,9 +288,7 @@ readonly DATA_PROTECTED_BUNDLES=(
"com.reflect.ReflectApp" # Reflect
"com.inkdrop.*" # Inkdrop

# ============================================================================
# Design & Creative Tools
# ============================================================================
"com.adobe.*" # Adobe Creative Suite
"com.bohemiancoding.*" # Sketch
"com.figma.*" # Figma
@@ -327,9 +306,7 @@ readonly DATA_PROTECTED_BUNDLES=(
"com.autodesk.*" # Autodesk products
"com.sketchup.*" # SketchUp

# ============================================================================
# Communication & Collaboration
# ============================================================================
"com.tencent.xinWeChat" # WeChat (Chinese users)
"com.tencent.qq" # QQ
"com.alibaba.DingTalkMac" # DingTalk
@@ -340,6 +317,7 @@ readonly DATA_PROTECTED_BUNDLES=(
"com.microsoft.teams*" # Microsoft Teams
"com.slack.Slack" # Slack
"com.hnc.Discord" # Discord
"app.legcord.Legcord" # Legcord
"org.telegram.desktop" # Telegram
"ru.keepcoder.Telegram" # Telegram legacy
"net.whatsapp.WhatsApp" # WhatsApp
@@ -351,9 +329,7 @@ readonly DATA_PROTECTED_BUNDLES=(
"com.postbox-inc.postbox" # Postbox
"com.tinyspeck.slackmacgap" # Slack legacy

# ============================================================================
# Task Management & Productivity
# ============================================================================
"com.omnigroup.OmniFocus*" # OmniFocus
"com.culturedcode.*" # Things
"com.todoist.*" # Todoist
@@ -368,9 +344,7 @@ readonly DATA_PROTECTED_BUNDLES=(
"com.notion.id" # Notion (also note-taking)
"com.linear.linear" # Linear

# ============================================================================
# File Transfer & Sync
# ============================================================================
"com.panic.transmit*" # Transmit (FTP/SFTP)
"com.binarynights.ForkLift*" # ForkLift
"com.noodlesoft.Hazel" # Hazel
@@ -379,9 +353,34 @@ readonly DATA_PROTECTED_BUNDLES=(
"com.apple.Xcode.CloudDocuments" # Xcode Cloud Documents
"com.synology.*" # Synology apps

# ============================================================================
# Cloud Storage & Backup (Issue #204)
"com.dropbox.*" # Dropbox
"com.getdropbox.*" # Dropbox legacy
"*dropbox*" # Dropbox helpers/updaters
"ws.agile.*" # 1Password sync helpers
"com.backblaze.*" # Backblaze
"*backblaze*" # Backblaze helpers
"com.box.desktop*" # Box
"*box.desktop*" # Box helpers
"com.microsoft.OneDrive*" # Microsoft OneDrive
"com.microsoft.SyncReporter" # OneDrive sync reporter
"*OneDrive*" # OneDrive helpers/updaters
"com.google.GoogleDrive" # Google Drive
"com.google.keystone*" # Google updaters (Drive, Chrome, etc.)
"*GoogleDrive*" # Google Drive helpers
"com.amazon.drive" # Amazon Drive
"com.apple.bird" # iCloud Drive daemon
"com.apple.CloudDocs*" # iCloud Documents
"com.displaylink.*" # DisplayLink
"com.fujitsu.pfu.ScanSnap*" # ScanSnap
"com.citrix.*" # Citrix Workspace
"org.xquartz.*" # XQuartz
"us.zoom.updater*" # Zoom updaters
"com.DigiDNA.iMazing*" # iMazing
"com.shirtpocket.*" # SuperDuper backup
"homebrew.mxcl.*" # Homebrew services

# Screenshot & Recording
# ============================================================================
"com.cleanshot.*" # CleanShot X
"com.xnipapp.xnip" # Xnip
"com.reincubate.camo" # Camo
@@ -395,9 +394,7 @@ readonly DATA_PROTECTED_BUNDLES=(
"com.linebreak.CloudApp" # CloudApp
"com.droplr.droplr-mac" # Droplr

# ============================================================================
# Media & Entertainment
# ============================================================================
"com.spotify.client" # Spotify
"com.apple.Music" # Apple Music
"com.apple.podcasts" # Apple Podcasts
@@ -415,20 +412,36 @@ readonly DATA_PROTECTED_BUNDLES=(
"tv.plex.player.desktop" # Plex
"com.netease.163music" # NetEase Music

# ============================================================================
# License Management & App Stores
# ============================================================================
"com.paddle.Paddle*" # Paddle (license management)
"com.setapp.DesktopClient" # Setapp
"com.devmate.*" # DevMate (license framework)
"org.sparkle-project.Sparkle" # Sparkle (update framework)
)

# Centralized check for critical system components (case-insensitive)
is_critical_system_component() {
local token="$1"
[[ -z "$token" ]] && return 1

local lower
lower=$(echo "$token" | tr '[:upper:]' '[:lower:]')

case "$lower" in
*backgroundtaskmanagement* | *loginitems* | *systempreferences* | *systemsettings* | *settings* | *preferences* | *controlcenter* | *biometrickit* | *sfl* | *tcc*)
return 0
;;
*)
return 1
;;
esac
}
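
Because the token is lowercased before the `case` match, the check is case-insensitive and applies equally to bundle IDs, file names, or paths. Two illustrative calls (inputs are hypothetical):

is_critical_system_component "com.apple.ControlCenter.helper" && echo protected   # matches *controlcenter*
is_critical_system_component "com.spotify.client" || echo "not critical"          # no keyword hit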

# Legacy function - preserved for backward compatibility
# Use should_protect_from_uninstall() or should_protect_data() instead
readonly PRESERVED_BUNDLE_PATTERNS=("${SYSTEM_CRITICAL_BUNDLES[@]}" "${DATA_PROTECTED_BUNDLES[@]}")

# Check whether a bundle ID matches a pattern (supports globs)
# Check if bundle ID matches pattern (glob support)
bundle_matches_pattern() {
local bundle_id="$1"
local pattern="$2"
@@ -443,7 +456,7 @@ bundle_matches_pattern() {
return 1
}
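
The glob support rests on a bash detail: inside `[[ ... ]]`, an unquoted right-hand side is treated as a pattern rather than a literal string. A minimal sketch (IDs taken from the tables above):

pattern="com.jetbrains.*"
[[ "com.jetbrains.datagrip" == $pattern ]] && echo "glob match"        # unquoted RHS: pattern match
[[ "com.jetbrains.datagrip" == "$pattern" ]] || echo "no literal match" # quoted RHS: exact compare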

# Check if app is a system component that should never be uninstalled
# Check if application is a protected system component
should_protect_from_uninstall() {
local bundle_id="$1"
for pattern in "${SYSTEM_CRITICAL_BUNDLES[@]}"; do
@@ -454,7 +467,7 @@ should_protect_from_uninstall() {
return 1
}

# Check if app data should be protected during cleanup (but app can be uninstalled)
# Check if application data should be protected during cleanup
should_protect_data() {
local bundle_id="$1"
# Protect both system critical and data protected bundles during cleanup
@@ -466,290 +479,363 @@ should_protect_data() {
return 1
}

# Find and list app-related files (consolidated from duplicates)
# Check if a path is protected from deletion
# Centralized logic to protect system settings, control center, and critical apps
#
# Args: $1 - path to check
# Returns: 0 if protected, 1 if safe to delete
should_protect_path() {
local path="$1"
[[ -z "$path" ]] && return 1

local path_lower
path_lower=$(echo "$path" | tr '[:upper:]' '[:lower:]')

# 1. Keyword-based matching for system components
# Protect System Settings, Preferences, Control Center, and related XPC services
# Also protect "Settings" (used in macOS Sequoia) and savedState files
if [[ "$path_lower" =~ systemsettings || "$path_lower" =~ systempreferences || "$path_lower" =~ controlcenter ]]; then
return 0
fi

# Additional check for com.apple.Settings (macOS Sequoia System Settings)
if [[ "$path_lower" =~ com\.apple\.settings ]]; then
return 0
fi

# Protect Notes cache (search index issues)
if [[ "$path_lower" =~ com\.apple\.notes ]]; then
return 0
fi

# 2. Protect caches critical for system UI rendering
# These caches are essential for modern macOS (Sonoma/Sequoia) system UI rendering
case "$path" in
# System Settings and Control Center caches (CRITICAL - prevents blank panel bug)
*com.apple.systempreferences.cache* | *com.apple.Settings.cache* | *com.apple.controlcenter.cache*)
return 0
;;
# Finder and Dock (system essential)
*com.apple.finder.cache* | *com.apple.dock.cache*)
return 0
;;
# System XPC services and sandboxed containers
*/Library/Containers/com.apple.Settings* | */Library/Containers/com.apple.SystemSettings* | */Library/Containers/com.apple.controlcenter*)
return 0
;;
*/Library/Group\ Containers/com.apple.systempreferences* | */Library/Group\ Containers/com.apple.Settings*)
return 0
;;
# Shared file lists for System Settings (macOS Sequoia) - Issue #136
*/com.apple.sharedfilelist/*com.apple.Settings* | */com.apple.sharedfilelist/*com.apple.SystemSettings* | */com.apple.sharedfilelist/*systempreferences*)
return 0
;;
esac

# 3. Extract bundle ID from sandbox paths
# Matches: .../Library/Containers/bundle.id/...
# Matches: .../Library/Group Containers/group.id/...
if [[ "$path" =~ /Library/Containers/([^/]+) ]] || [[ "$path" =~ /Library/Group\ Containers/([^/]+) ]]; then
local bundle_id="${BASH_REMATCH[1]}"
if should_protect_data "$bundle_id"; then
return 0
fi
fi

# 4. Check for specific hardcoded critical patterns
case "$path" in
*com.apple.Settings* | *com.apple.SystemSettings* | *com.apple.controlcenter* | *com.apple.finder* | *com.apple.dock*)
return 0
;;
esac

# 5. Protect critical preference files and user data
case "$path" in
*/Library/Preferences/com.apple.dock.plist | */Library/Preferences/com.apple.finder.plist)
return 0
;;
# Bluetooth and WiFi configurations
*/ByHost/com.apple.bluetooth.* | */ByHost/com.apple.wifi.*)
return 0
;;
# iCloud Drive - protect user's cloud synced data
*/Library/Mobile\ Documents* | */Mobile\ Documents*)
return 0
;;
esac

# 6. Match full path against protected patterns
# This catches things like /Users/tw93/Library/Caches/Claude when pattern is *Claude*
for pattern in "${SYSTEM_CRITICAL_BUNDLES[@]}" "${DATA_PROTECTED_BUNDLES[@]}"; do
if bundle_matches_pattern "$path" "$pattern"; then
return 0
fi
done

# 7. Check if the filename itself matches any protected patterns
local filename
filename=$(basename "$path")
if should_protect_data "$filename"; then
return 0
fi

return 1
}
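
Taken together the layers fail closed; a few hypothetical probes show the different routes to a verdict:

should_protect_path "$HOME/Library/Containers/com.apple.Settings"  # 0 - com.apple.settings keyword match
should_protect_path "$HOME/Library/Caches/com.1password.app"       # 0 - filename matches com.1password.* (step 7)
should_protect_path "$HOME/Library/Caches/com.example.someapp"     # 1 - no rule applies, safe to delete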

# Check if a path is protected by whitelist patterns
# Args: $1 - path to check
# Returns: 0 if whitelisted, 1 if not
is_path_whitelisted() {
local target_path="$1"
[[ -z "$target_path" ]] && return 1

# Normalize path (remove trailing slash)
local normalized_target="${target_path%/}"

# Empty whitelist means nothing is protected
[[ ${#WHITELIST_PATTERNS[@]} -eq 0 ]] && return 1

for pattern in "${WHITELIST_PATTERNS[@]}"; do
# Pattern is already expanded/normalized in bin/clean.sh
local check_pattern="${pattern%/}"

# Check for exact match or glob pattern match
# shellcheck disable=SC2053
if [[ "$normalized_target" == "$check_pattern" ]] ||
[[ "$normalized_target" == $check_pattern ]]; then
return 0
fi
done

return 1
}
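
The quoted comparison catches exact entries while the unquoted one (hence the SC2053 suppression) lets whitelist entries carry globs. A hypothetical configuration:

WHITELIST_PATTERNS=("$HOME/Library/Caches/JetBrains*" "$HOME/.config/mole")
is_path_whitelisted "$HOME/Library/Caches/JetBrains/IntelliJIdea2024.1" && echo "kept by whitelist"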

# Locate files associated with an application
find_app_files() {
local bundle_id="$1"
local app_name="$2"
local -a files_to_clean=()

# ============================================================================
# User-level files (no sudo required)
# ============================================================================
# Normalize app name for matching
local nospace_name="${app_name// /}"
local underscore_name="${app_name// /_}"

# Application Support
[[ -d ~/Library/Application\ Support/"$app_name" ]] && files_to_clean+=("$HOME/Library/Application Support/$app_name")
[[ -d ~/Library/Application\ Support/"$bundle_id" ]] && files_to_clean+=("$HOME/Library/Application Support/$bundle_id")
# Standard path patterns for user-level files
local -a user_patterns=(
"$HOME/Library/Application Support/$app_name"
"$HOME/Library/Application Support/$bundle_id"
"$HOME/Library/Caches/$bundle_id"
"$HOME/Library/Caches/$app_name"
"$HOME/Library/Logs/$app_name"
"$HOME/Library/Logs/$bundle_id"
"$HOME/Library/Application Support/CrashReporter/$app_name"
"$HOME/Library/Saved Application State/$bundle_id.savedState"
"$HOME/Library/Containers/$bundle_id"
"$HOME/Library/WebKit/$bundle_id"
"$HOME/Library/WebKit/com.apple.WebKit.WebContent/$bundle_id"
"$HOME/Library/HTTPStorages/$bundle_id"
"$HOME/Library/Cookies/$bundle_id.binarycookies"
"$HOME/Library/LaunchAgents/$bundle_id.plist"
"$HOME/Library/Application Scripts/$bundle_id"
"$HOME/Library/Services/$app_name.workflow"
"$HOME/Library/QuickLook/$app_name.qlgenerator"
"$HOME/Library/Internet Plug-Ins/$app_name.plugin"
"$HOME/Library/Audio/Plug-Ins/Components/$app_name.component"
"$HOME/Library/Audio/Plug-Ins/VST/$app_name.vst"
"$HOME/Library/Audio/Plug-Ins/VST3/$app_name.vst3"
"$HOME/Library/Audio/Plug-Ins/Digidesign/$app_name.dpm"
"$HOME/Library/PreferencePanes/$app_name.prefPane"
"$HOME/Library/Screen Savers/$app_name.saver"
"$HOME/Library/Frameworks/$app_name.framework"
"$HOME/Library/Autosave Information/$bundle_id"
"$HOME/Library/Contextual Menu Items/$app_name.plugin"
"$HOME/Library/Spotlight/$app_name.mdimporter"
"$HOME/Library/ColorPickers/$app_name.colorPicker"
"$HOME/Library/Workflows/$app_name.workflow"
"$HOME/.config/$app_name"
"$HOME/.local/share/$app_name"
"$HOME/.$app_name"
"$HOME/.$app_name"rc
)

# Sanitized App Name (remove spaces) - e.g. "Visual Studio Code" -> "VisualStudioCode"
# Add sanitized name variants if unique enough
if [[ ${#app_name} -gt 3 && "$app_name" =~ [[:space:]] ]]; then
local nospace_name="${app_name// /}"
[[ -d ~/Library/Application\ Support/"$nospace_name" ]] && files_to_clean+=("$HOME/Library/Application Support/$nospace_name")
[[ -d ~/Library/Caches/"$nospace_name" ]] && files_to_clean+=("$HOME/Library/Caches/$nospace_name")
[[ -d ~/Library/Logs/"$nospace_name" ]] && files_to_clean+=("$HOME/Library/Logs/$nospace_name")

local underscore_name="${app_name// /_}"
[[ -d ~/Library/Application\ Support/"$underscore_name" ]] && files_to_clean+=("$HOME/Library/Application Support/$underscore_name")
user_patterns+=(
"$HOME/Library/Application Support/$nospace_name"
"$HOME/Library/Caches/$nospace_name"
"$HOME/Library/Logs/$nospace_name"
"$HOME/Library/Application Support/$underscore_name"
)
fi

# Caches
[[ -d ~/Library/Caches/"$bundle_id" ]] && files_to_clean+=("$HOME/Library/Caches/$bundle_id")
[[ -d ~/Library/Caches/"$app_name" ]] && files_to_clean+=("$HOME/Library/Caches/$app_name")
# Process standard patterns
for p in "${user_patterns[@]}"; do
local expanded_path="${p/#\~/$HOME}"
# Skip if path doesn't exist
[[ ! -e "$expanded_path" ]] && continue

# Preferences
[[ -f ~/Library/Preferences/"$bundle_id".plist ]] && files_to_clean+=("$HOME/Library/Preferences/$bundle_id.plist")
[[ -d ~/Library/Preferences/ByHost ]] && while IFS= read -r -d '' pref; do
files_to_clean+=("$pref")
done < <(find ~/Library/Preferences/ByHost \( -name "$bundle_id*.plist" \) -print0 2> /dev/null)
# Safety check: Skip if path ends with a common directory name (indicates empty app_name/bundle_id)
# This prevents deletion of entire Library subdirectories when bundle_id is empty
case "$expanded_path" in
*/Library/Application\ Support | */Library/Application\ Support/ | \
*/Library/Caches | */Library/Caches/ | \
*/Library/Logs | */Library/Logs/ | \
*/Library/Containers | */Library/Containers/ | \
*/Library/WebKit | */Library/WebKit/ | \
*/Library/HTTPStorages | */Library/HTTPStorages/ | \
*/Library/Application\ Scripts | */Library/Application\ Scripts/ | \
*/Library/Autosave\ Information | */Library/Autosave\ Information/ | \
*/Library/Group\ Containers | */Library/Group\ Containers/)
continue
;;
esac

# Logs
[[ -d ~/Library/Logs/"$app_name" ]] && files_to_clean+=("$HOME/Library/Logs/$app_name")
[[ -d ~/Library/Logs/"$bundle_id" ]] && files_to_clean+=("$HOME/Library/Logs/$bundle_id")
# CrashReporter
[[ -d ~/Library/Application\ Support/CrashReporter/"$app_name" ]] && files_to_clean+=("$HOME/Library/Application Support/CrashReporter/$app_name")
files_to_clean+=("$expanded_path")
done

# Saved Application State
[[ -d ~/Library/Saved\ Application\ State/"$bundle_id".savedState ]] && files_to_clean+=("$HOME/Library/Saved Application State/$bundle_id.savedState")
# Handle Preferences and ByHost variants (only if bundle_id is valid)
if [[ -n "$bundle_id" && "$bundle_id" != "unknown" && ${#bundle_id} -gt 3 ]]; then
[[ -f ~/Library/Preferences/"$bundle_id".plist ]] && files_to_clean+=("$HOME/Library/Preferences/$bundle_id.plist")
[[ -d ~/Library/Preferences/ByHost ]] && while IFS= read -r -d '' pref; do
files_to_clean+=("$pref")
done < <(command find ~/Library/Preferences/ByHost -maxdepth 1 \( -name "$bundle_id*.plist" \) -print0 2> /dev/null)

# Containers (sandboxed apps)
[[ -d ~/Library/Containers/"$bundle_id" ]] && files_to_clean+=("$HOME/Library/Containers/$bundle_id")
# Group Containers (special handling)
if [[ -d ~/Library/Group\ Containers ]]; then
while IFS= read -r -d '' container; do
files_to_clean+=("$container")
done < <(command find ~/Library/Group\ Containers -maxdepth 1 \( -name "*$bundle_id*" \) -print0 2> /dev/null)
fi
fi

# Group Containers
[[ -d ~/Library/Group\ Containers ]] && while IFS= read -r -d '' container; do
files_to_clean+=("$container")
done < <(find ~/Library/Group\ Containers -type d \( -name "*$bundle_id*" \) -print0 2> /dev/null)

# WebKit data
[[ -d ~/Library/WebKit/"$bundle_id" ]] && files_to_clean+=("$HOME/Library/WebKit/$bundle_id")
[[ -d ~/Library/WebKit/com.apple.WebKit.WebContent/"$bundle_id" ]] && files_to_clean+=("$HOME/Library/WebKit/com.apple.WebKit.WebContent/$bundle_id")

# HTTP Storage
[[ -d ~/Library/HTTPStorages/"$bundle_id" ]] && files_to_clean+=("$HOME/Library/HTTPStorages/$bundle_id")

# Cookies
[[ -f ~/Library/Cookies/"$bundle_id".binarycookies ]] && files_to_clean+=("$HOME/Library/Cookies/$bundle_id.binarycookies")

# Launch Agents (user-level)
[[ -f ~/Library/LaunchAgents/"$bundle_id".plist ]] && files_to_clean+=("$HOME/Library/LaunchAgents/$bundle_id.plist")
# Search for LaunchAgents by app name if unique enough
if [[ ${#app_name} -gt 3 ]]; then
# Launch Agents by name (special handling)
if [[ ${#app_name} -gt 3 ]] && [[ -d ~/Library/LaunchAgents ]]; then
while IFS= read -r -d '' plist; do
files_to_clean+=("$plist")
done < <(find ~/Library/LaunchAgents -name "*$app_name*.plist" -print0 2> /dev/null)
done < <(command find ~/Library/LaunchAgents -maxdepth 1 \( -name "*$app_name*.plist" \) -print0 2> /dev/null)
fi

# Application Scripts
[[ -d ~/Library/Application\ Scripts/"$bundle_id" ]] && files_to_clean+=("$HOME/Library/Application Scripts/$bundle_id")

# Services
[[ -d ~/Library/Services/"$app_name".workflow ]] && files_to_clean+=("$HOME/Library/Services/$app_name.workflow")

# QuickLook Plugins
[[ -d ~/Library/QuickLook/"$app_name".qlgenerator ]] && files_to_clean+=("$HOME/Library/QuickLook/$app_name.qlgenerator")

# Internet Plug-Ins
[[ -d ~/Library/Internet\ Plug-Ins/"$app_name".plugin ]] && files_to_clean+=("$HOME/Library/Internet Plug-Ins/$app_name.plugin")

# Audio Plug-Ins (Components, VST, VST3)
[[ -d ~/Library/Audio/Plug-Ins/Components/"$app_name".component ]] && files_to_clean+=("$HOME/Library/Audio/Plug-Ins/Components/$app_name.component")
[[ -d ~/Library/Audio/Plug-Ins/VST/"$app_name".vst ]] && files_to_clean+=("$HOME/Library/Audio/Plug-Ins/VST/$app_name.vst")
[[ -d ~/Library/Audio/Plug-Ins/VST3/"$app_name".vst3 ]] && files_to_clean+=("$HOME/Library/Audio/Plug-Ins/VST3/$app_name.vst3")
[[ -d ~/Library/Audio/Plug-Ins/Digidesign/"$app_name".dpm ]] && files_to_clean+=("$HOME/Library/Audio/Plug-Ins/Digidesign/$app_name.dpm")

# Preference Panes
[[ -d ~/Library/PreferencePanes/"$app_name".prefPane ]] && files_to_clean+=("$HOME/Library/PreferencePanes/$app_name.prefPane")

# Screen Savers
[[ -d ~/Library/Screen\ Savers/"$app_name".saver ]] && files_to_clean+=("$HOME/Library/Screen Savers/$app_name.saver")

# Frameworks
[[ -d ~/Library/Frameworks/"$app_name".framework ]] && files_to_clean+=("$HOME/Library/Frameworks/$app_name.framework")

# Autosave Information
[[ -d ~/Library/Autosave\ Information/"$bundle_id" ]] && files_to_clean+=("$HOME/Library/Autosave Information/$bundle_id")

# Contextual Menu Items
[[ -d ~/Library/Contextual\ Menu\ Items/"$app_name".plugin ]] && files_to_clean+=("$HOME/Library/Contextual Menu Items/$app_name.plugin")

# Spotlight Plugins
[[ -d ~/Library/Spotlight/"$app_name".mdimporter ]] && files_to_clean+=("$HOME/Library/Spotlight/$app_name.mdimporter")

# Color Pickers
[[ -d ~/Library/ColorPickers/"$app_name".colorPicker ]] && files_to_clean+=("$HOME/Library/ColorPickers/$app_name.colorPicker")

# Workflows
[[ -d ~/Library/Workflows/"$app_name".workflow ]] && files_to_clean+=("$HOME/Library/Workflows/$app_name.workflow")

# Unix-style configuration directories and files (cross-platform apps)
[[ -d ~/.config/"$app_name" ]] && files_to_clean+=("$HOME/.config/$app_name")
[[ -d ~/.local/share/"$app_name" ]] && files_to_clean+=("$HOME/.local/share/$app_name")
[[ -d ~/."$app_name" ]] && files_to_clean+=("$HOME/.$app_name")
[[ -f ~/."${app_name}rc" ]] && files_to_clean+=("$HOME/.${app_name}rc")

# ============================================================================
# IDE-specific SDK and Toolchain directories
# ============================================================================

# DevEco-Studio (HarmonyOS/OpenHarmony IDE by Huawei)
# Handle specialized toolchains and development environments
# 1. DevEco-Studio (Huawei)
if [[ "$app_name" =~ DevEco|deveco ]] || [[ "$bundle_id" =~ huawei.*deveco ]]; then
[[ -d ~/DevEcoStudioProjects ]] && files_to_clean+=("$HOME/DevEcoStudioProjects")
[[ -d ~/DevEco-Studio ]] && files_to_clean+=("$HOME/DevEco-Studio")
[[ -d ~/Library/Application\ Support/Huawei ]] && files_to_clean+=("$HOME/Library/Application Support/Huawei")
[[ -d ~/Library/Caches/Huawei ]] && files_to_clean+=("$HOME/Library/Caches/Huawei")
[[ -d ~/Library/Logs/Huawei ]] && files_to_clean+=("$HOME/Library/Logs/Huawei")
[[ -d ~/Library/Huawei ]] && files_to_clean+=("$HOME/Library/Huawei")
[[ -d ~/Huawei ]] && files_to_clean+=("$HOME/Huawei")
[[ -d ~/HarmonyOS ]] && files_to_clean+=("$HOME/HarmonyOS")
[[ -d ~/.huawei ]] && files_to_clean+=("$HOME/.huawei")
[[ -d ~/.ohos ]] && files_to_clean+=("$HOME/.ohos")
for d in ~/DevEcoStudioProjects ~/DevEco-Studio ~/Library/Application\ Support/Huawei ~/Library/Caches/Huawei ~/Library/Logs/Huawei ~/Library/Huawei ~/Huawei ~/HarmonyOS ~/.huawei ~/.ohos; do
[[ -d "$d" ]] && files_to_clean+=("$d")
done
fi

# Android Studio
# 2. Android Studio (Google)
if [[ "$app_name" =~ Android.*Studio|android.*studio ]] || [[ "$bundle_id" =~ google.*android.*studio|jetbrains.*android ]]; then
[[ -d ~/AndroidStudioProjects ]] && files_to_clean+=("$HOME/AndroidStudioProjects")
[[ -d ~/Library/Android ]] && files_to_clean+=("$HOME/Library/Android")
[[ -d ~/.android ]] && files_to_clean+=("$HOME/.android")
[[ -d ~/.gradle ]] && files_to_clean+=("$HOME/.gradle")
[[ -d ~/Library/Application\ Support/Google ]] &&
while IFS= read -r -d '' dir; do files_to_clean+=("$dir"); done < <(find ~/Library/Application\ Support/Google -maxdepth 1 -name "AndroidStudio*" -print0 2> /dev/null)
for d in ~/AndroidStudioProjects ~/Library/Android ~/.android ~/.gradle; do
[[ -d "$d" ]] && files_to_clean+=("$d")
done
[[ -d ~/Library/Application\ Support/Google ]] && while IFS= read -r -d '' d; do files_to_clean+=("$d"); done < <(command find ~/Library/Application\ Support/Google -maxdepth 1 -name "AndroidStudio*" -print0 2> /dev/null)
fi

# Xcode
# 3. Xcode (Apple)
if [[ "$app_name" =~ Xcode|xcode ]] || [[ "$bundle_id" =~ apple.*xcode ]]; then
[[ -d ~/Library/Developer ]] && files_to_clean+=("$HOME/Library/Developer")
[[ -d ~/.Xcode ]] && files_to_clean+=("$HOME/.Xcode")
fi

# IntelliJ IDEA, PyCharm, WebStorm, etc. (JetBrains IDEs)
# 4. JetBrains (IDE settings)
if [[ "$bundle_id" =~ jetbrains ]] || [[ "$app_name" =~ IntelliJ|PyCharm|WebStorm|GoLand|RubyMine|PhpStorm|CLion|DataGrip|Rider ]]; then
local ide_name="$app_name"
[[ -d ~/Library/Application\ Support/JetBrains ]] &&
while IFS= read -r -d '' dir; do files_to_clean+=("$dir"); done < <(find ~/Library/Application\ Support/JetBrains -maxdepth 1 -name "${ide_name}*" -print0 2> /dev/null)
[[ -d ~/Library/Caches/JetBrains ]] &&
while IFS= read -r -d '' dir; do files_to_clean+=("$dir"); done < <(find ~/Library/Caches/JetBrains -maxdepth 1 -name "${ide_name}*" -print0 2> /dev/null)
[[ -d ~/Library/Logs/JetBrains ]] &&
while IFS= read -r -d '' dir; do files_to_clean+=("$dir"); done < <(find ~/Library/Logs/JetBrains -maxdepth 1 -name "${ide_name}*" -print0 2> /dev/null)
for base in ~/Library/Application\ Support/JetBrains ~/Library/Caches/JetBrains ~/Library/Logs/JetBrains; do
[[ -d "$base" ]] && while IFS= read -r -d '' d; do files_to_clean+=("$d"); done < <(command find "$base" -maxdepth 1 -name "${app_name}*" -print0 2> /dev/null)
done
fi

# Unity
if [[ "$app_name" =~ Unity|unity ]] || [[ "$bundle_id" =~ unity ]]; then
[[ -d ~/.local/share/unity3d ]] && files_to_clean+=("$HOME/.local/share/unity3d")
[[ -d ~/Library/Unity ]] && files_to_clean+=("$HOME/Library/Unity")
fi
# 5. Unity / Unreal / Godot
[[ "$app_name" =~ Unity|unity ]] && [[ -d ~/Library/Unity ]] && files_to_clean+=("$HOME/Library/Unity")
[[ "$app_name" =~ Unreal|unreal ]] && [[ -d ~/Library/Application\ Support/Epic ]] && files_to_clean+=("$HOME/Library/Application Support/Epic")
[[ "$app_name" =~ Godot|godot ]] && [[ -d ~/Library/Application\ Support/Godot ]] && files_to_clean+=("$HOME/Library/Application Support/Godot")

# Unreal Engine
if [[ "$app_name" =~ Unreal|unreal ]] || [[ "$bundle_id" =~ unrealengine|epicgames ]]; then
[[ -d ~/Library/Application\ Support/Epic ]] && files_to_clean+=("$HOME/Library/Application Support/Epic")
[[ -d ~/Documents/Unreal\ Projects ]] && files_to_clean+=("$HOME/Documents/Unreal Projects")
fi
# 6. Tools
[[ "$bundle_id" =~ microsoft.*vscode ]] && [[ -d ~/.vscode ]] && files_to_clean+=("$HOME/.vscode")
[[ "$app_name" =~ Docker ]] && [[ -d ~/.docker ]] && files_to_clean+=("$HOME/.docker")

# Visual Studio Code
if [[ "$bundle_id" =~ microsoft.*vscode|visualstudio.*code ]]; then
[[ -d ~/.vscode ]] && files_to_clean+=("$HOME/.vscode")
[[ -d ~/.vscode-insiders ]] && files_to_clean+=("$HOME/.vscode-insiders")
fi

# Flutter
if [[ "$app_name" =~ Flutter|flutter ]] || [[ "$bundle_id" =~ flutter ]]; then
[[ -d ~/.pub-cache ]] && files_to_clean+=("$HOME/.pub-cache")
[[ -d ~/flutter ]] && files_to_clean+=("$HOME/flutter")
fi

# Godot
if [[ "$app_name" =~ Godot|godot ]] || [[ "$bundle_id" =~ godot ]]; then
[[ -d ~/.local/share/godot ]] && files_to_clean+=("$HOME/.local/share/godot")
[[ -d ~/Library/Application\ Support/Godot ]] && files_to_clean+=("$HOME/Library/Application Support/Godot")
fi

# Docker Desktop
if [[ "$app_name" =~ Docker ]] || [[ "$bundle_id" =~ docker ]]; then
[[ -d ~/.docker ]] && files_to_clean+=("$HOME/.docker")
fi

# Only print if array has elements to avoid unbound variable error
# Output results
if [[ ${#files_to_clean[@]} -gt 0 ]]; then
printf '%s\n' "${files_to_clean[@]}"
fi
return 0
}
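
Since the function emits one path per line on stdout, callers can consume it as a stream; a hypothetical dry-run loop (bundle ID and app name invented):

while IFS= read -r f; do
    echo "would remove: $f"
done < <(find_app_files "com.example.notes" "Example Notes")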

# Find system-level app files (requires sudo)
# Locate system-level application files
find_app_system_files() {
local bundle_id="$1"
local app_name="$2"
local -a system_files=()

# System Application Support
[[ -d /Library/Application\ Support/"$app_name" ]] && system_files+=("/Library/Application Support/$app_name")
[[ -d /Library/Application\ Support/"$bundle_id" ]] && system_files+=("/Library/Application Support/$bundle_id")

# Sanitized App Name (remove spaces)
local nospace_name="${app_name// /}"

# Standard system path patterns
local -a system_patterns=(
"/Library/Application Support/$app_name"
"/Library/Application Support/$bundle_id"
"/Library/LaunchAgents/$bundle_id.plist"
"/Library/LaunchDaemons/$bundle_id.plist"
"/Library/Preferences/$bundle_id.plist"
"/Library/Receipts/$bundle_id.bom"
"/Library/Receipts/$bundle_id.plist"
"/Library/Frameworks/$app_name.framework"
"/Library/Internet Plug-Ins/$app_name.plugin"
"/Library/Audio/Plug-Ins/Components/$app_name.component"
"/Library/Audio/Plug-Ins/VST/$app_name.vst"
"/Library/Audio/Plug-Ins/VST3/$app_name.vst3"
"/Library/Audio/Plug-Ins/Digidesign/$app_name.dpm"
"/Library/QuickLook/$app_name.qlgenerator"
"/Library/PreferencePanes/$app_name.prefPane"
"/Library/Screen Savers/$app_name.saver"
"/Library/Caches/$bundle_id"
"/Library/Caches/$app_name"
)

if [[ ${#app_name} -gt 3 && "$app_name" =~ [[:space:]] ]]; then
local nospace_name="${app_name// /}"
[[ -d /Library/Application\ Support/"$nospace_name" ]] && system_files+=("/Library/Application Support/$nospace_name")
[[ -d /Library/Caches/"$nospace_name" ]] && system_files+=("/Library/Caches/$nospace_name")
[[ -d /Library/Logs/"$nospace_name" ]] && system_files+=("/Library/Logs/$nospace_name")
system_patterns+=(
"/Library/Application Support/$nospace_name"
"/Library/Caches/$nospace_name"
"/Library/Logs/$nospace_name"
)
fi

# System Launch Agents
[[ -f /Library/LaunchAgents/"$bundle_id".plist ]] && system_files+=("/Library/LaunchAgents/$bundle_id.plist")
# Search for LaunchAgents by app name if unique enough
# Process patterns
for p in "${system_patterns[@]}"; do
[[ ! -e "$p" ]] && continue

# Safety check: Skip if path ends with a common directory name (indicates empty app_name/bundle_id)
case "$p" in
/Library/Application\ Support | /Library/Application\ Support/ | \
/Library/Caches | /Library/Caches/ | \
/Library/Logs | /Library/Logs/)
continue
;;
esac

system_files+=("$p")
done

# System LaunchAgents/LaunchDaemons by name
if [[ ${#app_name} -gt 3 ]]; then
while IFS= read -r -d '' plist; do
system_files+=("$plist")
done < <(find /Library/LaunchAgents -name "*$app_name*.plist" -print0 2> /dev/null)
for base in /Library/LaunchAgents /Library/LaunchDaemons; do
[[ -d "$base" ]] && while IFS= read -r -d '' plist; do
system_files+=("$plist")
done < <(command find "$base" -maxdepth 1 \( -name "*$app_name*.plist" \) -print0 2> /dev/null)
done
fi

# System Launch Daemons
[[ -f /Library/LaunchDaemons/"$bundle_id".plist ]] && system_files+=("/Library/LaunchDaemons/$bundle_id.plist")
# Search for LaunchDaemons by app name if unique enough
if [[ ${#app_name} -gt 3 ]]; then
while IFS= read -r -d '' plist; do
system_files+=("$plist")
done < <(find /Library/LaunchDaemons -name "*$app_name*.plist" -print0 2> /dev/null)
# Privileged Helper Tools and Receipts (special handling)
# Only search with bundle_id if it's valid (not empty and not "unknown")
if [[ -n "$bundle_id" && "$bundle_id" != "unknown" && ${#bundle_id} -gt 3 ]]; then
[[ -d /Library/PrivilegedHelperTools ]] && while IFS= read -r -d '' helper; do
system_files+=("$helper")
done < <(command find /Library/PrivilegedHelperTools -maxdepth 1 \( -name "$bundle_id*" \) -print0 2> /dev/null)

[[ -d /private/var/db/receipts ]] && while IFS= read -r -d '' receipt; do
system_files+=("$receipt")
done < <(command find /private/var/db/receipts -maxdepth 1 \( -name "*$bundle_id*" \) -print0 2> /dev/null)
fi

# Privileged Helper Tools
[[ -d /Library/PrivilegedHelperTools ]] && while IFS= read -r -d '' helper; do
system_files+=("$helper")
done < <(find /Library/PrivilegedHelperTools \( -name "$bundle_id*" \) -print0 2> /dev/null)

# System Preferences
[[ -f /Library/Preferences/"$bundle_id".plist ]] && system_files+=("/Library/Preferences/$bundle_id.plist")

# Installation Receipts
[[ -d /private/var/db/receipts ]] && while IFS= read -r -d '' receipt; do
system_files+=("$receipt")
done < <(find /private/var/db/receipts \( -name "*$bundle_id*" \) -print0 2> /dev/null)

# System Logs
[[ -d /Library/Logs/"$app_name" ]] && system_files+=("/Library/Logs/$app_name")
[[ -d /Library/Logs/"$bundle_id" ]] && system_files+=("/Library/Logs/$bundle_id")

# System Frameworks
[[ -d /Library/Frameworks/"$app_name".framework ]] && system_files+=("/Library/Frameworks/$app_name.framework")

# System Internet Plug-Ins
[[ -d /Library/Internet\ Plug-Ins/"$app_name".plugin ]] && system_files+=("/Library/Internet Plug-Ins/$app_name.plugin")

# System Audio Plug-Ins
[[ -d /Library/Audio/Plug-Ins/Components/"$app_name".component ]] && system_files+=("/Library/Audio/Plug-Ins/Components/$app_name.component")
[[ -d /Library/Audio/Plug-Ins/VST/"$app_name".vst ]] && system_files+=("/Library/Audio/Plug-Ins/VST/$app_name.vst")
[[ -d /Library/Audio/Plug-Ins/VST3/"$app_name".vst3 ]] && system_files+=("/Library/Audio/Plug-Ins/VST3/$app_name.vst3")
[[ -d /Library/Audio/Plug-Ins/Digidesign/"$app_name".dpm ]] && system_files+=("/Library/Audio/Plug-Ins/Digidesign/$app_name.dpm")

# System QuickLook Plugins
[[ -d /Library/QuickLook/"$app_name".qlgenerator ]] && system_files+=("/Library/QuickLook/$app_name.qlgenerator")

# System Preference Panes
[[ -d /Library/PreferencePanes/"$app_name".prefPane ]] && system_files+=("/Library/PreferencePanes/$app_name.prefPane")

# System Screen Savers
[[ -d /Library/Screen\ Savers/"$app_name".saver ]] && system_files+=("/Library/Screen Savers/$app_name.saver")

# System Caches
[[ -d /Library/Caches/"$bundle_id" ]] && system_files+=("/Library/Caches/$bundle_id")
[[ -d /Library/Caches/"$app_name" ]] && system_files+=("/Library/Caches/$app_name")

# Only print if array has elements
if [[ ${#system_files[@]} -gt 0 ]]; then
printf '%s\n' "${system_files[@]}"
fi
@@ -758,7 +844,7 @@ find_app_system_files() {
find_app_receipt_files "$bundle_id"
}

# Find files from installation receipts (Bom files)
# Locate files using installation receipts (BOM)
find_app_receipt_files() {
local bundle_id="$1"

@@ -773,7 +859,7 @@ find_app_receipt_files() {
if [[ -d /private/var/db/receipts ]]; then
while IFS= read -r -d '' bom; do
bom_files+=("$bom")
done < <(find /private/var/db/receipts -name "${bundle_id}*.bom" -print0 2> /dev/null)
done < <(find /private/var/db/receipts -maxdepth 1 -name "${bundle_id}*.bom" -print0 2> /dev/null)
fi

# Process bom files if any found
@@ -791,13 +877,13 @@ find_app_receipt_files() {
# Standardize path (remove leading dot)
local clean_path="${file_path#.}"

# Ensure it starts with /
# Ensure absolute path
if [[ "$clean_path" != /* ]]; then
clean_path="/$clean_path"
fi

# ------------------------------------------------------------------------
# SAFETY FILTER: Only allow specific removal paths
# Safety check: restrict removal to trusted paths
# ------------------------------------------------------------------------
local is_safe=false

@@ -832,15 +918,7 @@ find_app_receipt_files() {
esac

if [[ "$is_safe" == "true" && -e "$clean_path" ]]; then
# Only valid files
# Don't delete directories if they are non-empty parents?
# lsbom lists directories too.
# If we return a directory, `safe_remove` logic handles it.
# `uninstall.sh` uses `remove_file_list`.
# If `lsbom` lists `/Applications` (it shouldn't, only contents), we must be careful.
# `lsbom` usually lists `./Applications/MyApp.app`.
# If it lists `./Applications`, we must skip it.

# If lsbom lists /Applications, skip to avoid system damage.
# Extra check: path must be deep enough?
# If path is just "/Applications", skip.
if [[ "$clean_path" == "/Applications" || "$clean_path" == "/Library" || "$clean_path" == "/usr/local" ]]; then
@@ -858,9 +936,9 @@ find_app_receipt_files() {
fi
}
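
For context on the leading-dot handling above: `lsbom -s` prints only the pathnames recorded in a receipt BOM, relative to the install root, so entries arrive in roughly this shape (receipt name invented) and must be re-anchored at `/` before the safety filter runs:

$ lsbom -s /private/var/db/receipts/com.example.app.bom
./Applications/Example.app
./Applications/Example.app/Contents/Info.plist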

# Force quit an application
# Terminate a running application
force_kill_app() {
# Args: app_name [app_path]; tries graceful then force kill; returns 0 if stopped, 1 otherwise
# Gracefully terminates or force-kills an application
local app_name="$1"
local app_path="${2:-""}"

@@ -913,18 +991,4 @@ force_kill_app() {
pgrep -x "$match_pattern" > /dev/null 2>&1 && return 1 || return 0
}

# Calculate total size of files (consolidated from duplicates)
calculate_total_size() {
local files="$1"
local total_kb=0

while IFS= read -r file; do
if [[ -n "$file" && -e "$file" ]]; then
local size_kb
size_kb=$(get_path_size_kb "$file")
((total_kb += size_kb))
fi
done <<< "$files"

echo "$total_kb"
}
# Note: calculate_total_size() is defined in lib/core/file_ops.sh

601
lib/core/base.sh
@@ -31,27 +31,29 @@ readonly ICON_CONFIRM="◎"
readonly ICON_ADMIN="⚙"
readonly ICON_SUCCESS="✓"
readonly ICON_ERROR="☻"
readonly ICON_WARNING="●"
readonly ICON_EMPTY="○"
readonly ICON_SOLID="●"
readonly ICON_LIST="•"
readonly ICON_ARROW="➤"
readonly ICON_WARNING="☻"
readonly ICON_DRY_RUN="→"
readonly ICON_NAV_UP="↑"
readonly ICON_NAV_DOWN="↓"
readonly ICON_NAV_LEFT="←"
readonly ICON_NAV_RIGHT="→"

# ============================================================================
# Global Configuration Constants
# ============================================================================
readonly MOLE_TEMP_FILE_AGE_DAYS=7 # Temp file cleanup threshold
readonly MOLE_ORPHAN_AGE_DAYS=60 # Orphaned data threshold
readonly MOLE_TEMP_FILE_AGE_DAYS=7 # Temp file retention (days)
readonly MOLE_ORPHAN_AGE_DAYS=60 # Orphaned data retention (days)
readonly MOLE_MAX_PARALLEL_JOBS=15 # Parallel job limit
readonly MOLE_MAIL_DOWNLOADS_MIN_KB=5120 # Mail attachments size threshold
readonly MOLE_LOG_AGE_DAYS=7 # System log retention
readonly MOLE_CRASH_REPORT_AGE_DAYS=7 # Crash report retention
readonly MOLE_SAVED_STATE_AGE_DAYS=7 # App saved state retention
readonly MOLE_TM_BACKUP_SAFE_HOURS=48 # Time Machine failed backup safety window
readonly MOLE_MAIL_DOWNLOADS_MIN_KB=5120 # Mail attachment size threshold
readonly MOLE_MAIL_AGE_DAYS=30 # Mail attachment retention (days)
readonly MOLE_LOG_AGE_DAYS=7 # Log retention (days)
readonly MOLE_CRASH_REPORT_AGE_DAYS=7 # Crash report retention (days)
readonly MOLE_SAVED_STATE_AGE_DAYS=30 # Saved state retention (days) - increased for safety
readonly MOLE_TM_BACKUP_SAFE_HOURS=48 # TM backup safety window (hours)
readonly MOLE_MAX_DS_STORE_FILES=500 # Max .DS_Store files to clean per scan
readonly MOLE_MAX_ORPHAN_ITERATIONS=100 # Max iterations for orphaned app data scan

# ============================================================================
# Seasonal Functions
@@ -80,14 +82,21 @@ declare -a DEFAULT_WHITELIST_PATTERNS=(
"$HOME/Library/Caches/com.nssurge.surge-mac/*"
"$HOME/Library/Application Support/com.nssurge.surge-mac/*"
"$HOME/Library/Caches/org.R-project.R/R/renv/*"
"$HOME/Library/Caches/pypoetry/virtualenvs*"
"$HOME/Library/Caches/JetBrains*"
"$HOME/Library/Caches/com.jetbrains.toolbox*"
"$HOME/Library/Caches/com.apple.finder"
"$HOME/Library/Mobile Documents*"
# System-critical caches that affect macOS functionality and stability
# CRITICAL: Removing these will cause system search and UI issues
"$HOME/Library/Caches/com.apple.FontRegistry*"
"$HOME/Library/Caches/com.apple.spotlight*"
"$HOME/Library/Caches/com.apple.Spotlight*"
"$HOME/Library/Caches/CloudKit*"
"$FINDER_METADATA_SENTINEL"
)

declare -a DEFAULT_OPTIMIZE_WHITELIST_PATTERNS=(
"check_brew_updates"
"check_brew_health"
"check_touchid"
"check_git_config"
@@ -98,7 +107,7 @@ declare -a DEFAULT_OPTIMIZE_WHITELIST_PATTERNS=(
# ============================================================================
readonly STAT_BSD="/usr/bin/stat"

# Get file size in bytes using BSD stat
# Get file size in bytes
get_file_size() {
local file="$1"
local result
@@ -106,8 +115,7 @@ get_file_size() {
echo "${result:-0}"
}

# Get file modification time using BSD stat
# Returns: epoch seconds
# Get file modification time in epoch seconds
get_file_mtime() {
local file="$1"
[[ -z "$file" ]] && {
@@ -119,7 +127,7 @@ get_file_mtime() {
echo "${result:-0}"
}

# Get file owner username using BSD stat
# Get file owner username
get_file_owner() {
local file="$1"
$STAT_BSD -f%Su "$file" 2> /dev/null || echo ""
@@ -146,8 +154,7 @@ is_sip_enabled() {
fi
}

# Check if running in interactive terminal
# Returns: 0 if interactive, 1 otherwise
# Check if running in an interactive terminal
is_interactive() {
[[ -t 1 ]]
}
@@ -165,12 +172,36 @@ detect_architecture() {
# Get free disk space on root volume
# Returns: human-readable string (e.g., "100G")
get_free_space() {
command df -h / | awk 'NR==2 {print $4}'
local target="/"
if [[ -d "/System/Volumes/Data" ]]; then
target="/System/Volumes/Data"
fi

df -h "$target" | awk 'NR==2 {print $4}'
}
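
Preferring the Data volume matters on APFS volume groups (macOS Catalina and later), where `/` is the read-only system volume and user files live on `/System/Volumes/Data`; measuring the latter reports the space a cleanup can actually reclaim. Illustrative output (numbers invented):

$ df -h /System/Volumes/Data | awk 'NR==2 {print $4}'
182Gi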

# Get optimal number of parallel jobs for a given operation type
# Args: $1 - operation type (scan|io|compute|default)
# Returns: number of jobs
# Get Darwin kernel major version (e.g., 24 for 24.2.0)
# Returns 999 on failure to adopt conservative behavior (assume modern system)
get_darwin_major() {
local kernel
kernel=$(uname -r 2> /dev/null || true)
local major="${kernel%%.*}"
if [[ ! "$major" =~ ^[0-9]+$ ]]; then
# Return high number to skip potentially dangerous operations on unknown systems
major=999
fi
echo "$major"
}

# Check if Darwin kernel major version meets minimum
is_darwin_ge() {
local minimum="$1"
local major
major=$(get_darwin_major)
[[ "$major" -ge "$minimum" ]]
}
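
Darwin kernel majors track macOS releases (22 = Ventura, 23 = Sonoma, 24 = Sequoia), so version gates read naturally; a hypothetical call site:

# Only take a Sequoia-era code path on Darwin 24 or newer
if is_darwin_ge 24; then
    echo "running on macOS 15 (Sequoia) or later"
fi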

# Get optimal parallel jobs for operation type (scan|io|compute|default)
get_optimal_parallel_jobs() {
local operation_type="${1:-default}"
local cpu_cores
@@ -188,53 +219,214 @@ get_optimal_parallel_jobs() {
esac
}

# ============================================================================
# User Context Utilities
# ============================================================================

is_root_user() {
[[ "$(id -u)" == "0" ]]
}

get_user_home() {
local user="$1"
local home=""

if [[ -z "$user" ]]; then
echo ""
return 0
fi

if command -v dscl > /dev/null 2>&1; then
home=$(dscl . -read "/Users/$user" NFSHomeDirectory 2> /dev/null | awk '{print $2}' | head -1 || true)
fi

if [[ -z "$home" ]]; then
home=$(eval echo "~$user" 2> /dev/null || true)
fi

if [[ "$home" == "~"* ]]; then
home=""
fi

echo "$home"
}

get_invoking_user() {
if [[ -n "${SUDO_USER:-}" && "${SUDO_USER:-}" != "root" ]]; then
echo "$SUDO_USER"
return 0
fi
echo "${USER:-}"
}

get_invoking_uid() {
if [[ -n "${SUDO_UID:-}" ]]; then
echo "$SUDO_UID"
return 0
fi

local uid
uid=$(id -u 2> /dev/null || true)
echo "$uid"
}

get_invoking_gid() {
if [[ -n "${SUDO_GID:-}" ]]; then
echo "$SUDO_GID"
return 0
fi

local gid
gid=$(id -g 2> /dev/null || true)
echo "$gid"
}

get_invoking_home() {
if [[ -n "${SUDO_USER:-}" && "${SUDO_USER:-}" != "root" ]]; then
get_user_home "$SUDO_USER"
return 0
fi

echo "${HOME:-}"
}

ensure_user_dir() {
local raw_path="$1"
if [[ -z "$raw_path" ]]; then
return 0
fi

local target_path="$raw_path"
if [[ "$target_path" == "~"* ]]; then
target_path="${target_path/#\~/$HOME}"
fi

mkdir -p "$target_path" 2> /dev/null || true

if ! is_root_user; then
return 0
fi

local sudo_user="${SUDO_USER:-}"
if [[ -z "$sudo_user" || "$sudo_user" == "root" ]]; then
return 0
fi

local user_home
user_home=$(get_user_home "$sudo_user")
if [[ -z "$user_home" ]]; then
return 0
fi
user_home="${user_home%/}"

if [[ "$target_path" != "$user_home" && "$target_path" != "$user_home/"* ]]; then
return 0
fi

local owner_uid="${SUDO_UID:-}"
local owner_gid="${SUDO_GID:-}"
if [[ -z "$owner_uid" || -z "$owner_gid" ]]; then
owner_uid=$(id -u "$sudo_user" 2> /dev/null || true)
owner_gid=$(id -g "$sudo_user" 2> /dev/null || true)
fi

if [[ -z "$owner_uid" || -z "$owner_gid" ]]; then
return 0
fi

local dir="$target_path"
while [[ -n "$dir" && "$dir" != "/" ]]; do
# Early stop: if ownership is already correct, no need to continue up the tree
if [[ -d "$dir" ]]; then
local current_uid
current_uid=$("$STAT_BSD" -f%u "$dir" 2> /dev/null || echo "")
if [[ "$current_uid" == "$owner_uid" ]]; then
break
fi
fi

chown "$owner_uid:$owner_gid" "$dir" 2> /dev/null || true

if [[ "$dir" == "$user_home" ]]; then
break
fi
dir=$(dirname "$dir")
if [[ "$dir" == "." ]]; then
break
fi
done
}
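
The upward chown walk covers the sudo case: directories created while running as root inside the invoking user's home would otherwise stay root-owned and break later non-sudo runs. A hypothetical trace, assuming `sudo` just created two nested directories under the user's home:

ensure_user_dir "$HOME/.config/mole/logs"
# chowns logs, then mole, then .config back to the SUDO_UID:SUDO_GID owner,
# stopping early once a directory already has the correct owner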

ensure_user_file() {
    local raw_path="$1"
    if [[ -z "$raw_path" ]]; then
        return 0
    fi

    local target_path="$raw_path"
    if [[ "$target_path" == "~"* ]]; then
        target_path="${target_path/#\~/$HOME}"
    fi

    ensure_user_dir "$(dirname "$target_path")"
    touch "$target_path" 2> /dev/null || true

    if ! is_root_user; then
        return 0
    fi

    local sudo_user="${SUDO_USER:-}"
    if [[ -z "$sudo_user" || "$sudo_user" == "root" ]]; then
        return 0
    fi

    local user_home
    user_home=$(get_user_home "$sudo_user")
    if [[ -z "$user_home" ]]; then
        return 0
    fi
    user_home="${user_home%/}"

    if [[ "$target_path" != "$user_home" && "$target_path" != "$user_home/"* ]]; then
        return 0
    fi

    local owner_uid="${SUDO_UID:-}"
    local owner_gid="${SUDO_GID:-}"
    if [[ -z "$owner_uid" || -z "$owner_gid" ]]; then
        owner_uid=$(id -u "$sudo_user" 2> /dev/null || true)
        owner_gid=$(id -g "$sudo_user" 2> /dev/null || true)
    fi

    if [[ -n "$owner_uid" && -n "$owner_gid" ]]; then
        chown "$owner_uid:$owner_gid" "$target_path" 2> /dev/null || true
    fi
}

# ============================================================================
# Formatting Utilities
# ============================================================================

# Convert bytes to human-readable format (e.g., 1.5GB)
# Args: $1 - size in bytes
# Returns: formatted string (e.g., "1.50GB", "256MB", "4KB")
bytes_to_human() {
    local bytes="$1"
    [[ "$bytes" =~ ^[0-9]+$ ]] || {
        echo "0B"
        return 1
    }

    if ((bytes >= 1073741824)); then # >= 1GB
        local divisor=1073741824
        local whole=$((bytes / divisor))
        local remainder=$((bytes % divisor))
        local frac=$(((remainder * 100 + divisor / 2) / divisor))
        if ((frac >= 100)); then
            frac=0
            ((whole++))
        fi
        printf "%d.%02dGB\n" "$whole" "$frac"
        return 0
    fi

    if ((bytes >= 1048576)); then # >= 1MB
        local divisor=1048576
        local whole=$((bytes / divisor))
        local remainder=$((bytes % divisor))
        local frac=$(((remainder * 10 + divisor / 2) / divisor))
        if ((frac >= 10)); then
            frac=0
            ((whole++))
        fi
        printf "%d.%01dMB\n" "$whole" "$frac"
        return 0
    fi

    if ((bytes >= 1024)); then
        local rounded_kb=$(((bytes + 512) / 1024))
        printf "%dKB\n" "$rounded_kb"
        return 0
    fi

    printf "%dB\n" "$bytes"
}
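
# Spot checks of the rounding behavior (hand-worked examples, assuming the
# function above is sourced):
#
#   bytes_to_human 1610612736   # -> 1.50GB (exactly 1.5 * 2^30)
#   bytes_to_human 1536000      # -> 1.5MB  (MB keeps one decimal)
#   bytes_to_human 1500         # -> 1KB    ((1500 + 512) / 1024 rounds to nearest)
#   bytes_to_human 512          # -> 512B
#   bytes_to_human "abc"        # -> 0B, and returns 1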

# Convert kilobytes to human-readable format
@@ -244,17 +436,22 @@ bytes_to_human_kb() {
    bytes_to_human "$((${1:-0} * 1024))"
}

# Get brand-friendly localized name for an application
# Args: $1 - application name
# Returns: localized name based on system language preference
get_brand_name() {
    local name="$1"

    # Detect if system primary language is Chinese (cached per session)
    if [[ -z "${MOLE_IS_CHINESE_SYSTEM:-}" ]]; then
        local sys_lang
        sys_lang=$(defaults read -g AppleLanguages 2> /dev/null | grep -o 'zh-Hans\|zh-Hant\|zh' | head -1 || echo "")
        if [[ -n "$sys_lang" ]]; then
            export MOLE_IS_CHINESE_SYSTEM="true"
        else
            export MOLE_IS_CHINESE_SYSTEM="false"
        fi
    fi

    local is_chinese="${MOLE_IS_CHINESE_SYSTEM}"

    # Return localized names based on system language
    if [[ "$is_chinese" == true ]]; then

@@ -304,7 +501,6 @@ declare -a MOLE_TEMP_FILES=()
declare -a MOLE_TEMP_DIRS=()

# Create tracked temporary file
# Returns: temp file path
create_temp_file() {
    local temp
    temp=$(mktemp) || return 1
@@ -313,7 +509,6 @@ create_temp_file() {
}

# Create tracked temporary directory
# Returns: temp directory path
create_temp_dir() {
    local temp
    temp=$(mktemp -d) || return 1
@@ -342,6 +537,7 @@ mktemp_file() {

# Cleanup all tracked temp files and directories
cleanup_temp_files() {
    stop_inline_spinner 2> /dev/null || true
    local file
    if [[ ${#MOLE_TEMP_FILES[@]} -gt 0 ]]; then
        for file in "${MOLE_TEMP_FILES[@]}"; do
@@ -379,7 +575,7 @@ start_section() {
# End a section
# Shows "Nothing to tidy" if no activity was recorded
end_section() {
    if [[ "${TRACK_SECTION:-0}" == "1" && "${SECTION_ACTIVITY:-0}" == "0" ]]; then
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} Nothing to tidy"
    fi
    TRACK_SECTION=0
@@ -387,7 +583,272 @@ end_section() {

# Mark activity in current section
note_activity() {
    if [[ "${TRACK_SECTION:-0}" == "1" ]]; then
        SECTION_ACTIVITY=1
    fi
}

# Start a section spinner with optional message
# Usage: start_section_spinner "message"
start_section_spinner() {
    local message="${1:-Scanning...}"
    stop_inline_spinner 2> /dev/null || true
    if [[ -t 1 ]]; then
        MOLE_SPINNER_PREFIX=" " start_inline_spinner "$message"
    fi
}

# Stop spinner and clear the line
# Usage: stop_section_spinner
stop_section_spinner() {
    stop_inline_spinner 2> /dev/null || true
    if [[ -t 1 ]]; then
        echo -ne "\r\033[K" >&2 || true
    fi
}

# Safe terminal line clearing with terminal type detection
# Usage: safe_clear_lines <num_lines> [tty_device]
# Returns: 0 on success, 1 if terminal doesn't support ANSI
safe_clear_lines() {
    local lines="${1:-1}"
    local tty_device="${2:-/dev/tty}"

    # Use centralized ANSI support check (defined below).
    # The forward reference is fine: bash resolves function names at call
    # time, and is_ansi_supported is defined before this function runs.
    is_ansi_supported 2> /dev/null || return 1

    # Clear lines one by one (more reliable than multi-line sequences)
    local i
    for ((i = 0; i < lines; i++)); do
        printf "\033[1A\r\033[K" > "$tty_device" 2> /dev/null || return 1
    done

    return 0
}

# Safe single line clear with fallback
# Usage: safe_clear_line [tty_device]
safe_clear_line() {
    local tty_device="${1:-/dev/tty}"

    # Use centralized ANSI support check
    is_ansi_supported 2> /dev/null || return 1

    printf "\r\033[K" > "$tty_device" 2> /dev/null || return 1
    return 0
}

# Update progress spinner if enough time has elapsed
# Usage: update_progress_if_needed <completed> <total> <last_update_time_var> [interval]
# Example: update_progress_if_needed "$completed" "$total" last_progress_update 2
# Returns: 0 if updated, 1 if skipped
update_progress_if_needed() {
    local completed="$1"
    local total="$2"
    local last_update_var="$3" # Name of variable holding last update time
    local interval="${4:-2}"   # Default: update every 2 seconds

    # Get current time
    local current_time=$(date +%s)

    # Get last update time from variable
    local last_time
    eval "last_time=\${$last_update_var:-0}"

    # Check if enough time has elapsed
    if [[ $((current_time - last_time)) -ge $interval ]]; then
        # Update the spinner with progress
        stop_section_spinner
        start_section_spinner "Scanning items... ($completed/$total)"

        # Update the last_update_time variable
        eval "$last_update_var=$current_time"
        return 0
    fi

    return 1
}
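
# Typical call site is a scan loop, roughly like this (a sketch; scan_one and
# the items array are placeholders, not real Mole functions):
#
#   items=("$HOME/Library/Caches"/*)
#   completed=0
#   last_progress_update=0
#   for item in "${items[@]}"; do
#       scan_one "$item"
#       completed=$((completed + 1))
#       update_progress_if_needed "$completed" "${#items[@]}" last_progress_update 2 || true
#   done
#   stop_section_spinner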

# ============================================================================
# Spinner Stack Management (prevents nesting issues)
# ============================================================================

# Global spinner stack
declare -a MOLE_SPINNER_STACK=()

# Push current spinner state onto stack
# Usage: push_spinner_state
push_spinner_state() {
    local current_state=""

    # Save current spinner PID if running
    if [[ -n "${MOLE_SPINNER_PID:-}" ]] && kill -0 "$MOLE_SPINNER_PID" 2> /dev/null; then
        current_state="running:$MOLE_SPINNER_PID"
    else
        current_state="stopped"
    fi

    MOLE_SPINNER_STACK+=("$current_state")
    debug_log "Pushed spinner state: $current_state (stack depth: ${#MOLE_SPINNER_STACK[@]})"
}

# Pop and restore spinner state from stack
# Usage: pop_spinner_state
pop_spinner_state() {
    if [[ ${#MOLE_SPINNER_STACK[@]} -eq 0 ]]; then
        debug_log "Warning: Attempted to pop from empty spinner stack"
        return 1
    fi

    # Stack depth safety check
    if [[ ${#MOLE_SPINNER_STACK[@]} -gt 10 ]]; then
        debug_log "Warning: Spinner stack depth excessive (${#MOLE_SPINNER_STACK[@]}), possible leak"
    fi

    local last_idx=$((${#MOLE_SPINNER_STACK[@]} - 1))
    local state="${MOLE_SPINNER_STACK[$last_idx]}"

    # Remove from stack (Bash 3.2 compatible way)
    # Instead of unset, rebuild array without last element
    local -a new_stack=()
    local i
    for ((i = 0; i < last_idx; i++)); do
        new_stack+=("${MOLE_SPINNER_STACK[$i]}")
    done
    if ((last_idx > 0)); then
        MOLE_SPINNER_STACK=("${new_stack[@]}")
    else
        # Expanding an empty array errors under set -u on Bash 3.2
        MOLE_SPINNER_STACK=()
    fi

    debug_log "Popped spinner state: $state (remaining depth: ${#MOLE_SPINNER_STACK[@]})"

    # Restore state if needed
    if [[ "$state" == running:* ]]; then
        # Previous spinner was running - we don't restart it automatically
        # This is intentional to avoid UI conflicts
        :
    fi

    return 0
}

# Safe spinner start with stack management
# Usage: safe_start_spinner <message>
safe_start_spinner() {
    local message="${1:-Working...}"

    # Push current state
    push_spinner_state

    # Stop any existing spinner
    stop_section_spinner 2> /dev/null || true

    # Start new spinner
    start_section_spinner "$message"
}

# Safe spinner stop with stack management
# Usage: safe_stop_spinner
safe_stop_spinner() {
    # Stop current spinner
    stop_section_spinner 2> /dev/null || true

    # Pop previous state
    pop_spinner_state || true
}
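
# Nesting then balances like a stack (illustrative; the two tasks are
# placeholders):
#
#   safe_start_spinner "Scanning applications..."   # pushes "stopped"
#   scan_apps_step
#   safe_start_spinner "Checking caches..."         # pushes the outer state
#   check_caches_step
#   safe_stop_spinner                               # pops back to the outer level
#   safe_stop_spinner                               # balances the first start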

# ============================================================================
# Terminal Compatibility Checks
# ============================================================================

# Check if terminal supports ANSI escape codes
# Usage: is_ansi_supported
# Returns: 0 if supported, 1 if not
is_ansi_supported() {
    # Check if running in interactive terminal
    [[ -t 1 ]] || return 1

    # Check TERM variable
    [[ -n "${TERM:-}" ]] || return 1

    # Check for known ANSI-compatible terminals
    case "$TERM" in
        xterm* | vt100 | vt220 | screen* | tmux* | ansi | linux | rxvt* | konsole*)
            return 0
            ;;
        dumb | unknown)
            return 1
            ;;
        *)
            # Check terminfo database if available
            if command -v tput > /dev/null 2>&1; then
                # Test if terminal supports colors (good proxy for ANSI support)
                local colors=$(tput colors 2> /dev/null || echo "0")
                [[ "$colors" -ge 8 ]] && return 0
            fi
            return 1
            ;;
    esac
}
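
# Callers are expected to branch on this check before emitting escape codes,
# along these lines:
#
#   if is_ansi_supported; then
#       printf '\033[32m%s\033[0m\n' "Done"
#   else
#       echo "Done"   # plain fallback for dumb/unknown terminals
#   fi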

# Get terminal capability info
# Usage: get_terminal_info
get_terminal_info() {
    local info="Terminal: ${TERM:-unknown}"

    if is_ansi_supported; then
        info+=" (ANSI supported)"

        if command -v tput > /dev/null 2>&1; then
            local cols=$(tput cols 2> /dev/null || echo "?")
            local lines=$(tput lines 2> /dev/null || echo "?")
            local colors=$(tput colors 2> /dev/null || echo "?")
            info+=" ${cols}x${lines}, ${colors} colors"
        fi
    else
        info+=" (ANSI not supported)"
    fi

    echo "$info"
}

# Validate terminal environment before running
# Usage: validate_terminal_environment
# Returns: 0 if OK, 1 with warning if issues detected
validate_terminal_environment() {
    local warnings=0

    # Check if TERM is set
    if [[ -z "${TERM:-}" ]]; then
        log_warning "TERM environment variable not set"
        ((warnings++))
    fi

    # Check if running in a known problematic terminal
    case "${TERM:-}" in
        dumb)
            log_warning "Running in 'dumb' terminal - limited functionality"
            ((warnings++))
            ;;
        unknown)
            log_warning "Terminal type unknown - may have display issues"
            ((warnings++))
            ;;
    esac

    # Check terminal size if available
    if command -v tput > /dev/null 2>&1; then
        local cols=$(tput cols 2> /dev/null || echo "80")
        if [[ "$cols" -lt 60 ]]; then
            log_warning "Terminal width ($cols cols) is narrow - output may wrap"
            ((warnings++))
        fi
    fi

    # Report compatibility
    if [[ $warnings -eq 0 ]]; then
        debug_log "Terminal environment validated: $(get_terminal_info)"
        return 0
    else
        debug_log "Terminal compatibility warnings: $warnings"
        return 1
    fi
}

17
lib/core/commands.sh
Normal file
@@ -0,0 +1,17 @@
#!/bin/bash

# Shared command list for help text and completions.
MOLE_COMMANDS=(
    "clean:Free up disk space"
    "uninstall:Remove apps completely"
    "optimize:Check and maintain system"
    "analyze:Explore disk usage"
    "status:Monitor system health"
    "purge:Remove old project artifacts"
    "touchid:Configure Touch ID for sudo"
    "completion:Setup shell tab completion"
    "update:Update to latest version"
    "remove:Remove Mole from system"
    "help:Show help"
    "version:Show version"
)
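
# Each entry is a name:description pair, so help text and completions can
# split it with parameter expansion, e.g.:
#
#   for entry in "${MOLE_COMMANDS[@]}"; do
#       printf '  %-12s %s\n' "${entry%%:*}" "${entry#*:}"
#   done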

@@ -12,7 +12,7 @@ readonly MOLE_COMMON_LOADED=1

_MOLE_CORE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Load core modules
source "$_MOLE_CORE_DIR/base.sh"
source "$_MOLE_CORE_DIR/log.sh"

@@ -26,32 +26,55 @@ if [[ -f "$_MOLE_CORE_DIR/sudo.sh" ]]; then
    source "$_MOLE_CORE_DIR/sudo.sh"
fi

# Update Mole via Homebrew
# Args: $1 = current version
update_via_homebrew() {
    local current_version="$1"
    local temp_update temp_upgrade
    temp_update=$(mktemp_file "brew_update")
    temp_upgrade=$(mktemp_file "brew_upgrade")

    # Set up trap for interruption (Ctrl+C) with inline cleanup
    trap 'stop_inline_spinner 2>/dev/null; rm -f "$temp_update" "$temp_upgrade" 2>/dev/null; echo ""; exit 130' INT TERM

    # Update Homebrew
    if [[ -t 1 ]]; then
        start_inline_spinner "Updating Homebrew..."
    else
        echo "Updating Homebrew..."
    fi

    brew update > "$temp_update" 2>&1 &
    local update_pid=$!
    wait $update_pid 2> /dev/null || true # Continue even if brew update fails

    if [[ -t 1 ]]; then
        stop_inline_spinner
    fi

    # Upgrade Mole
    if [[ -t 1 ]]; then
        start_inline_spinner "Upgrading Mole..."
    else
        echo "Upgrading Mole..."
    fi

    brew upgrade mole > "$temp_upgrade" 2>&1 &
    local upgrade_pid=$!
    wait $upgrade_pid 2> /dev/null || true # Continue even if brew upgrade fails

    local upgrade_output
    upgrade_output=$(cat "$temp_upgrade")

    if [[ -t 1 ]]; then
        stop_inline_spinner
    fi

    # Clear trap
    trap - INT TERM

    # Cleanup temp files
    rm -f "$temp_update" "$temp_upgrade"

    if echo "$upgrade_output" | grep -q "already installed"; then
        local installed_version
        installed_version=$(brew list --versions mole 2> /dev/null | awk '{print $2}')
@@ -71,12 +94,11 @@ update_via_homebrew() {
        echo ""
    fi

    # Clear update cache (suppress errors if cache doesn't exist or is locked)
    rm -f "$HOME/.cache/mole/version_check" "$HOME/.cache/mole/update_message" 2> /dev/null || true
}

# Remove applications from Dock
# Args: app paths to remove
remove_apps_from_dock() {
    if [[ $# -eq 0 ]]; then
        return 0
@@ -89,7 +111,7 @@ remove_apps_from_dock() {
        return 0
    fi

    # Prune dock entries using the Python helper
    python3 - "$@" << 'PY' 2> /dev/null || return 0
import os
import plistlib

@@ -29,11 +29,7 @@ fi
# Path Validation
# ============================================================================

# Validate path for deletion operations
# Checks: non-empty, absolute, no traversal, no control chars, not system dir
# Args: $1 - path to validate
# Returns: 0 if safe, 1 if unsafe
validate_path_for_deletion() {
    local path="$1"

@@ -61,6 +57,13 @@ validate_path_for_deletion() {
        return 1
    fi

    # Allow deletion of coresymbolicationd cache (safe system cache that can be rebuilt)
    case "$path" in
        /System/Library/Caches/com.apple.coresymbolicationd/data | /System/Library/Caches/com.apple.coresymbolicationd/data/*)
            return 0
            ;;
    esac

    # Check path isn't a critical system directory
    case "$path" in
        / | /bin | /sbin | /usr | /usr/bin | /usr/sbin | /etc | /var | /System | /System/* | /Library/Extensions)
@@ -76,13 +79,7 @@ validate_path_for_deletion() {
# Safe Removal Operations
# ============================================================================

# Safe wrapper around rm -rf with path validation
# Args:
#   $1 - path to remove
#   $2 - silent mode (optional, default: false)
# Returns: 0 on success, 1 on failure
safe_remove() {
    local path="$1"
    local silent="${2:-false}"
@@ -97,21 +94,37 @@ safe_remove() {
        return 0
    fi

    # Dry-run mode: log but don't delete
    if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
        debug_log "[DRY RUN] Would remove: $path"
        return 0
    fi

    debug_log "Removing: $path"

    # Perform the deletion.
    # Use || to capture the exit code so set -e won't abort on rm failures.
    local error_msg
    local rm_exit=0
    error_msg=$(rm -rf "$path" 2>&1) || rm_exit=$? # SAFE: safe_remove implementation

    if [[ $rm_exit -eq 0 ]]; then
        return 0
    else
        # Check if it's a permission error
        if [[ "$error_msg" == *"Permission denied"* ]] || [[ "$error_msg" == *"Operation not permitted"* ]]; then
            MOLE_PERMISSION_DENIED_COUNT=${MOLE_PERMISSION_DENIED_COUNT:-0}
            MOLE_PERMISSION_DENIED_COUNT=$((MOLE_PERMISSION_DENIED_COUNT + 1))
            export MOLE_PERMISSION_DENIED_COUNT
            debug_log "Permission denied: $path (may need Full Disk Access)"
        else
            [[ "$silent" != "true" ]] && log_error "Failed to remove: $path"
        fi
        return 1
    fi
}
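
# The dry-run switch makes it easy to preview a cleanup. A sketch (the
# com.example.app path is hypothetical; MO_DEBUG=1 makes debug_log visible):
#
#   export MOLE_DRY_RUN=1 MO_DEBUG=1
#   safe_remove "$HOME/Library/Caches/com.example.app"
#   # -> [DEBUG] [DRY RUN] Would remove: ...  (nothing is deleted)
#   unset MOLE_DRY_RUN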

# Safe sudo remove with additional symlink protection
# Args: $1 - path to remove
# Returns: 0 on success, 1 on failure
safe_sudo_remove() {
    local path="$1"

@@ -132,6 +145,12 @@ safe_sudo_remove() {
        return 1
    fi

    # Dry-run mode: log but don't delete
    if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
        debug_log "[DRY RUN] Would remove (sudo): $path"
        return 0
    fi

    debug_log "Removing (sudo): $path"

    # Perform the deletion
@@ -147,15 +166,7 @@ safe_sudo_remove() {
# Safe Find and Delete Operations
# ============================================================================

# Safe find delete with depth limit and validation
# Args:
#   $1 - base directory
#   $2 - file pattern (e.g., "*.log")
#   $3 - age in days (0 = all files, default: 7)
#   $4 - type filter ("f" or "d", default: "f")
# Returns: 0 on success, 1 on failure
safe_find_delete() {
    local base_dir="$1"
    local pattern="$2"
@@ -181,44 +192,38 @@ safe_find_delete() {

    debug_log "Finding in $base_dir: $pattern (age: ${age_days}d, type: $type_filter)"

    # Build find arguments with safety limits (maxdepth 5 covers most app cache structures)
    local find_args=("-maxdepth" "5" "-name" "$pattern" "-type" "$type_filter")
    if [[ "$age_days" -gt 0 ]]; then
        find_args+=("-mtime" "+$age_days")
    fi

    # Iterate results to respect should_protect_path when available
    while IFS= read -r -d '' match; do
        if command -v should_protect_path > /dev/null 2>&1; then
            if should_protect_path "$match"; then
                continue
            fi
        fi
        safe_remove "$match" true || true
    done < <(command find "$base_dir" "${find_args[@]}" -print0 2> /dev/null || true)

    return 0
}
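
# Typical invocations (the ExampleApp paths are placeholders):
#
#   safe_find_delete "$HOME/Library/Logs/ExampleApp" "*.log" 7      # files older than 7 days
#   safe_find_delete "$HOME/Library/Caches/ExampleApp" "*.tmp" 0    # all matches, any age
#   safe_find_delete "$HOME/Library/Caches/ExampleApp" "tmp*" 30 d  # stale directories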

# Safe sudo find delete (same as safe_find_delete but with sudo)
# Args: same as safe_find_delete
# Returns: 0 on success, 1 on failure
safe_sudo_find_delete() {
    local base_dir="$1"
    local pattern="$2"
    local age_days="${3:-7}"
    local type_filter="${4:-f}"

    # Validate base directory (use sudo for permission-restricted dirs)
    if ! sudo test -d "$base_dir" 2> /dev/null; then
        debug_log "Directory does not exist (skipping): $base_dir"
        return 0
    fi

    if sudo test -L "$base_dir" 2> /dev/null; then
        log_error "Refusing to search symlinked directory: $base_dir"
        return 1
    fi
@@ -231,22 +236,21 @@ safe_sudo_find_delete() {

    debug_log "Finding (sudo) in $base_dir: $pattern (age: ${age_days}d, type: $type_filter)"

    # Execute find with sudo
    local find_args=("-maxdepth" "5" "-name" "$pattern" "-type" "$type_filter")
    if [[ "$age_days" -gt 0 ]]; then
        find_args+=("-mtime" "+$age_days")
    fi

    # Iterate results to respect should_protect_path when available
    while IFS= read -r -d '' match; do
        if command -v should_protect_path > /dev/null 2>&1; then
            if should_protect_path "$match"; then
                continue
            fi
        fi
        safe_sudo_remove "$match" || true
    done < <(sudo find "$base_dir" "${find_args[@]}" -print0 2> /dev/null || true)

    return 0
}

@@ -254,11 +258,7 @@ safe_sudo_find_delete() {
# Size Calculation
# ============================================================================

# Get path size in KB (returns 0 if path doesn't exist)
# Args: $1 - path
get_path_size_kb() {
    local path="$1"
    [[ -z "$path" || ! -e "$path" ]] && {
@@ -266,15 +266,20 @@ get_path_size_kb() {
        return
    }
    # Direct execution without timeout overhead - critical for performance in loops.
    # Use || true so a du failure (e.g. permission error) doesn't exit the
    # script under set -e; with pipefail, a failing du would otherwise fail
    # the whole pipeline.
    local size
    size=$(command du -sk "$path" 2> /dev/null | awk 'NR==1 {print $1; exit}' || true)

    # Ensure size is a valid number (fix for non-numeric du output)
    if [[ "$size" =~ ^[0-9]+$ ]]; then
        echo "$size"
    else
        echo "0"
    fi
}
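
# Combined with bytes_to_human_kb above, this yields the human-readable sizes
# used in reports, e.g.:
#
#   cache_kb=$(get_path_size_kb "$HOME/Library/Caches")
#   echo "User caches: $(bytes_to_human_kb "$cache_kb")"   # e.g. "User caches: 1.20GB"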

# Calculate total size of multiple paths
# Args: $1 - newline-separated list of paths
# Returns: total size in KB
calculate_total_size() {
    local files="$1"
    local total_kb=0

@@ -25,15 +25,14 @@ readonly LOG_FILE="${HOME}/.config/mole/mole.log"
readonly DEBUG_LOG_FILE="${HOME}/.config/mole/mole_debug_session.log"
readonly LOG_MAX_SIZE_DEFAULT=1048576 # 1MB

# Ensure log directory and file exist with correct ownership
ensure_user_file "$LOG_FILE"

# ============================================================================
# Log Rotation
# ============================================================================

# Rotate log file if it exceeds maximum size
# Called once at module load, not per log entry
rotate_log_once() {
    # Skip if already checked this session
    [[ -n "${MOLE_LOG_ROTATED:-}" ]] && return 0
@@ -42,7 +41,7 @@ rotate_log_once() {
    local max_size="${MOLE_MAX_LOG_SIZE:-$LOG_MAX_SIZE_DEFAULT}"
    if [[ -f "$LOG_FILE" ]] && [[ $(get_file_size "$LOG_FILE") -gt "$max_size" ]]; then
        mv "$LOG_FILE" "${LOG_FILE}.old" 2> /dev/null || true
        ensure_user_file "$LOG_FILE"
    fi
}

@@ -51,7 +50,6 @@ rotate_log_once() {
# ============================================================================

# Log informational message
# Args: $1 - message
log_info() {
    echo -e "${BLUE}$1${NC}"
    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
@@ -62,7 +60,6 @@ log_info() {
}

# Log success message
# Args: $1 - message
log_success() {
    echo -e " ${GREEN}${ICON_SUCCESS}${NC} $1"
    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
@@ -73,7 +70,6 @@ log_success() {
}

# Log warning message
# Args: $1 - message
log_warning() {
    echo -e "${YELLOW}$1${NC}"
    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
@@ -84,9 +80,8 @@ log_warning() {
}

# Log error message
# Args: $1 - message
log_error() {
    echo -e "${YELLOW}${ICON_ERROR}${NC} $1" >&2
    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo "[$timestamp] ERROR: $1" >> "$LOG_FILE" 2> /dev/null || true
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
@@ -94,8 +89,7 @@ log_error() {
    fi
}

# Debug logging (active when MO_DEBUG=1)
# Args: $@ - debug message components
debug_log() {
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
        echo -e "${GRAY}[DEBUG]${NC} $*" >&2
@@ -110,6 +104,7 @@ log_system_info() {
    export MOLE_SYS_INFO_LOGGED=1

    # Reset debug log file for this new session
    ensure_user_file "$DEBUG_LOG_FILE"
    : > "$DEBUG_LOG_FILE"

    # Start block in debug log file
@@ -143,15 +138,12 @@ log_system_info() {
# Command Execution Wrappers
# ============================================================================

# Run command silently (ignore errors)
# Args: $@ - command and arguments
run_silent() {
    "$@" > /dev/null 2>&1 || true
}

# Run command with error logging
# Args: $@ - command and arguments
# Returns: command exit code
run_logged() {
    local cmd="$1"
    # Log to main file, and also to debug file if enabled
@@ -173,8 +165,7 @@ run_logged() {
# Formatted Output
# ============================================================================

# Print formatted summary block
# Args: $1=status (ignored), $2=heading, $@=details
print_summary_block() {
    local heading=""
    local -a details=()

@@ -16,6 +16,7 @@ check_touchid_support() {
    return 1
}

# Detect clamshell mode (lid closed)
is_clamshell_mode() {
    # ioreg is missing (not macOS) -> treat as lid open
    if ! command -v ioreg > /dev/null 2>&1; then
@@ -115,15 +116,23 @@ request_sudo_access() {
    # Check if in clamshell mode - if yes, skip Touch ID entirely
    if is_clamshell_mode; then
        echo -e "${PURPLE}${ICON_ARROW}${NC} ${prompt_msg}"
        if _request_password "$tty_path"; then
            # Clear all prompt lines (use safe clearing method)
            safe_clear_lines 3 "$tty_path"
            return 0
        fi
        return 1
    fi

    # Not in clamshell mode - try Touch ID if configured
    if ! check_touchid_support; then
        echo -e "${PURPLE}${ICON_ARROW}${NC} ${prompt_msg}"
        if _request_password "$tty_path"; then
            # Clear all prompt lines (use safe clearing method)
            safe_clear_lines 3 "$tty_path"
            return 0
        fi
        return 1
    fi

    # Touch ID is available and not in clamshell mode
@@ -142,7 +151,8 @@ request_sudo_access() {
    wait "$sudo_pid" 2> /dev/null
    local exit_code=$?
    if [[ $exit_code -eq 0 ]] && sudo -n true 2> /dev/null; then
        # Touch ID succeeded - clear the prompt line
        safe_clear_lines 1 "$tty_path"
        return 0
    fi
    # Touch ID failed or cancelled
@@ -168,10 +178,15 @@ request_sudo_access() {
    sleep 1

    # Clear any leftover prompts on the screen
    safe_clear_line "$tty_path"

    # Now use our password input (this should not trigger Touch ID again)
    if _request_password "$tty_path"; then
        # Clear all prompt lines (use safe clearing method)
        safe_clear_lines 3 "$tty_path"
        return 0
    fi
    return 1
}

# ============================================================================
@@ -182,8 +197,7 @@ request_sudo_access() {
MOLE_SUDO_KEEPALIVE_PID=""
MOLE_SUDO_ESTABLISHED="false"

# Start sudo keepalive background process
# Returns: PID of keepalive process
_start_sudo_keepalive() {
    # Start background keepalive process with all outputs redirected.
    # This is critical: command substitution waits for all file descriptors to close
@@ -212,8 +226,7 @@ _start_sudo_keepalive() {
    echo $pid
}

# Stop sudo keepalive process
# Args: $1 - PID of keepalive process
_stop_sudo_keepalive() {
    local pid="${1:-}"
    if [[ -n "$pid" ]]; then
@@ -227,8 +240,7 @@ has_sudo_session() {
    sudo -n true 2> /dev/null
}

# Request administrative access (wrapper for common.sh function)
# Args: $1 - prompt message
request_sudo() {
    local prompt_msg="${1:-Admin access required}"

@@ -244,8 +256,7 @@ request_sudo() {
    fi
}

# Maintain an active sudo session with keepalive
# Args: $1 - prompt message
ensure_sudo_session() {
    local prompt="${1:-Admin access required}"

@@ -287,8 +298,7 @@ register_sudo_cleanup() {
    trap stop_sudo_session EXIT INT TERM
}

# Predict if an operation requires administrative access
# Args: $@ - list of operations to check
will_need_sudo() {
    local -a operations=("$@")
    for op in "${operations[@]}"; do

143
lib/core/ui.sh
@@ -17,10 +17,7 @@ clear_screen() { printf '\033[2J\033[H'; }
hide_cursor() { [[ -t 1 ]] && printf '\033[?25l' >&2 || true; }
show_cursor() { [[ -t 1 ]] && printf '\033[?25h' >&2 || true; }

# Calculate display width (CJK characters count as 2)
# Args: $1 - string to measure
# Returns: display width
# Note: Works correctly even when LC_ALL=C is set
get_display_width() {
    local str="$1"

@@ -63,11 +60,26 @@ get_display_width() {
    local padding=$((extra_bytes / 2))
    width=$((char_count + padding))

    # Adjust for zero-width joiners and emoji variation selectors (common in filenames/emojis).
    # These characters add bytes but no visible width; subtract their count if present.
    local zwj=$'\u200d'  # zero-width joiner
    local vs16=$'\ufe0f' # emoji variation selector
    local zero_width=0

    local without_zwj=${str//$zwj/}
    zero_width=$((zero_width + (char_count - ${#without_zwj})))

    local without_vs=${str//$vs16/}
    zero_width=$((zero_width + (char_count - ${#without_vs})))

    if ((zero_width > 0 && width > zero_width)); then
        width=$((width - zero_width))
    fi

    echo "$width"
}
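
# Spot checks of the width math (each CJK character occupies two terminal
# columns; assumes the function above is sourced):
#
#   get_display_width "Safari"   # -> 6
#   get_display_width "微信"     # -> 4 (2 chars x 2 columns)
#   get_display_width "QQ音乐"   # -> 6 (2 ASCII + 2 CJK)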

# Truncate string by display width (handles CJK)
# Args: $1 - string, $2 - max display width
truncate_by_display_width() {
    local str="$1"
    local max_width="$2"
@@ -140,7 +152,7 @@ truncate_by_display_width() {
    echo "${truncated}..."
}

# Read single keyboard input
read_key() {
    local key rest read_status
    IFS= read -r -s -n 1 key
@@ -222,7 +234,7 @@ drain_pending_input() {
    done
}

# Format menu option display
show_menu_option() {
    local number="$1"
    local text="$2"
@@ -235,53 +247,77 @@ show_menu_option() {
    fi
}

# Background spinner implementation
INLINE_SPINNER_PID=""
INLINE_SPINNER_STOP_FILE=""

start_inline_spinner() {
    stop_inline_spinner 2> /dev/null || true
    local message="$1"

    if [[ -t 1 ]]; then
        # Create unique stop flag file for this spinner instance
        INLINE_SPINNER_STOP_FILE="${TMPDIR:-/tmp}/mole_spinner_$$_$RANDOM.stop"

        (
            trap 'exit 0' TERM INT EXIT
            local stop_file="$INLINE_SPINNER_STOP_FILE"
            local chars
            chars="$(mo_spinner_chars)"
            [[ -z "$chars" ]] && chars="|/-\\"
            local i=0
            # Cooperative exit: check for stop file instead of relying on signals
            while [[ ! -f "$stop_file" ]]; do
                local c="${chars:$((i % ${#chars})):1}"
                # Output to stderr to avoid interfering with stdout
                printf "\r${MOLE_SPINNER_PREFIX:-}${BLUE}%s${NC} %s" "$c" "$message" >&2 || break
                ((i++))
                sleep 0.1
            done

            # Clean up stop file before exiting
            rm -f "$stop_file" 2> /dev/null || true
            exit 0
        ) &
        INLINE_SPINNER_PID=$!
        disown 2> /dev/null || true
    else
        echo -n " ${BLUE}|${NC} $message" >&2 || true
    fi
}

stop_inline_spinner() {
    if [[ -n "$INLINE_SPINNER_PID" ]]; then
        # Cooperative stop: create stop file to signal spinner to exit
        if [[ -n "$INLINE_SPINNER_STOP_FILE" ]]; then
            touch "$INLINE_SPINNER_STOP_FILE" 2> /dev/null || true
        fi

        # Wait briefly for cooperative exit
        local wait_count=0
        while kill -0 "$INLINE_SPINNER_PID" 2> /dev/null && [[ $wait_count -lt 5 ]]; do
            sleep 0.05 2> /dev/null || true
            ((wait_count++))
        done

        # Only use SIGKILL as a last resort if the process is stuck
        if kill -0 "$INLINE_SPINNER_PID" 2> /dev/null; then
            kill -KILL "$INLINE_SPINNER_PID" 2> /dev/null || true
        fi

        wait "$INLINE_SPINNER_PID" 2> /dev/null || true

        # Cleanup
        rm -f "$INLINE_SPINNER_STOP_FILE" 2> /dev/null || true
        INLINE_SPINNER_PID=""
        INLINE_SPINNER_STOP_FILE=""

        # Clear the line - use \033[2K to clear the entire line, not just to the end
        [[ -t 1 ]] && printf "\r\033[2K" >&2 || true
    fi
}

# Run command with a terminal spinner
with_spinner() {
    local msg="$1"
    shift || true
@@ -302,9 +338,7 @@ mo_spinner_chars() {
    printf "%s" "$chars"
}

# Format relative time for compact display (e.g., 3d ago)
# Args: $1 = last used string (e.g., "3 days ago", "Today", "Never")
# Returns: Compact version (e.g., "3d ago", "Today", "Never")
format_last_used_summary() {
    local value="$1"

@@ -341,3 +375,60 @@ format_last_used_summary() {
    fi
    echo "$value"
}
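
# Expected mapping, per the comment above (illustrative; the full lookup
# lives in the truncated hunk):
#
#   format_last_used_summary "3 days ago"   # -> "3d ago"
#   format_last_used_summary "Today"        # -> "Today" (already compact)
#   format_last_used_summary "Never"        # -> "Never"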

# Check if terminal has Full Disk Access
# Returns 0 if FDA is granted, 1 if denied, 2 if unknown
has_full_disk_access() {
    # Cache the result to avoid repeated checks
    if [[ -n "${MOLE_HAS_FDA:-}" ]]; then
        if [[ "$MOLE_HAS_FDA" == "1" ]]; then
            return 0
        elif [[ "$MOLE_HAS_FDA" == "unknown" ]]; then
            return 2
        else
            return 1
        fi
    fi

    # Test access to protected directories that require FDA.
    # Strategy: try to access directories that are commonly protected.
    # If ANY of them are accessible, we likely have FDA.
    # If ALL fail, we definitely don't have FDA.
    local -a protected_dirs=(
        "$HOME/Library/Safari/LocalStorage"
        "$HOME/Library/Mail/V10"
        "$HOME/Library/Messages/chat.db"
    )

    local accessible_count=0
    local tested_count=0

    for test_path in "${protected_dirs[@]}"; do
        # Only test when the protected path exists
        if [[ -e "$test_path" ]]; then
            tested_count=$((tested_count + 1))
            # Try to stat the ACTUAL protected path - this requires FDA
            if stat "$test_path" > /dev/null 2>&1; then
                accessible_count=$((accessible_count + 1))
            fi
        fi
    done

    # Three possible outcomes:
    # 1. tested_count = 0: can't determine (test paths don't exist) → unknown
    # 2. tested_count > 0 && accessible_count > 0: has FDA → yes
    # 3. tested_count > 0 && accessible_count = 0: no FDA → no
    if [[ $tested_count -eq 0 ]]; then
        # Can't determine - test paths don't exist, treat as unknown
        export MOLE_HAS_FDA="unknown"
        return 2
    elif [[ $accessible_count -gt 0 ]]; then
        # At least one path is accessible → has FDA
        export MOLE_HAS_FDA=1
        return 0
    else
        # Tested paths exist but are not accessible → no FDA
        export MOLE_HAS_FDA=0
        return 1
    fi
}
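
# Because the result is tri-state, call sites should branch on the exit code
# rather than treating it as a boolean (the || guard also keeps set -e from
# aborting on the non-zero returns):
#
#   fda_status=0
#   has_full_disk_access || fda_status=$?
#   case $fda_status in
#       0) debug_log "Full Disk Access granted" ;;
#       1) log_warning "No Full Disk Access - protected caches will be skipped" ;;
#       2) debug_log "Full Disk Access status unknown" ;;
#   esac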

@@ -10,9 +10,13 @@ show_suggestions() {
    local can_auto_fix=false
    local -a auto_fix_items=()
    local -a manual_items=()
    local skip_security_autofix=false
    if [[ "${MOLE_SECURITY_FIXES_SHOWN:-}" == "true" ]]; then
        skip_security_autofix=true
    fi

    # Security suggestions
    if [[ "$skip_security_autofix" == "false" && -n "${FIREWALL_DISABLED:-}" && "${FIREWALL_DISABLED}" == "true" ]]; then
        auto_fix_items+=("Enable Firewall for better security")
        has_suggestions=true
        can_auto_fix=true
@@ -24,7 +28,7 @@ show_suggestions() {
    fi

    # Configuration suggestions
    if [[ "$skip_security_autofix" == "false" && -n "${TOUCHID_NOT_CONFIGURED:-}" && "${TOUCHID_NOT_CONFIGURED}" == "true" ]]; then
        auto_fix_items+=("Enable Touch ID for sudo")
        has_suggestions=true
        can_auto_fix=true
@@ -94,7 +98,7 @@ ask_for_auto_fix() {
        return 1
    fi

    echo -ne "${PURPLE}${ICON_ARROW}${NC} Auto-fix issues now? ${GRAY}Enter confirm / Space cancel${NC}: "

    local key
    if ! key=$(read_key); then
@@ -132,7 +136,7 @@ perform_auto_fix() {
    # Fix Firewall
    if [[ -n "${FIREWALL_DISABLED:-}" && "${FIREWALL_DISABLED}" == "true" ]]; then
        echo -e "${BLUE}Enabling Firewall...${NC}"
        if sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate on > /dev/null 2>&1; then
            echo -e "${GREEN}✓${NC} Firewall enabled"
            ((fixed_count++))
            fixed_items+=("Firewall enabled")

117
lib/manage/purge_paths.sh
Normal file
@@ -0,0 +1,117 @@
#!/bin/bash
# Purge paths management functionality
# Opens config file for editing and shows current status

set -euo pipefail

# Get script directory and source dependencies
_MOLE_MANAGE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$_MOLE_MANAGE_DIR/../core/common.sh"
# Only source project.sh if not already loaded (has readonly vars)
if [[ -z "${PURGE_TARGETS:-}" ]]; then
    source "$_MOLE_MANAGE_DIR/../clean/project.sh"
fi

# Config file path (use :- to avoid re-declaration if already set)
PURGE_PATHS_CONFIG="${PURGE_PATHS_CONFIG:-$HOME/.config/mole/purge_paths}"

# Ensure config file exists with helpful template
ensure_config_template() {
    if [[ ! -f "$PURGE_PATHS_CONFIG" ]]; then
        ensure_user_dir "$(dirname "$PURGE_PATHS_CONFIG")"
        cat > "$PURGE_PATHS_CONFIG" << 'EOF'
# Mole Purge Paths - Directories to scan for project artifacts
# Add one path per line (supports ~ for home directory)
# Delete all paths or this file to use defaults
#
# Example:
# ~/Documents/MyProjects
# ~/Work/ClientA
# ~/Work/ClientB
EOF
    fi
}

# Main management function
manage_purge_paths() {
    ensure_config_template

    local display_config="${PURGE_PATHS_CONFIG/#$HOME/~}"

    # Clear screen
    if [[ -t 1 ]]; then
        printf '\033[2J\033[H'
    fi

    echo -e "${PURPLE_BOLD}Purge Paths Configuration${NC}"
    echo ""

    # Show current status
    echo -e "${YELLOW}Current Scan Paths:${NC}"

    # Reload config
    load_purge_config

    if [[ ${#PURGE_SEARCH_PATHS[@]} -gt 0 ]]; then
        for path in "${PURGE_SEARCH_PATHS[@]}"; do
            local display_path="${path/#$HOME/~}"
            if [[ -d "$path" ]]; then
                echo -e " ${GREEN}✓${NC} $display_path"
            else
                echo -e " ${GRAY}○${NC} $display_path ${GRAY}(not found)${NC}"
            fi
        done
    fi

    # Check if using custom config
    local custom_count=0
    if [[ -f "$PURGE_PATHS_CONFIG" ]]; then
        while IFS= read -r line; do
            line="${line#"${line%%[![:space:]]*}"}"
            line="${line%"${line##*[![:space:]]}"}"
            [[ -z "$line" || "$line" =~ ^# ]] && continue
            ((custom_count++))
        done < "$PURGE_PATHS_CONFIG"
    fi

    echo ""
    if [[ $custom_count -gt 0 ]]; then
        echo -e "${GRAY}Using custom config with $custom_count path(s)${NC}"
    else
        echo -e "${GRAY}Using ${#DEFAULT_PURGE_SEARCH_PATHS[@]} default paths${NC}"
    fi

    echo ""
    echo -e "${YELLOW}Default Paths:${NC}"
    for path in "${DEFAULT_PURGE_SEARCH_PATHS[@]}"; do
        echo -e " ${GRAY}-${NC} ${path/#$HOME/~}"
    done

    echo ""
    echo -e "${YELLOW}Config File:${NC} $display_config"
    echo ""

    # Open in editor
    local editor="${EDITOR:-${VISUAL:-vim}}"
    echo -e "Opening in ${CYAN}$editor${NC}..."
    echo -e "${GRAY}Save and exit to apply changes. Leave empty to use defaults.${NC}"
    echo ""

    # Wait for user to read
    sleep 1

    # Open editor
    "$editor" "$PURGE_PATHS_CONFIG"

    # Reload and show updated status
    load_purge_config

    echo ""
    echo -e "${GREEN}${ICON_SUCCESS}${NC} Configuration updated"
    echo -e "${GRAY}Run 'mo purge' to clean with new paths${NC}"
    echo ""
}

if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    manage_purge_paths
fi

@@ -76,9 +76,9 @@ ask_for_updates() {
        echo -e "$item"
    done
    echo ""

    # If only Mole is relevant for automation, prompt just for Mole
    if [[ "${MOLE_UPDATE_AVAILABLE:-}" == "true" ]]; then
        echo ""
        echo -ne "${YELLOW}Update Mole now?${NC} ${GRAY}Enter confirm / ESC cancel${NC}: "

        local key
@@ -92,55 +92,33 @@ ask_for_updates() {
            echo ""
            return 0
        else
            echo ""
            return 1
        fi
    fi

    # For other updates, just show instructions
    # (The Mole prompt above handles its own return, so we only get here when there is no Mole update)
    echo ""
    echo -e "${YELLOW}Tip:${NC} Homebrew: brew upgrade / brew upgrade --cask"
    echo -e "${YELLOW}Tip:${NC} App Store: open App Store → Updates"
    echo -e "${YELLOW}Tip:${NC} macOS: System Settings → General → Software Update"
    return 1
}

# Perform all pending updates
# Returns: 0 if all succeeded, 1 if some failed
perform_updates() {
    # Only handle Mole updates here; Homebrew/App Store/macOS are manual (tips shown in ask_for_updates)
    local updated_count=0
    local total_count=0

    # Update Mole
    if [[ -n "${MOLE_UPDATE_AVAILABLE:-}" && "${MOLE_UPDATE_AVAILABLE}" == "true" ]]; then
        echo -e "${BLUE}Updating Mole...${NC}"
        # Try to find mole executable
        local mole_bin="${SCRIPT_DIR}/../../mole"
        [[ ! -f "$mole_bin" ]] && mole_bin=$(command -v mole 2> /dev/null || echo "")

        if [[ -x "$mole_bin" ]]; then
            # 'mole update' replaces the script on disk while this code is
            # running; the updater downloads to a temp file and moves it into
            # place, so the running shell is not disturbed in practice.
            if "$mole_bin" update 2>&1 | grep -qE "(Updated|latest version)"; then
                echo -e "${GREEN}✓${NC} Mole updated"
                reset_mole_cache
                ((updated_count++))
            else
                echo -e "${RED}✗${NC} Mole update failed"
            fi
@@ -148,11 +126,17 @@ perform_updates() {
            echo -e "${RED}✗${NC} Mole executable not found"
        fi
        echo ""
        total_count=1
    fi

    if [[ $total_count -eq 0 ]]; then
        echo -e "${GRAY}No updates to perform${NC}"
        return 0
    elif [[ $updated_count -eq $total_count ]]; then
        echo -e "${GREEN}All updates completed (${updated_count}/${total_count})${NC}"
        return 0
    else
        echo -e "${RED}Update failed (${updated_count}/${total_count})${NC}"
        return 1
    fi
}

@@ -44,7 +44,7 @@ save_whitelist_patterns() {
        header_text="# Mole Whitelist - Protected paths won't be deleted\n# Default protections: Playwright browsers, HuggingFace models, Maven repo, Ollama models, Surge Mac, R renv, Finder metadata\n# Add one pattern per line to keep items safe."
    fi

    ensure_user_file "$config_file"

    echo -e "$header_text" > "$config_file"

@@ -81,6 +81,7 @@ Apple Mail cache|$HOME/Library/Caches/com.apple.mail/*|system_cache
Gradle build cache (Android Studio, Gradle projects)|$HOME/.gradle/caches/*|ide_cache
Gradle daemon processes cache|$HOME/.gradle/daemon/*|ide_cache
Xcode DerivedData (build outputs, indexes)|$HOME/Library/Developer/Xcode/DerivedData/*|ide_cache
Xcode archives (built app packages)|$HOME/Library/Developer/Xcode/Archives/*|ide_cache
Xcode internal cache files|$HOME/Library/Caches/com.apple.dt.Xcode/*|ide_cache
Xcode iOS device support symbols|$HOME/Library/Developer/Xcode/iOS DeviceSupport/*/Symbols/System/Library/Caches/*|ide_cache
Maven local repository (Java dependencies)|$HOME/.m2/repository/*|ide_cache
@@ -143,6 +144,7 @@ Podman container cache|$HOME/.local/share/containers/cache/*|container_cache
Font cache|$HOME/Library/Caches/com.apple.FontRegistry/*|system_cache
Spotlight metadata cache|$HOME/Library/Caches/com.apple.spotlight/*|system_cache
CloudKit cache|$HOME/Library/Caches/CloudKit/*|system_cache
Trash|$HOME/.Trash|system_cache
EOF
# Add FINDER_METADATA with constant reference
echo "Finder metadata (.DS_Store)|$FINDER_METADATA_SENTINEL|system_cache"
@@ -154,8 +156,8 @@ get_optimize_whitelist_items() {
    cat << 'EOF'
macOS Firewall check|firewall|security_check
Gatekeeper check|gatekeeper|security_check
Homebrew updates check|check_brew_updates|update_check
macOS system updates check|check_macos_updates|update_check
Mole updates check|check_mole_update|update_check
Homebrew health check (doctor)|check_brew_health|health_check
SIP status check|check_sip|security_check
FileVault status check|check_filevault|security_check
@@ -163,7 +165,6 @@ TouchID sudo check|check_touchid|config_check
Rosetta 2 check|check_rosetta|config_check
Git configuration check|check_git_config|config_check
Login items check|check_login_items|config_check
Spotlight cache cleanup|spotlight_cache|system_optimization
EOF
}

@@ -280,12 +281,16 @@ manage_whitelist_categories() {

    if [[ "$mode" == "optimize" ]]; then
        items_source=$(get_optimize_whitelist_items)
        active_config_file="$WHITELIST_CONFIG_OPTIMIZE"
        local display_config="${active_config_file/#$HOME/~}"
        menu_title="Whitelist Manager – Select system checks to ignore
${GRAY}Edit: ${display_config}${NC}"
    else
        items_source=$(get_all_cache_items)
        active_config_file="$WHITELIST_CONFIG_CLEAN"
        local display_config="${active_config_file/#$HOME/~}"
        menu_title="Whitelist Manager – Select caches to protect
${GRAY}Edit: ${display_config}${NC}"
    fi

    while IFS='|' read -r display_name pattern _; do
@@ -366,9 +371,8 @@ manage_whitelist_categories() {
    unset MOLE_PRESELECTED_INDICES
    local exit_code=$?

    # Normal exit or cancel
    if [[ $exit_code -ne 0 ]]; then
        echo ""
        echo -e "${YELLOW}Cancelled${NC}"
        return 1
    fi

@@ -413,7 +417,8 @@ manage_whitelist_categories() {
    else
        summary_lines+=("Protected ${total_protected} cache(s)")
    fi
    local display_config="${active_config_file/#$HOME/~}"
    summary_lines+=("Config: ${GRAY}${display_config}${NC}")

    print_summary_block "${summary_lines[@]}"
    printf '\n'

@@ -1,28 +1,19 @@
#!/bin/bash
# System Configuration Maintenance Module
# Fix broken preferences and broken login items
# System Configuration Maintenance Module.
# Fix broken preferences and login items.

set -euo pipefail

# ============================================================================
# Broken Preferences Detection and Cleanup
# Find and remove corrupted .plist files
# ============================================================================

# Clean broken preference files
# Uses plutil -lint to validate plist files
# Returns: count of broken files fixed
# Remove corrupted preference files.
fix_broken_preferences() {
local prefs_dir="$HOME/Library/Preferences"
[[ -d "$prefs_dir" ]] || return 0

local broken_count=0

# Check main preferences directory
while IFS= read -r plist_file; do
[[ -f "$plist_file" ]] || continue

# Skip system preferences
local filename
filename=$(basename "$plist_file")
case "$filename" in
@@ -31,15 +22,13 @@ fix_broken_preferences() {
;;
esac

# Validate plist using plutil
plutil -lint "$plist_file" > /dev/null 2>&1 && continue

# Remove broken plist
rm -f "$plist_file" 2> /dev/null || true
safe_remove "$plist_file" true > /dev/null 2>&1 || true
((broken_count++))
done < <(command find "$prefs_dir" -maxdepth 1 -name "*.plist" -type f 2> /dev/null || true)

# Check ByHost preferences with timeout protection
# Check ByHost preferences.
local byhost_dir="$prefs_dir/ByHost"
if [[ -d "$byhost_dir" ]]; then
while IFS= read -r plist_file; do
@@ -55,63 +44,10 @@ fix_broken_preferences() {

plutil -lint "$plist_file" > /dev/null 2>&1 && continue

rm -f "$plist_file" 2> /dev/null || true
safe_remove "$plist_file" true > /dev/null 2>&1 || true
((broken_count++))
done < <(command find "$byhost_dir" -name "*.plist" -type f 2> /dev/null || true)
fi

echo "$broken_count"
}
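
A standalone sketch of the validation step above: plutil -lint exits non-zero for a corrupt plist, which is what drives the removal decision (this version only reports, never deletes):

    for f in "$HOME/Library/Preferences"/*.plist; do
        [[ -f "$f" ]] || continue
        plutil -lint "$f" > /dev/null 2>&1 || echo "corrupt: $f"
    done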

# ============================================================================
# Broken Login Items Cleanup
# Find and remove login items pointing to non-existent files
# ============================================================================

# Clean broken login items (LaunchAgents pointing to missing executables)
# Returns: count of broken items fixed
fix_broken_login_items() {
local launch_agents_dir="$HOME/Library/LaunchAgents"
[[ -d "$launch_agents_dir" ]] || return 0

# Check whitelist
if command -v is_whitelisted > /dev/null && is_whitelisted "check_login_items"; then return 0; fi

local broken_count=0

while IFS= read -r plist_file; do
[[ -f "$plist_file" ]] || continue

# Skip system items
local filename
filename=$(basename "$plist_file")
case "$filename" in
com.apple.*)
continue
;;
esac

# Extract Program or ProgramArguments[0] from plist using plutil
local program=""
program=$(plutil -extract Program raw "$plist_file" 2> /dev/null || echo "")

if [[ -z "$program" ]]; then
# Try ProgramArguments array (first element)
program=$(plutil -extract ProgramArguments.0 raw "$plist_file" 2> /dev/null || echo "")
fi

# Expand tilde in path if present
program="${program/#\~/$HOME}"

# Skip if no program found or program exists
[[ -z "$program" ]] && continue
[[ -e "$program" ]] && continue

# Program doesn't exist - this is a broken login item
launchctl unload "$plist_file" 2> /dev/null || true
rm -f "$plist_file" 2> /dev/null || true
((broken_count++))
done < <(command find "$launch_agents_dir" -name "*.plist" -type f 2> /dev/null || true)

echo "$broken_count"
}
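
A sketch of the extraction logic in isolation: plutil -extract ... raw prints the bare value and simply fails on a missing key, so Program and ProgramArguments.0 can be tried in order (the plist path is hypothetical):

    plist="$HOME/Library/LaunchAgents/com.example.agent.plist"
    program=$(plutil -extract Program raw "$plist" 2> /dev/null ||
        plutil -extract ProgramArguments.0 raw "$plist" 2> /dev/null || echo "")
    program="${program/#\~/$HOME}"
    [[ -n "$program" && ! -e "$program" ]] && echo "broken: $plist -> $program"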

File diff suppressed because it is too large
@@ -18,14 +18,33 @@ format_app_display() {
[[ "$size" != "0" && "$size" != "" && "$size" != "Unknown" ]] && size_str="$size"

# Calculate available width for app name based on terminal width
# use passed width or calculate it (but calculation is slow in loops)
# Accept pre-calculated max_name_width (5th param) to avoid recalculation in loops
local terminal_width="${4:-$(tput cols 2> /dev/null || echo 80)}"
local fixed_width=28
local available_width=$((terminal_width - fixed_width))
local max_name_width="${5:-}"
local available_width

# Set reasonable bounds for name width: 24-35 display width
[[ $available_width -lt 24 ]] && available_width=24
[[ $available_width -gt 35 ]] && available_width=35
if [[ -n "$max_name_width" ]]; then
# Use pre-calculated width from caller
available_width=$max_name_width
else
# Fallback: calculate it (slower, but works for standalone calls)
# Fixed elements: " ○ " (4) + " " (1) + size (9) + " | " (3) + max_last (7) = 24
local fixed_width=24
available_width=$((terminal_width - fixed_width))

# Dynamic minimum for better spacing on wide terminals
local min_width=18
if [[ $terminal_width -ge 120 ]]; then
min_width=48
elif [[ $terminal_width -ge 100 ]]; then
min_width=38
elif [[ $terminal_width -ge 80 ]]; then
min_width=25
fi

[[ $available_width -lt $min_width ]] && available_width=$min_width
[[ $available_width -gt 60 ]] && available_width=60
fi

# Truncate long names if needed (based on display width, not char count)
local truncated_name
@@ -66,6 +85,31 @@ select_apps_for_uninstall() {
fi
fi

# Pre-scan to get actual max name width
local max_name_width=0
for app_data in "${apps_data[@]}"; do
IFS='|' read -r _ _ display_name _ _ _ _ <<< "$app_data"
local name_width=$(get_display_width "$display_name")
[[ $name_width -gt $max_name_width ]] && max_name_width=$name_width
done
# Constrain based on terminal width: fixed=24, min varies by terminal width, max=60
local fixed_width=24
local available=$((terminal_width - fixed_width))

# Dynamic minimum: wider terminals get larger minimum for better spacing
local min_width=18
if [[ $terminal_width -ge 120 ]]; then
min_width=48 # Wide terminals: very generous spacing
elif [[ $terminal_width -ge 100 ]]; then
min_width=38 # Medium-wide terminals: generous spacing
elif [[ $terminal_width -ge 80 ]]; then
min_width=25 # Standard terminals
fi

[[ $max_name_width -lt $min_width ]] && max_name_width=$min_width
[[ $available -lt $max_name_width ]] && max_name_width=$available
[[ $max_name_width -gt 60 ]] && max_name_width=60

local -a menu_options=()
# Prepare metadata (comma-separated) for sorting/filtering inside the menu
local epochs_csv=""
@@ -74,7 +118,7 @@ select_apps_for_uninstall() {
for app_data in "${apps_data[@]}"; do
# Keep extended field 7 (size_kb) if present
IFS='|' read -r epoch _ display_name _ size last_used size_kb <<< "$app_data"
menu_options+=("$(format_app_display "$display_name" "$size" "$last_used" "$terminal_width")")
menu_options+=("$(format_app_display "$display_name" "$size" "$last_used" "$terminal_width" "$max_name_width")")
# Build csv lists (avoid trailing commas)
if [[ $idx -eq 0 ]]; then
epochs_csv="${epoch:-0}"
@@ -118,7 +162,6 @@ select_apps_for_uninstall() {
fi

if [[ $exit_code -ne 0 ]]; then
echo "Cancelled"
return 1
fi


@@ -40,7 +40,9 @@ _pm_get_terminal_height() {
# Calculate dynamic items per page based on terminal height
_pm_calculate_items_per_page() {
local term_height=$(_pm_get_terminal_height)
local reserved=6 # header(2) + footer(3) + spacing(1)
# Reserved: header(1) + blank(1) + blank(1) + footer(1-2) = 4-5 rows
# Use 5 to be safe (leaves 1 row buffer when footer wraps to 2 lines)
local reserved=5
local available=$((term_height - reserved))

# Ensure minimum and maximum bounds
@@ -153,6 +155,7 @@ paginated_multi_select() {
}

local -a selected=()
local selected_count=0 # Cache selection count to avoid O(n) loops on every draw

# Initialize selection array
for ((i = 0; i < total_items; i++)); do
@@ -165,7 +168,11 @@ paginated_multi_select() {
IFS=',' read -ra initial_indices <<< "$cleaned_preselect"
for idx in "${initial_indices[@]}"; do
if [[ "$idx" =~ ^[0-9]+$ && $idx -ge 0 && $idx -lt $total_items ]]; then
selected[idx]=true
# Only count if not already selected (handles duplicates)
if [[ ${selected[idx]} != true ]]; then
selected[idx]=true
((selected_count++))
fi
fi
done
fi
@@ -228,11 +235,13 @@ paginated_multi_select() {

local cols="${COLUMNS:-}"
[[ -z "$cols" ]] && cols=$(tput cols 2> /dev/null || echo 80)
[[ "$cols" =~ ^[0-9]+$ ]] || cols=80

_strip_ansi_len() {
local text="$1"
local stripped
stripped=$(printf "%s" "$text" | LC_ALL=C awk '{gsub(/\033\[[0-9;]*[A-Za-z]/,""); print}')
stripped=$(printf "%s" "$text" | LC_ALL=C awk '{gsub(/\033\[[0-9;]*[A-Za-z]/,""); print}' || true)
[[ -z "$stripped" ]] && stripped="$text"
printf "%d" "${#stripped}"
}
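
The same ANSI-stripping idea as _strip_ansi_len, shown standalone; the awk gsub pattern is taken directly from the code above:

    visible_len() {
        local stripped
        stripped=$(printf '%s' "$1" |
            LC_ALL=C awk '{gsub(/\033\[[0-9;]*[A-Za-z]/,""); print}')
        printf '%d' "${#stripped}"
    }
    visible_len $'\033[32mOK\033[0m'   # prints 2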

@@ -244,7 +253,10 @@ paginated_multi_select() {
else
candidate="$line${sep}${s}"
fi
if (($(_strip_ansi_len "$candidate") > cols)); then
local candidate_len
candidate_len=$(_strip_ansi_len "$candidate")
[[ -z "$candidate_len" ]] && candidate_len=0
if ((candidate_len > cols)); then
printf "%s%s\n" "$clear_line" "$line" >&2
line="$s"
else
@@ -382,11 +394,8 @@ paginated_multi_select() {
printf "\033[H" >&2
local clear_line="\r\033[2K"

# Count selections
local selected_count=0
for ((i = 0; i < total_items; i++)); do
[[ ${selected[i]} == true ]] && ((selected_count++))
done
# Use cached selection count (maintained incrementally on toggle)
# No need to loop through all items anymore!

# Header only
printf "${clear_line}${PURPLE_BOLD}%s${NC} ${GRAY}%d/%d selected${NC}\n" "${title}" "$selected_count" "$total_items" >&2
@@ -475,6 +484,22 @@ paginated_multi_select() {

# Footer: single line with controls
local sep=" ${GRAY}|${NC} "

# Helper to calculate display length without ANSI codes
_calc_len() {
local text="$1"
local stripped
stripped=$(printf "%s" "$text" | LC_ALL=C awk '{gsub(/\033\[[0-9;]*[A-Za-z]/,""); print}')
printf "%d" "${#stripped}"
}

# Common menu items
local nav="${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN}${NC}"
local space_select="${GRAY}Space Select${NC}"
local space="${GRAY}Space${NC}"
local enter="${GRAY}Enter${NC}"
local exit="${GRAY}Q Exit${NC}"

if [[ "$filter_mode" == "true" ]]; then
# Filter mode: simple controls without sort
local -a _segs_filter=(
@@ -485,57 +510,82 @@ paginated_multi_select() {
)
_print_wrapped_controls "$sep" "${_segs_filter[@]}"
else
# Normal mode - single line compact format
# Normal mode - prepare dynamic items
local reverse_arrow="↑"
[[ "$sort_reverse" == "true" ]] && reverse_arrow="↓"

# Determine filter text based on whether filter is active
local filter_text="/ Search"
[[ -n "$applied_query" ]] && filter_text="/ Clear"

local refresh="${GRAY}R Refresh${NC}"
local search="${GRAY}${filter_text}${NC}"
local sort_ctrl="${GRAY}S ${sort_status}${NC}"
local order_ctrl="${GRAY}O ${reverse_arrow}${NC}"

if [[ "$has_metadata" == "true" ]]; then
if [[ -n "$applied_query" ]]; then
# Filtering: hide sort controls
local -a _segs_all=(
"${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN}${NC}"
"${GRAY}Space${NC}"
"${GRAY}Enter${NC}"
"${GRAY}${filter_text}${NC}"
"${GRAY}Q Exit${NC}"
)
# Filtering active: hide sort controls
local -a _segs_all=("$nav" "$space" "$enter" "$refresh" "$search" "$exit")
_print_wrapped_controls "$sep" "${_segs_all[@]}"
else
# Normal: show full controls
local -a _segs_all=(
"${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN}${NC}"
"${GRAY}Space Select${NC}"
"${GRAY}Enter${NC}"
"${GRAY}R Refresh${NC}"
"${GRAY}${filter_text}${NC}"
"${GRAY}S ${sort_status}${NC}"
"${GRAY}O ${reverse_arrow}${NC}"
"${GRAY}Q Exit${NC}"
)
_print_wrapped_controls "$sep" "${_segs_all[@]}"
# Normal: show full controls with dynamic reduction
local term_width="${COLUMNS:-}"
[[ -z "$term_width" ]] && term_width=$(tput cols 2> /dev/null || echo 80)
[[ "$term_width" =~ ^[0-9]+$ ]] || term_width=80

# Level 0: Full controls
local -a _segs=("$nav" "$space_select" "$enter" "$refresh" "$search" "$sort_ctrl" "$order_ctrl" "$exit")

# Calculate width
local total_len=0 seg_count=${#_segs[@]}
for i in "${!_segs[@]}"; do
total_len=$((total_len + $(_calc_len "${_segs[i]}")))
[[ $i -lt $((seg_count - 1)) ]] && total_len=$((total_len + 3))
done

# Level 1: Remove "Space Select"
if [[ $total_len -gt $term_width ]]; then
_segs=("$nav" "$enter" "$refresh" "$search" "$sort_ctrl" "$order_ctrl" "$exit")

total_len=0
seg_count=${#_segs[@]}
for i in "${!_segs[@]}"; do
total_len=$((total_len + $(_calc_len "${_segs[i]}")))
[[ $i -lt $((seg_count - 1)) ]] && total_len=$((total_len + 3))
done

# Level 2: Remove "S ${sort_status}"
if [[ $total_len -gt $term_width ]]; then
_segs=("$nav" "$enter" "$refresh" "$search" "$order_ctrl" "$exit")
fi
fi

_print_wrapped_controls "$sep" "${_segs[@]}"
fi
else
# Without metadata: basic controls
local -a _segs_simple=(
"${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN}${NC}"
"${GRAY}Space Select${NC}"
"${GRAY}Enter${NC}"
"${GRAY}${filter_text}${NC}"
"${GRAY}Q Exit${NC}"
)
local -a _segs_simple=("$nav" "$space_select" "$enter" "$refresh" "$search" "$exit")
_print_wrapped_controls "$sep" "${_segs_simple[@]}"
fi
fi
printf "${clear_line}" >&2
}
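
A plain-text sketch of the "dynamic reduction" strategy above: keep dropping a trailing segment until the joined footer fits the terminal width (the real code measures ANSI-stripped width and drops specific segments by priority; this generic version uses raw length):

    fit_segments() {
        local cols="$1"; shift
        local -a segs=("$@")
        local joined
        while :; do
            joined=$(printf '%s | ' "${segs[@]}"); joined=${joined% | }
            (( ${#joined} <= cols || ${#segs[@]} == 1 )) && break
            segs=("${segs[@]:0:${#segs[@]}-1}")   # drop the last segment
        done
        printf '%s\n' "$joined"
    }
    fit_segments 24 "Up/Down" "Space Select" "Enter" "Q Exit"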

# Track previous cursor position for incremental rendering
local prev_cursor_pos=$cursor_pos
local prev_top_index=$top_index
local need_full_redraw=true

# Main interaction loop
while true; do
draw_menu
if [[ "$need_full_redraw" == "true" ]]; then
draw_menu
need_full_redraw=false
# Update tracking variables after full redraw
prev_cursor_pos=$cursor_pos
prev_top_index=$top_index
fi

local key
key=$(read_key)

@@ -549,6 +599,7 @@ paginated_multi_select() {
top_index=0
cursor_pos=0
rebuild_view
need_full_redraw=true
continue
fi
cleanup
@@ -558,9 +609,34 @@ paginated_multi_select() {
if [[ ${#view_indices[@]} -eq 0 ]]; then
:
elif [[ $cursor_pos -gt 0 ]]; then
# Simple cursor move - only redraw affected rows
local old_cursor=$cursor_pos
((cursor_pos--))
local new_cursor=$cursor_pos

# Calculate terminal row positions (+3: row 1=header, row 2=blank, row 3=first item)
local old_row=$((old_cursor + 3))
local new_row=$((new_cursor + 3))

# Quick redraw: update only the two affected rows
printf "\033[%d;1H" "$old_row" >&2
render_item "$old_cursor" false
printf "\033[%d;1H" "$new_row" >&2
render_item "$new_cursor" true

# CRITICAL: Move cursor to footer to avoid visual artifacts
printf "\033[%d;1H" "$((items_per_page + 4))" >&2

prev_cursor_pos=$cursor_pos

# Drain pending input for smoother fast scrolling
drain_pending_input
continue # Skip full redraw
elif [[ $top_index -gt 0 ]]; then
((top_index--))
prev_cursor_pos=$cursor_pos
prev_top_index=$top_index
need_full_redraw=true # Scrolling requires full redraw
fi
;;
"DOWN")
@@ -574,7 +650,29 @@ paginated_multi_select() {
[[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page

if [[ $cursor_pos -lt $((visible_count - 1)) ]]; then
# Simple cursor move - only redraw affected rows
local old_cursor=$cursor_pos
((cursor_pos++))
local new_cursor=$cursor_pos

# Calculate terminal row positions (+3: row 1=header, row 2=blank, row 3=first item)
local old_row=$((old_cursor + 3))
local new_row=$((new_cursor + 3))

# Quick redraw: update only the two affected rows
printf "\033[%d;1H" "$old_row" >&2
render_item "$old_cursor" false
printf "\033[%d;1H" "$new_row" >&2
render_item "$new_cursor" true

# CRITICAL: Move cursor to footer to avoid visual artifacts
printf "\033[%d;1H" "$((items_per_page + 4))" >&2

prev_cursor_pos=$cursor_pos

# Drain pending input for smoother fast scrolling
drain_pending_input
continue # Skip full redraw
elif [[ $((top_index + visible_count)) -lt ${#view_indices[@]} ]]; then
((top_index++))
visible_count=$((${#view_indices[@]} - top_index))
@@ -582,6 +680,9 @@ paginated_multi_select() {
if [[ $cursor_pos -ge $visible_count ]]; then
cursor_pos=$((visible_count - 1))
fi
prev_cursor_pos=$cursor_pos
prev_top_index=$top_index
need_full_redraw=true # Scrolling requires full redraw
fi
fi
fi
@@ -592,9 +693,25 @@ paginated_multi_select() {
local real="${view_indices[idx]}"
if [[ ${selected[real]} == true ]]; then
selected[real]=false
((selected_count--))
else
selected[real]=true
((selected_count++))
fi

# Incremental update: only redraw header (for count) and current row
# Header is at row 1
printf "\033[1;1H\033[2K${PURPLE_BOLD}%s${NC} ${GRAY}%d/%d selected${NC}\n" "${title}" "$selected_count" "$total_items" >&2

# Redraw current item row (+3: row 1=header, row 2=blank, row 3=first item)
local item_row=$((cursor_pos + 3))
printf "\033[%d;1H" "$item_row" >&2
render_item "$cursor_pos" true

# Move cursor to footer to avoid visual artifacts (items + header + 2 blanks)
printf "\033[%d;1H" "$((items_per_page + 4))" >&2

continue # Skip full redraw
fi
;;
"RETRY")
@@ -606,12 +723,14 @@ paginated_multi_select() {
sort_reverse="true"
fi
rebuild_view
need_full_redraw=true
fi
;;
"CHAR:s" | "CHAR:S")
if [[ "$filter_mode" == "true" ]]; then
local ch="${key#CHAR:}"
filter_query+="$ch"
need_full_redraw=true
elif [[ "$has_metadata" == "true" ]]; then
# Cycle sort mode (only if metadata available)
case "$sort_mode" in
@@ -620,6 +739,7 @@ paginated_multi_select() {
size) sort_mode="date" ;;
esac
rebuild_view
need_full_redraw=true
fi
;;
"FILTER")
@@ -631,6 +751,7 @@ paginated_multi_select() {
top_index=0
cursor_pos=0
rebuild_view
need_full_redraw=true
else
# Enter filter mode
filter_mode="true"
@@ -639,6 +760,7 @@ paginated_multi_select() {
top_index=0
cursor_pos=0
rebuild_view
need_full_redraw=true
fi
;;
"CHAR:j")
@@ -701,12 +823,14 @@ paginated_multi_select() {
sort_reverse="true"
fi
rebuild_view
need_full_redraw=true
fi
;;
"DELETE")
# Backspace filter
if [[ "$filter_mode" == "true" && -n "$filter_query" ]]; then
filter_query="${filter_query%?}"
need_full_redraw=true
fi
;;
CHAR:*)
@@ -715,6 +839,7 @@ paginated_multi_select() {
# avoid accidental leading spaces
if [[ -n "$filter_query" || "$ch" != " " ]]; then
filter_query+="$ch"
need_full_redraw=true
fi
fi
;;
@@ -750,6 +875,7 @@ paginated_multi_select() {
if [[ $idx -lt ${#view_indices[@]} ]]; then
local real="${view_indices[idx]}"
selected[real]=true
((selected_count++))
fi
fi


@@ -2,73 +2,93 @@

set -euo pipefail

# Ensure common.sh is loaded
# Ensure common.sh is loaded.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
[[ -z "${MOLE_COMMON_LOADED:-}" ]] && source "$SCRIPT_DIR/lib/core/common.sh"

# Batch uninstall functionality with minimal confirmations
# Replaces the overly verbose individual confirmation approach
# Batch uninstall with a single confirmation.

# Decode and validate base64 encoded file list
# Returns decoded string if valid, empty string otherwise
# User data detection patterns (prompt user to backup if found).
readonly SENSITIVE_DATA_PATTERNS=(
"\.warp" # Warp terminal configs/themes
"/\.config/" # Standard Unix config directory
"/themes/" # Theme customizations
"/settings/" # Settings directories
"/Application Support/[^/]+/User Data" # Chrome/Electron user data
"/Preferences/[^/]+\.plist" # User preference files
"/Documents/" # User documents
"/\.ssh/" # SSH keys and configs (critical)
"/\.gnupg/" # GPG keys (critical)
)

# Join patterns into a single regex for grep.
SENSITIVE_DATA_REGEX=$(
IFS='|'
echo "${SENSITIVE_DATA_PATTERNS[*]}"
)
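
Why the subshell-and-IFS trick works: expanding ${array[*]} joins elements with the first character of IFS, which yields exactly the alternation grep -E expects (demo patterns abbreviated):

    patterns=('/\.ssh/' '/Documents/' '/themes/')
    regex=$(IFS='|'; echo "${patterns[*]}")   # /\.ssh/|/Documents/|/themes/
    echo "$HOME/Library/App Support/themes/dark" | grep -qE "$regex" && echo "sensitive"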

# Decode and validate base64 file list (safe for set -e).
decode_file_list() {
local encoded="$1"
local app_name="$2"
local decoded

# Decode base64 data
if ! decoded=$(printf '%s' "$encoded" | base64 -d 2> /dev/null); then
log_error "Failed to decode file list for $app_name"
echo ""
return 1
# macOS uses -D, GNU uses -d. Always return 0 for set -e safety.
if ! decoded=$(printf '%s' "$encoded" | base64 -D 2> /dev/null); then
if ! decoded=$(printf '%s' "$encoded" | base64 -d 2> /dev/null); then
log_error "Failed to decode file list for $app_name" >&2
echo ""
return 0 # Return success with empty string
fi
fi

# Validate decoded data doesn't contain null bytes
if [[ "$decoded" =~ $'\0' ]]; then
log_warning "File list for $app_name contains null bytes, rejecting"
log_warning "File list for $app_name contains null bytes, rejecting" >&2
echo ""
return 1
return 0 # Return success with empty string
fi

# Validate paths look reasonable (each line should be a path or empty)
while IFS= read -r line; do
if [[ -n "$line" && ! "$line" =~ ^/ ]]; then
log_warning "Invalid path in file list for $app_name: $line"
log_warning "Invalid path in file list for $app_name: $line" >&2
echo ""
return 1
return 0 # Return success with empty string
fi
done <<< "$decoded"

echo "$decoded"
return 0
}
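
The decode fallback above, reduced to its essence: macOS base64 takes -D while GNU coreutils takes -d, so trying both keeps the helper portable (demo input is illustrative):

    b64_decode() {
        printf '%s' "$1" | base64 -D 2> /dev/null ||
            printf '%s' "$1" | base64 -d 2> /dev/null
    }
    b64_decode "aGVsbG8="   # -> hello
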
# Note: find_app_files() and calculate_total_size() functions now in lib/core/common.sh
# Note: find_app_files() and calculate_total_size() are in lib/core/common.sh.

# Stop Launch Agents and Daemons for an app
# Args: $1 = bundle_id, $2 = has_system_files (true/false)
# Stop Launch Agents/Daemons for an app.
stop_launch_services() {
local bundle_id="$1"
local has_system_files="${2:-false}"

# User-level Launch Agents
for plist in ~/Library/LaunchAgents/"$bundle_id"*.plist; do
[[ -f "$plist" ]] && launchctl unload "$plist" 2> /dev/null || true
done
[[ -z "$bundle_id" || "$bundle_id" == "unknown" ]] && return 0

if [[ -d ~/Library/LaunchAgents ]]; then
while IFS= read -r -d '' plist; do
launchctl unload "$plist" 2> /dev/null || true
done < <(find ~/Library/LaunchAgents -maxdepth 1 -name "${bundle_id}*.plist" -print0 2> /dev/null)
fi

# System-level services (requires sudo)
if [[ "$has_system_files" == "true" ]]; then
for plist in /Library/LaunchAgents/"$bundle_id"*.plist; do
[[ -f "$plist" ]] && sudo launchctl unload "$plist" 2> /dev/null || true
done
for plist in /Library/LaunchDaemons/"$bundle_id"*.plist; do
[[ -f "$plist" ]] && sudo launchctl unload "$plist" 2> /dev/null || true
done
if [[ -d /Library/LaunchAgents ]]; then
while IFS= read -r -d '' plist; do
sudo launchctl unload "$plist" 2> /dev/null || true
done < <(find /Library/LaunchAgents -maxdepth 1 -name "${bundle_id}*.plist" -print0 2> /dev/null)
fi
if [[ -d /Library/LaunchDaemons ]]; then
while IFS= read -r -d '' plist; do
sudo launchctl unload "$plist" 2> /dev/null || true
done < <(find /Library/LaunchDaemons -maxdepth 1 -name "${bundle_id}*.plist" -print0 2> /dev/null)
fi
fi
}
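
The NUL-delimited find/read pairing used throughout this function, in isolation; it survives plist names containing spaces (the bundle id is a placeholder):

    bundle_id="com.example.app"
    while IFS= read -r -d '' plist; do
        echo "would unload: $plist"
    done < <(find ~/Library/LaunchAgents -maxdepth 1 \
        -name "${bundle_id}*.plist" -print0 2> /dev/null)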

# Remove a list of files (handles both regular files and symlinks)
# Args: $1 = file_list (newline-separated), $2 = use_sudo (true/false)
# Returns: number of files removed
# Remove files (handles symlinks, optional sudo).
remove_file_list() {
local file_list="$1"
local use_sudo="${2:-false}"
@@ -78,14 +98,12 @@ remove_file_list() {
[[ -n "$file" && -e "$file" ]] || continue

if [[ -L "$file" ]]; then
# Symlink: use direct rm
if [[ "$use_sudo" == "true" ]]; then
sudo rm "$file" 2> /dev/null && ((count++)) || true
else
rm "$file" 2> /dev/null && ((count++)) || true
fi
else
# Regular file/directory: use safe_remove
if [[ "$use_sudo" == "true" ]]; then
safe_sudo_remove "$file" && ((count++)) || true
else
@@ -97,8 +115,7 @@ remove_file_list() {
echo "$count"
}
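
A minimal sketch of the split this function makes: symlinks get a plain rm so only the link itself goes away, while everything else goes through the recursive path (in Mole, safe_remove/safe_sudo_remove add extra guards; names here are made up):

    remove_path() {
        local p="$1"
        if [[ -L "$p" ]]; then
            rm "$p"        # removes the symlink only
        else
            rm -rf "$p"    # regular file or directory tree
        fi
    }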

# Batch uninstall with single confirmation
# Globals: selected_apps (read) - array of selected applications
# Batch uninstall with single confirmation.
batch_uninstall_applications() {
local total_size_freed=0

@@ -108,19 +125,18 @@ batch_uninstall_applications() {
return 0
fi

# Pre-process: Check for running apps and calculate total impact
# Pre-scan: running apps, sudo needs, size.
local -a running_apps=()
local -a sudo_apps=()
local total_estimated_size=0
local -a app_details=()

# Analyze selected apps with progress indicator
if [[ -t 1 ]]; then start_inline_spinner "Scanning files..."; fi
for selected_app in "${selected_apps[@]}"; do
[[ -z "$selected_app" ]] && continue
IFS='|' read -r _ app_path app_name bundle_id _ _ <<< "$selected_app"

# Check if app is running using executable name from bundle
# Check running app by bundle executable if available.
local exec_name=""
if [[ -e "$app_path/Contents/Info.plist" ]]; then
exec_name=$(defaults read "$app_path/Contents/Info.plist" CFBundleExecutable 2> /dev/null || echo "")
@@ -130,17 +146,21 @@ batch_uninstall_applications() {
running_apps+=("$app_name")
fi

# Check if app requires sudo to delete (either app bundle or system files)
# Sudo needed if bundle owner/dir is not writable or system files exist.
local needs_sudo=false
if [[ ! -w "$(dirname "$app_path")" ]] || [[ "$(get_file_owner "$app_path")" == "root" ]]; then
local app_owner=$(get_file_owner "$app_path")
local current_user=$(whoami)
if [[ ! -w "$(dirname "$app_path")" ]] ||
[[ "$app_owner" == "root" ]] ||
[[ -n "$app_owner" && "$app_owner" != "$current_user" ]]; then
needs_sudo=true
fi

# Calculate size for summary (including system files)
# Size estimate includes related and system files.
local app_size_kb=$(get_path_size_kb "$app_path")
local related_files=$(find_app_files "$bundle_id" "$app_name")
local related_size_kb=$(calculate_total_size "$related_files")
# system_files is a newline-separated string, not an array
# system_files is a newline-separated string, not an array.
# shellcheck disable=SC2178,SC2128
local system_files=$(find_app_system_files "$bundle_id" "$app_name")
# shellcheck disable=SC2128
@@ -148,7 +168,6 @@ batch_uninstall_applications() {
local total_kb=$((app_size_kb + related_size_kb + system_size_kb))
((total_estimated_size += total_kb))

# Check if system files require sudo
# shellcheck disable=SC2128
if [[ -n "$system_files" ]]; then
needs_sudo=true
@@ -158,25 +177,44 @@ batch_uninstall_applications() {
sudo_apps+=("$app_name")
fi

# Store details for later use
# Base64 encode file lists to handle multi-line data safely (single line)
# Check for sensitive user data once.
local has_sensitive_data="false"
if [[ -n "$related_files" ]] && echo "$related_files" | grep -qE "$SENSITIVE_DATA_REGEX"; then
has_sensitive_data="true"
fi

# Store details for later use (base64 keeps lists on one line).
local encoded_files
encoded_files=$(printf '%s' "$related_files" | base64 | tr -d '\n')
local encoded_system_files
encoded_system_files=$(printf '%s' "$system_files" | base64 | tr -d '\n')
app_details+=("$app_name|$app_path|$bundle_id|$total_kb|$encoded_files|$encoded_system_files")
app_details+=("$app_name|$app_path|$bundle_id|$total_kb|$encoded_files|$encoded_system_files|$has_sensitive_data|$needs_sudo")
done
if [[ -t 1 ]]; then stop_inline_spinner; fi

# Format size display (convert KB to bytes for bytes_to_human())
local size_display=$(bytes_to_human "$((total_estimated_size * 1024))")

# Display detailed file list for each app before confirmation
echo ""
echo -e "${PURPLE_BOLD}Files to be removed:${NC}"
echo ""

# Warn if user data is detected.
local has_user_data=false
for detail in "${app_details[@]}"; do
IFS='|' read -r app_name app_path bundle_id total_kb encoded_files encoded_system_files <<< "$detail"
IFS='|' read -r _ _ _ _ _ _ has_sensitive_data <<< "$detail"
if [[ "$has_sensitive_data" == "true" ]]; then
has_user_data=true
break
fi
done

if [[ "$has_user_data" == "true" ]]; then
echo -e "${YELLOW}${ICON_WARNING}${NC} ${YELLOW}Note: Some apps contain user configurations/themes${NC}"
echo ""
fi

for detail in "${app_details[@]}"; do
IFS='|' read -r app_name app_path bundle_id total_kb encoded_files encoded_system_files has_sensitive_data needs_sudo_flag <<< "$detail"
local related_files=$(decode_file_list "$encoded_files" "$app_name")
local system_files=$(decode_file_list "$encoded_system_files" "$app_name")
local app_size_display=$(bytes_to_human "$((total_kb * 1024))")
@@ -184,7 +222,7 @@ batch_uninstall_applications() {
echo -e "${BLUE}${ICON_CONFIRM}${NC} ${app_name} ${GRAY}(${app_size_display})${NC}"
echo -e " ${GREEN}${ICON_SUCCESS}${NC} ${app_path/$HOME/~}"

# Show related files (limit to 5 most important ones for brevity)
# Show related files (limit to 5).
local file_count=0
local max_files=5
while IFS= read -r file; do
@@ -196,7 +234,7 @@ batch_uninstall_applications() {
fi
done <<< "$related_files"

# Show system files
# Show system files (limit to 5).
local sys_file_count=0
while IFS= read -r file; do
if [[ -n "$file" && -e "$file" ]]; then
@@ -207,7 +245,6 @@ batch_uninstall_applications() {
fi
done <<< "$system_files"

# Show count of remaining files if truncated
local total_hidden=$((file_count > max_files ? file_count - max_files : 0))
((total_hidden += sys_file_count > max_files ? sys_file_count - max_files : 0))
if [[ $total_hidden -gt 0 ]]; then
@@ -215,7 +252,7 @@ batch_uninstall_applications() {
fi
done

# Show summary and get batch confirmation first (before asking for password)
# Confirmation before requesting sudo.
local app_total=${#selected_apps[@]}
local app_text="app"
[[ $app_total -gt 1 ]] && app_text="apps"
@@ -247,9 +284,8 @@ batch_uninstall_applications() {
;;
esac

# User confirmed, now request sudo access if needed
# Request sudo if needed.
if [[ ${#sudo_apps[@]} -gt 0 ]]; then
# Check if sudo is already cached
if ! sudo -n true 2> /dev/null; then
if ! request_sudo_access "Admin required for system apps: ${sudo_apps[*]}"; then
echo ""
@@ -257,10 +293,9 @@ batch_uninstall_applications() {
return 1
fi
fi
# Start sudo keepalive with robust parent checking
# Keep sudo alive during uninstall.
parent_pid=$$
(while true; do
# Check if parent process still exists first
if ! kill -0 "$parent_pid" 2> /dev/null; then
exit 0
fi
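
For context, the keepalive this hunk belongs to usually takes this overall shape: refresh sudo's timestamp in a background subshell until the parent exits (the interval and error handling here are illustrative, not Mole's exact values):

    parent_pid=$$
    (
        while kill -0 "$parent_pid" 2> /dev/null; do
            sudo -n true 2> /dev/null || exit 0
            sleep 30
        done
    ) &
    sudo_keepalive_pid=$!
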
@@ -272,48 +307,60 @@ batch_uninstall_applications() {

if [[ -t 1 ]]; then start_inline_spinner "Uninstalling apps..."; fi

# Force quit running apps first (batch)
# Note: Apps are already killed in the individual uninstall loop below with app_path for precise matching

# Perform uninstallations (silent mode, show results at end)
# Perform uninstallations (silent mode, show results at end).
if [[ -t 1 ]]; then stop_inline_spinner; fi
local success_count=0 failed_count=0
local -a failed_items=()
local -a success_items=()
for detail in "${app_details[@]}"; do
IFS='|' read -r app_name app_path bundle_id total_kb encoded_files encoded_system_files <<< "$detail"
IFS='|' read -r app_name app_path bundle_id total_kb encoded_files encoded_system_files has_sensitive_data needs_sudo <<< "$detail"
local related_files=$(decode_file_list "$encoded_files" "$app_name")
local system_files=$(decode_file_list "$encoded_system_files" "$app_name")
local reason=""
local needs_sudo=false
[[ ! -w "$(dirname "$app_path")" || "$(get_file_owner "$app_path")" == "root" ]] && needs_sudo=true

# Stop Launch Agents and Daemons before removal
# Stop Launch Agents/Daemons before removal.
local has_system_files="false"
[[ -n "$system_files" ]] && has_system_files="true"
stop_launch_services "$bundle_id" "$has_system_files"

# Force quit app if still running
if ! force_kill_app "$app_name" "$app_path"; then
reason="still running"
fi

# Remove the application only if not running
# Remove the application only if not running.
if [[ -z "$reason" ]]; then
if [[ "$needs_sudo" == true ]]; then
safe_sudo_remove "$app_path" || reason="remove failed"
if ! safe_sudo_remove "$app_path"; then
local app_owner=$(get_file_owner "$app_path")
local current_user=$(whoami)
if [[ -n "$app_owner" && "$app_owner" != "$current_user" && "$app_owner" != "root" ]]; then
reason="owned by $app_owner"
else
reason="permission denied"
fi
fi
else
safe_remove "$app_path" true || reason="remove failed"
fi
fi

# Remove related files if app removal succeeded
# Remove related files if app removal succeeded.
if [[ -z "$reason" ]]; then
# Remove user-level files
remove_file_list "$related_files" "false" > /dev/null
# Remove system-level files (requires sudo)
remove_file_list "$system_files" "true" > /dev/null

# Clean up macOS defaults (preference domains).
if [[ -n "$bundle_id" && "$bundle_id" != "unknown" ]]; then
if defaults read "$bundle_id" &> /dev/null; then
defaults delete "$bundle_id" 2> /dev/null || true
fi

# ByHost preferences (machine-specific).
if [[ -d ~/Library/Preferences/ByHost ]]; then
find ~/Library/Preferences/ByHost -maxdepth 1 -name "${bundle_id}.*.plist" -delete 2> /dev/null || true
fi
fi

((total_size_freed += total_kb))
((success_count++))
((files_cleaned++))
@@ -341,7 +388,7 @@ batch_uninstall_applications() {
success_line+=", freed ${GREEN}${freed_display}${NC}"
fi

# Format app list with max 3 per line
# Format app list with max 3 per line.
if [[ -n "$success_list" ]]; then
local idx=0
local is_first_line=true
@@ -351,25 +398,20 @@ batch_uninstall_applications() {
local display_item="${GREEN}${app_name}${NC}"

if ((idx % 3 == 0)); then
# Start new line
if [[ -n "$current_line" ]]; then
summary_details+=("$current_line")
fi
if [[ "$is_first_line" == true ]]; then
# First line: append to success_line
current_line="${success_line}: $display_item"
is_first_line=false
else
# Subsequent lines: just the apps
current_line="$display_item"
fi
else
# Add to current line
current_line="$current_line, $display_item"
fi
((idx++))
done
# Add the last line
if [[ -n "$current_line" ]]; then
summary_details+=("$current_line")
fi
@@ -394,7 +436,8 @@ batch_uninstall_applications() {
case "$first_reason" in
still*running*) reason_summary="is still running" ;;
remove*failed*) reason_summary="could not be removed" ;;
permission*) reason_summary="permission denied" ;;
permission*denied*) reason_summary="permission denied" ;;
owned*by*) reason_summary="$first_reason (try with sudo)" ;;
*) reason_summary="$first_reason" ;;
esac
fi
@@ -414,12 +457,11 @@ batch_uninstall_applications() {
print_summary_block "$title" "${summary_details[@]}"
printf '\n'

# Clean up Dock entries for uninstalled apps
# Clean up Dock entries for uninstalled apps.
if [[ $success_count -gt 0 ]]; then
local -a removed_paths=()
for detail in "${app_details[@]}"; do
IFS='|' read -r app_name app_path _ _ _ _ <<< "$detail"
# Check if this app was successfully removed
for success_name in "${success_items[@]}"; do
if [[ "$success_name" == "$app_name" ]]; then
removed_paths+=("$app_path")
@@ -432,14 +474,14 @@ batch_uninstall_applications() {
fi
fi

# Clean up sudo keepalive if it was started
# Clean up sudo keepalive if it was started.
if [[ -n "${sudo_keepalive_pid:-}" ]]; then
kill "$sudo_keepalive_pid" 2> /dev/null || true
wait "$sudo_keepalive_pid" 2> /dev/null || true
sudo_keepalive_pid=""
fi

# Invalidate cache if any apps were successfully uninstalled
# Invalidate cache if any apps were successfully uninstalled.
if [[ $success_count -gt 0 ]]; then
local cache_file="$HOME/.cache/mole/app_scan_cache"
rm -f "$cache_file" 2> /dev/null || true

225
mole
@@ -1,76 +1,91 @@
#!/bin/bash
# Mole - Main Entry Point
# A comprehensive macOS maintenance tool
#
# Clean - Remove junk files and optimize system
# Uninstall - Remove applications completely
# Analyze - Interactive disk space explorer
#
# Usage:
# ./mole # Interactive main menu
# ./mole clean # Direct clean mode
# ./mole uninstall # Direct uninstall mode
# ./mole analyze # Disk space explorer
# ./mole --help # Show help
# Mole - Main CLI entrypoint.
# Routes subcommands and interactive menu.
# Handles update/remove flows.

set -euo pipefail

# Get script directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Source common functions
source "$SCRIPT_DIR/lib/core/common.sh"
source "$SCRIPT_DIR/lib/core/commands.sh"

# Version info
VERSION="1.13.5"
trap cleanup_temp_files EXIT INT TERM

# Version and update helpers
VERSION="1.17.0"
MOLE_TAGLINE="Deep clean and optimize your Mac."

# Check if Touch ID is already configured
is_touchid_configured() {
local pam_sudo_file="/etc/pam.d/sudo"
[[ -f "$pam_sudo_file" ]] && grep -q "pam_tid.so" "$pam_sudo_file" 2> /dev/null
}

# Get latest version from remote repository
get_latest_version() {
curl -fsSL --connect-timeout 2 --max-time 3 -H "Cache-Control: no-cache" \
"https://raw.githubusercontent.com/tw93/mole/main/mole" 2> /dev/null |
grep '^VERSION=' | head -1 | sed 's/VERSION="\(.*\)"/\1/'
}

# Get latest version from GitHub API
# This works for both Homebrew and manual installs since versions are synced
get_latest_version_from_github() {
local version
version=$(curl -fsSL --connect-timeout 2 --max-time 3 \
"https://api.github.com/repos/tw93/mole/releases/latest" 2> /dev/null |
grep '"tag_name"' | head -1 | sed -E 's/.*"([^"]+)".*/\1/')
# Remove 'v' or 'V' prefix if present
version="${version#v}"
version="${version#V}"
echo "$version"
}
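
With a remote version in hand, an update check reduces to an ordering test; sort -V gives natural version ordering (a sketch, assuming a sort with -V support, which modern macOS and GNU coreutils provide):

    is_newer() {
        [[ "$1" != "$2" ]] &&
            [[ "$(printf '%s\n%s\n' "$1" "$2" | sort -V | tail -1)" == "$2" ]]
    }
    is_newer "1.13.5" "1.17.0" && echo "update available"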

# Check if installed via Homebrew
# Install detection (Homebrew vs manual).
is_homebrew_install() {
command -v brew > /dev/null 2>&1 && brew list mole > /dev/null 2>&1
local mole_path
mole_path=$(command -v mole 2> /dev/null) || return 1

if [[ -L "$mole_path" ]] && readlink "$mole_path" | grep -q "Cellar/mole"; then
if command -v brew > /dev/null 2>&1; then
brew list --formula 2> /dev/null | grep -q "^mole$" && return 0
else
return 1
fi
fi

if [[ -f "$mole_path" ]]; then
case "$mole_path" in
/opt/homebrew/bin/mole | /usr/local/bin/mole)
if [[ -d /opt/homebrew/Cellar/mole ]] || [[ -d /usr/local/Cellar/mole ]]; then
if command -v brew > /dev/null 2>&1; then
brew list --formula 2> /dev/null | grep -q "^mole$" && return 0
else
return 0 # Cellar exists, probably Homebrew install
fi
fi
;;
esac
fi

if command -v brew > /dev/null 2>&1; then
local brew_prefix
brew_prefix=$(brew --prefix 2> /dev/null)
if [[ -n "$brew_prefix" && "$mole_path" == "$brew_prefix/bin/mole" && -d "$brew_prefix/Cellar/mole" ]]; then
brew list --formula 2> /dev/null | grep -q "^mole$" && return 0
fi
fi

return 1
}
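
The core signal the function keys on, reduced to one helper: a Homebrew-managed binary is a symlink whose target resolves under a Cellar/<formula> directory (a generic sketch, not Mole's full fallback chain):

    is_brew_managed() {
        local bin
        bin=$(command -v "$1" 2> /dev/null) || return 1
        [[ -L "$bin" ]] && readlink "$bin" | grep -q "Cellar/$1"
    }
    is_brew_managed mole && echo "installed via Homebrew"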
|
||||
|
||||
# Check for updates (non-blocking, always check in background)
|
||||
# Background update notice
|
||||
check_for_updates() {
|
||||
local msg_cache="$HOME/.cache/mole/update_message"
|
||||
mkdir -p "$(dirname "$msg_cache")" 2> /dev/null
|
||||
ensure_user_dir "$(dirname "$msg_cache")"
|
||||
ensure_user_file "$msg_cache"
|
||||
|
||||
# Background version check (save to file, don't output)
|
||||
# Always check in background, display result from previous check
|
||||
(
|
||||
local latest
|
||||
|
||||
# Use GitHub API for version check (works for both Homebrew and manual installs)
|
||||
# Try API first (faster and more reliable)
|
||||
latest=$(get_latest_version_from_github)
|
||||
if [[ -z "$latest" ]]; then
|
||||
# Fallback to parsing mole script from raw GitHub
|
||||
latest=$(get_latest_version)
|
||||
fi
|
||||
|
||||
@@ -83,7 +98,6 @@ check_for_updates() {
|
||||
disown 2> /dev/null || true
|
||||
}
|
||||
|
||||
# Show update notification if available
|
||||
show_update_notification() {
|
||||
local msg_cache="$HOME/.cache/mole/update_message"
|
||||
if [[ -f "$msg_cache" && -s "$msg_cache" ]]; then
|
||||
@@ -92,6 +106,7 @@ show_update_notification() {
|
||||
fi
|
||||
}
|
||||
|
||||
# UI helpers
|
||||
show_brand_banner() {
|
||||
cat << EOF
|
||||
${GREEN} __ __ _ ${NC}
|
||||
@@ -104,7 +119,6 @@ EOF
|
||||
}
|
||||
|
||||
animate_mole_intro() {
|
||||
# Skip animation if stdout isn't a TTY (non-interactive)
|
||||
if [[ ! -t 1 ]]; then
|
||||
return
|
||||
fi
|
||||
@@ -197,8 +211,7 @@ show_version() {
|
||||
local sip_status
|
||||
if command -v csrutil > /dev/null; then
|
||||
sip_status=$(csrutil status 2> /dev/null | grep -o "enabled\|disabled" || echo "Unknown")
|
||||
# Capitalize first letter
|
||||
sip_status="$(tr '[:lower:]' '[:upper:]' <<< ${sip_status:0:1})${sip_status:1}"
|
||||
sip_status="$(tr '[:lower:]' '[:upper:]' <<< "${sip_status:0:1}")${sip_status:1}"
|
||||
else
|
||||
sip_status="Unknown"
|
||||
fi
|
||||
@@ -226,43 +239,39 @@ show_help() {
|
||||
echo
|
||||
printf "%s%s%s\n" "$BLUE" "COMMANDS" "$NC"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo" "$NC" "Main menu"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo clean" "$NC" "Free up disk space"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo uninstall" "$NC" "Remove apps completely"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo optimize" "$NC" "Check and maintain system"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo analyze" "$NC" "Explore disk usage"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo status" "$NC" "Monitor system health"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo touchid" "$NC" "Configure Touch ID for sudo"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo update" "$NC" "Update to latest version"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo remove" "$NC" "Remove Mole from system"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo --help" "$NC" "Show help"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo --version" "$NC" "Show version"
|
||||
for entry in "${MOLE_COMMANDS[@]}"; do
|
||||
local name="${entry%%:*}"
|
||||
local desc="${entry#*:}"
|
||||
local display="mo $name"
|
||||
[[ "$name" == "help" ]] && display="mo --help"
|
||||
[[ "$name" == "version" ]] && display="mo --version"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "$display" "$NC" "$desc"
|
||||
done
|
||||
echo
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo clean --dry-run" "$NC" "Preview cleanup"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo clean --whitelist" "$NC" "Manage protected caches"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo uninstall --force-rescan" "$NC" "Rescan apps and refresh cache"
|
||||
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo optimize --dry-run" "$NC" "Preview optimization"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo optimize --whitelist" "$NC" "Manage protected items"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo purge --paths" "$NC" "Configure scan directories"
|
||||
echo
|
||||
printf "%s%s%s\n" "$BLUE" "OPTIONS" "$NC"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "--debug" "$NC" "Show detailed operation logs"
|
||||
echo
|
||||
}
|
||||
|
||||
# Simple update function
|
||||
# Update flow (Homebrew or installer).
|
||||
update_mole() {
|
||||
# Set up cleanup trap for update process
|
||||
local update_interrupted=false
|
||||
trap 'update_interrupted=true; echo ""; log_error "Update interrupted by user"; exit 130' INT TERM
|
||||
trap 'update_interrupted=true; echo ""; exit 130' INT TERM
|
||||
|
||||
# Check if installed via Homebrew
|
||||
if is_homebrew_install; then
|
||||
update_via_homebrew "$VERSION"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Check for updates
|
||||
local latest
|
||||
latest=$(get_latest_version_from_github)
|
||||
# Fallback to raw GitHub if API fails
|
||||
[[ -z "$latest" ]] && latest=$(get_latest_version)
|
||||
|
||||
if [[ -z "$latest" ]]; then
|
||||
@@ -279,7 +288,6 @@ update_mole() {
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Download and run installer with progress
|
||||
if [[ -t 1 ]]; then
|
||||
start_inline_spinner "Downloading latest version..."
|
||||
else
|
||||
@@ -293,7 +301,6 @@ update_mole() {
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Download installer with progress and better error handling
|
||||
local download_error=""
|
||||
if command -v curl > /dev/null 2>&1; then
|
||||
download_error=$(curl -fsSL --connect-timeout 10 --max-time 60 "$installer_url" -o "$tmp_installer" 2>&1) || {
|
||||
@@ -302,7 +309,6 @@ update_mole() {
|
||||
rm -f "$tmp_installer"
|
||||
log_error "Update failed (curl error: $curl_exit)"
|
||||
|
||||
# Provide helpful error messages based on curl exit codes
|
||||
case $curl_exit in
|
||||
6) echo -e "${YELLOW}Tip:${NC} Could not resolve host. Check DNS or network connection." ;;
|
||||
7) echo -e "${YELLOW}Tip:${NC} Failed to connect. Check network or proxy settings." ;;
|
||||
@@ -333,7 +339,6 @@ update_mole() {
|
||||
if [[ -t 1 ]]; then stop_inline_spinner; fi
|
||||
chmod +x "$tmp_installer"
|
||||
|
||||
# Determine install directory
|
||||
local mole_path
|
||||
mole_path="$(command -v mole 2> /dev/null || echo "$0")"
|
||||
local install_dir
|
||||
@@ -360,7 +365,6 @@ update_mole() {
|
||||
echo "Installing update..."
|
||||
fi
|
||||
|
||||
# Helper function to process installer output
|
||||
process_install_output() {
|
||||
local output="$1"
|
||||
if [[ -t 1 ]]; then stop_inline_spinner; fi
|
||||
@@ -371,7 +375,6 @@ update_mole() {
|
||||
printf '\n%s\n' "$filtered_output"
|
||||
fi
|
||||
|
||||
# Only show success message if installer didn't already do so
|
||||
if ! printf '%s\n' "$output" | grep -Eq "Updated to latest version|Already on latest version"; then
|
||||
local new_version
|
||||
new_version=$("$mole_path" --version 2> /dev/null | awk 'NF {print $NF}' || echo "")
|
||||
@@ -381,13 +384,16 @@ update_mole() {
|
||||
fi
|
||||
}
|
||||
|
||||
# Run installer with visible output (but capture for error handling)
|
||||
local install_output
|
||||
if install_output=$("$tmp_installer" --prefix "$install_dir" --config "$HOME/.config/mole" --update 2>&1); then
|
||||
local update_tag="V${latest#V}"
|
||||
local config_dir="${MOLE_CONFIG_DIR:-$SCRIPT_DIR}"
|
||||
if [[ ! -f "$config_dir/lib/core/common.sh" ]]; then
|
||||
config_dir="$HOME/.config/mole"
|
||||
fi
|
||||
if install_output=$(MOLE_VERSION="$update_tag" "$tmp_installer" --prefix "$install_dir" --config "$config_dir" --update 2>&1); then
|
||||
process_install_output "$install_output"
|
||||
else
|
||||
# Retry without --update flag
|
||||
if install_output=$("$tmp_installer" --prefix "$install_dir" --config "$HOME/.config/mole" 2>&1); then
|
||||
if install_output=$(MOLE_VERSION="$update_tag" "$tmp_installer" --prefix "$install_dir" --config "$config_dir" 2>&1); then
|
||||
process_install_output "$install_output"
|
||||
else
|
||||
if [[ -t 1 ]]; then stop_inline_spinner; fi
|
||||
@@ -402,9 +408,8 @@ update_mole() {
|
||||
rm -f "$HOME/.cache/mole/update_message"
|
||||
}
|
||||
|
||||
# Remove Mole from system
|
||||
# Remove flow (Homebrew + manual + config/cache).
|
||||
remove_mole() {
|
||||
# Detect all installations with loading
|
||||
if [[ -t 1 ]]; then
|
||||
start_inline_spinner "Detecting Mole installations..."
|
||||
else
|
||||
@@ -412,25 +417,37 @@ remove_mole() {
|
||||
fi
|
||||
|
||||
local is_homebrew=false
|
||||
local brew_cmd=""
|
||||
local brew_has_mole="false"
|
||||
local -a manual_installs=()
|
||||
local -a alias_installs=()
|
||||
|
||||
# Check Homebrew
|
||||
if is_homebrew_install; then
|
||||
if command -v brew > /dev/null 2>&1; then
|
||||
brew_cmd="brew"
|
||||
elif [[ -x "/opt/homebrew/bin/brew" ]]; then
|
||||
brew_cmd="/opt/homebrew/bin/brew"
|
||||
elif [[ -x "/usr/local/bin/brew" ]]; then
|
||||
        brew_cmd="/usr/local/bin/brew"
    fi

    if [[ -n "$brew_cmd" ]]; then
        if "$brew_cmd" list --formula 2> /dev/null | grep -q "^mole$"; then
            brew_has_mole="true"
        fi
    fi

    if [[ "$brew_has_mole" == "true" ]] || is_homebrew_install; then
        is_homebrew=true
    fi

    # Find mole installations using which/command
    local found_mole
    found_mole=$(command -v mole 2> /dev/null || true)
    if [[ -n "$found_mole" && -f "$found_mole" ]]; then
        # Check if it's not a Homebrew symlink
        if [[ ! -L "$found_mole" ]] || ! readlink "$found_mole" | grep -q "Cellar/mole"; then
            manual_installs+=("$found_mole")
        fi
    fi

    # Also check common locations as fallback
    local -a fallback_paths=(
        "/usr/local/bin/mole"
        "$HOME/.local/bin/mole"
@@ -439,21 +456,18 @@ remove_mole() {

    for path in "${fallback_paths[@]}"; do
        if [[ -f "$path" && "$path" != "$found_mole" ]]; then
            # Check if it's not a Homebrew symlink
            if [[ ! -L "$path" ]] || ! readlink "$path" | grep -q "Cellar/mole"; then
                manual_installs+=("$path")
            fi
        fi
    done

    # Find mo alias
    local found_mo
    found_mo=$(command -v mo 2> /dev/null || true)
    if [[ -n "$found_mo" && -f "$found_mo" ]]; then
        alias_installs+=("$found_mo")
    fi

    # Also check common locations for mo
    local -a alias_fallback=(
        "/usr/local/bin/mo"
        "$HOME/.local/bin/mo"
@@ -472,7 +486,6 @@ remove_mole() {

    printf '\n'

    # Check if anything to remove
    local manual_count=${#manual_installs[@]}
    local alias_count=${#alias_installs[@]}
    if [[ "$is_homebrew" == "false" && ${manual_count:-0} -eq 0 && ${alias_count:-0} -eq 0 ]]; then
@@ -480,7 +493,6 @@ remove_mole() {
        exit 0
    fi

    # Show what will be removed
    echo -e "${YELLOW}Remove Mole${NC} - will delete the following:"
    if [[ "$is_homebrew" == "true" ]]; then
        echo "  - Mole via Homebrew"
@@ -492,45 +504,48 @@ remove_mole() {
    echo "  - ~/.cache/mole"
    echo -ne "${PURPLE}${ICON_ARROW}${NC} Press ${GREEN}Enter${NC} to confirm, ${GRAY}ESC${NC} to cancel: "

    # Read single key
    IFS= read -r -s -n1 key || key=""
    drain_pending_input # Clean up any escape sequence remnants
    case "$key" in
        $'\e')
            echo -e "${GRAY}Cancelled${NC}"
            echo ""
            exit 0
            ;;
        "" | $'\n' | $'\r')
            printf "\r\033[K" # Clear the prompt line
            # Continue with removal
            ;;
        *)
            echo -e "${GRAY}Cancelled${NC}"
            echo ""
            exit 0
            ;;
    esac

    # Remove Homebrew installation (silent)
    local has_error=false
    if [[ "$is_homebrew" == "true" ]]; then
        if ! brew uninstall mole > /dev/null 2>&1; then
        if [[ -z "$brew_cmd" ]]; then
            log_error "Homebrew command not found. Please ensure Homebrew is installed and in your PATH."
            log_warning "You may need to manually run: brew uninstall --force mole"
            exit 1
        fi

        log_admin "Attempting to uninstall Mole via Homebrew..."
        local brew_uninstall_output
        if ! brew_uninstall_output=$("$brew_cmd" uninstall --force mole 2>&1); then
            has_error=true
            log_error "Homebrew uninstallation failed:"
            printf "%s\n" "$brew_uninstall_output" | sed "s/^/${RED}  | ${NC}/" >&2
            log_warning "Please manually run: ${YELLOW}brew uninstall --force mole${NC}"
            echo "" # Add a blank line for readability
        else
            log_success "Mole uninstalled via Homebrew."
        fi
    fi
    # Remove manual installations
    if [[ ${manual_count:-0} -gt 0 ]]; then
        for install in "${manual_installs[@]}"; do
            if [[ -f "$install" ]]; then
                # Check if directory requires sudo (deletion is a directory operation)
                if [[ ! -w "$(dirname "$install")" ]]; then
                    # Requires sudo
                    if ! sudo rm -f "$install" 2> /dev/null; then
                        has_error=true
                    fi
                else
                    # Regular user permission
                    if ! rm -f "$install" 2> /dev/null; then
                        has_error=true
                    fi
@@ -541,25 +556,25 @@ remove_mole() {
    if [[ ${alias_count:-0} -gt 0 ]]; then
        for alias in "${alias_installs[@]}"; do
            if [[ -f "$alias" ]]; then
                # Check if directory requires sudo
                if [[ ! -w "$(dirname "$alias")" ]]; then
                    sudo rm -f "$alias" 2> /dev/null || true
                    if ! sudo rm -f "$alias" 2> /dev/null; then
                        has_error=true
                    fi
                else
                    rm -f "$alias" 2> /dev/null || true
                    if ! rm -f "$alias" 2> /dev/null; then
                        has_error=true
                    fi
                fi
            fi
        done
    fi
    # Clean up cache first (silent)
    if [[ -d "$HOME/.cache/mole" ]]; then
        rm -rf "$HOME/.cache/mole" 2> /dev/null || true
    fi
    # Clean up configuration last (silent)
    if [[ -d "$HOME/.config/mole" ]]; then
        rm -rf "$HOME/.config/mole" 2> /dev/null || true
    fi

    # Show final result
    local final_message
    if [[ "$has_error" == "true" ]]; then
        final_message="${YELLOW}${ICON_ERROR} Mole uninstalled with some errors, thank you for using Mole!${NC}"
@@ -571,38 +586,33 @@ remove_mole() {
    exit 0
}

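A brief note on the Cellar test used twice above: Homebrew installs a formula into its Cellar and links it into the bin directory, so a brew-managed mole is normally a symlink whose target contains "Cellar/mole", while a manually copied binary is a regular file. A standalone sketch of the same check (hypothetical helper, not part of the diff):

# Returns 0 only for a symlink that resolves into Homebrew's Cellar
is_brew_managed() {
    local bin="$1"
    [[ -L "$bin" ]] || return 1 # regular file => manual install, not brew-managed
    readlink "$bin" | grep -q "Cellar/mole"
}
is_brew_managed "/opt/homebrew/bin/mole" && echo "managed by Homebrew"
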
# Display main menu options with minimal refresh to avoid flicker
# Menu UI
show_main_menu() {
    local selected="${1:-1}"
    local _full_draw="${2:-true}" # Kept for compatibility (unused)
    local banner="${MAIN_MENU_BANNER:-}"
    local update_message="${MAIN_MENU_UPDATE_MESSAGE:-}"

    # Fallback if globals missing (should not happen)
    if [[ -z "$banner" ]]; then
        banner="$(show_brand_banner)"
        MAIN_MENU_BANNER="$banner"
    fi

    printf '\033[H' # Move cursor to home
    printf '\033[H'

    local line=""
    # Leading spacer
    printf '\r\033[2K\n'

    # Brand banner
    while IFS= read -r line || [[ -n "$line" ]]; do
        printf '\r\033[2K%s\n' "$line"
    done <<< "$banner"

    # Update notification block (if present)
    if [[ -n "$update_message" ]]; then
        while IFS= read -r line || [[ -n "$line" ]]; do
            printf '\r\033[2K%s\n' "$line"
        done <<< "$update_message"
    fi

    # Spacer before menu options
    printf '\r\033[2K\n'

    printf '\r\033[2K%s\n' "$(show_menu_option 1 "Clean Free up disk space" "$([[ $selected -eq 1 ]] && echo true || echo false)")"
@@ -613,7 +623,6 @@ show_main_menu() {

    if [[ -t 0 ]]; then
        printf '\r\033[2K\n'
        # Show TouchID if not configured, otherwise show Update
        local controls="${GRAY}↑↓ | Enter | M More | "
        if ! is_touchid_configured; then
            controls="${controls}T TouchID"
@@ -625,24 +634,21 @@ show_main_menu() {
        printf '\r\033[2K\n'
    fi

    # Clear any remaining content below without full screen wipe
    printf '\033[J'
}

# Interactive main menu loop
interactive_main_menu() {
    # Show intro animation only once per terminal tab
    if [[ -t 1 ]]; then
        local tty_name
        tty_name=$(tty 2> /dev/null || echo "")
        if [[ -n "$tty_name" ]]; then
            local flag_file
            local cache_dir="$HOME/.cache/mole"
            mkdir -p "$cache_dir" 2> /dev/null
            ensure_user_dir "$cache_dir"
            flag_file="$cache_dir/intro_$(echo "$tty_name" | tr -c '[:alnum:]_' '_')"
            if [[ ! -f "$flag_file" ]]; then
                animate_mole_intro
                touch "$flag_file" 2> /dev/null || true
                ensure_user_file "$flag_file"
            fi
        fi
    fi
@@ -737,13 +743,12 @@ interactive_main_menu() {
            "QUIT") cleanup_and_exit ;;
        esac

        # Drain any accumulated input after processing (e.g., touchpad scroll events)
        drain_pending_input
    done
}

# CLI dispatch
main() {
    # Parse global flags
    local -a args=()
    for arg in "$@"; do
        case "$arg" in
@@ -772,9 +777,15 @@ main() {
        "status")
            exec "$SCRIPT_DIR/bin/status.sh" "${args[@]:1}"
            ;;
        "purge")
            exec "$SCRIPT_DIR/bin/purge.sh" "${args[@]:1}"
            ;;
        "touchid")
            exec "$SCRIPT_DIR/bin/touchid.sh" "${args[@]:1}"
            ;;
        "completion")
            exec "$SCRIPT_DIR/bin/completion.sh" "${args[@]:1}"
            ;;
        "update")
            update_mole
            exit 0

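The menu code above leans on three ANSI escape sequences to redraw in place instead of clearing the whole screen: \033[H moves the cursor home, \r\033[2K erases the current line before reprinting it, and \033[J wipes everything below the last drawn line. A self-contained sketch of that flicker-free redraw technique (illustrative menu text, not the project's renderer):

#!/bin/bash
# Redraw a static menu in place; the screen is never fully cleared,
# so unchanged regions do not flicker between frames.
redraw() {
    printf '\033[H'                       # cursor to top-left, no clear
    printf '\r\033[2K%s\n' "Mole"         # erase + rewrite each line
    printf '\r\033[2K%s\n' "1) Clean"
    printf '\r\033[2K%s\n' "2) Uninstall"
    printf '\033[J'                       # clear any leftovers below the menu
}
redraw
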
@@ -1,51 +0,0 @@
#!/bin/bash
# Build Universal Binary for analyze-go
# Supports both Apple Silicon and Intel Macs

set -euo pipefail

cd "$(dirname "$0")/.."

# Check if Go is installed
if ! command -v go > /dev/null 2>&1; then
    echo "Error: Go not installed"
    echo "Install: brew install go"
    exit 1
fi

echo "Building analyze-go for multiple architectures..."

# Get version info
VERSION=$(git describe --tags --always --dirty 2> /dev/null || echo "dev")
BUILD_TIME=$(date -u '+%Y-%m-%d_%H:%M:%S')
LDFLAGS="-s -w -X main.Version=$VERSION -X main.BuildTime=$BUILD_TIME"

echo "  Version: $VERSION"
echo "  Build time: $BUILD_TIME"
echo ""

# Build for arm64 (Apple Silicon)
echo "  → Building for arm64..."
GOARCH=arm64 go build -ldflags="$LDFLAGS" -trimpath -o bin/analyze-go-arm64 ./cmd/analyze

# Build for amd64 (Intel)
echo "  → Building for amd64..."
GOARCH=amd64 go build -ldflags="$LDFLAGS" -trimpath -o bin/analyze-go-amd64 ./cmd/analyze

# Create Universal Binary
echo "  → Creating Universal Binary..."
lipo -create bin/analyze-go-arm64 bin/analyze-go-amd64 -output bin/analyze-go

# Clean up temporary files
rm bin/analyze-go-arm64 bin/analyze-go-amd64

# Verify
echo ""
echo "✓ Build complete!"
echo ""
file bin/analyze-go
size_bytes=$(stat -f%z bin/analyze-go 2> /dev/null || echo 0)
size_mb=$((size_bytes / 1024 / 1024))
printf "Size: %d MB (%d bytes)\n" "$size_mb" "$size_bytes"
echo ""
echo "Binary supports: arm64 (Apple Silicon) + x86_64 (Intel)"

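A note on verification for the deleted build script above: after lipo -create, the fat binary can be inspected directly with standard macOS tooling. Shown here as an illustrative check, not as part of the removed script:

# Inspect a universal binary produced by lipo -create
lipo -archs bin/analyze-go # typically prints: x86_64 arm64
lipo -info bin/analyze-go  # prints "Architectures in the fat file: ..."
file bin/analyze-go        # reports a Mach-O universal binary with 2 architectures
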
@@ -1,44 +0,0 @@
#!/bin/bash
# Build Universal Binary for status-go
# Supports both Apple Silicon and Intel Macs

set -euo pipefail

cd "$(dirname "$0")/.."

if ! command -v go > /dev/null 2>&1; then
    echo "Error: Go not installed"
    echo "Install: brew install go"
    exit 1
fi

echo "Building status-go for multiple architectures..."

VERSION=$(git describe --tags --always --dirty 2> /dev/null || echo "dev")
BUILD_TIME=$(date -u '+%Y-%m-%d_%H:%M:%S')
LDFLAGS="-s -w -X main.Version=$VERSION -X main.BuildTime=$BUILD_TIME"

echo "  Version: $VERSION"
echo "  Build time: $BUILD_TIME"
echo ""

echo "  → Building for arm64..."
GOARCH=arm64 go build -ldflags="$LDFLAGS" -trimpath -o bin/status-go-arm64 ./cmd/status

echo "  → Building for amd64..."
GOARCH=amd64 go build -ldflags="$LDFLAGS" -trimpath -o bin/status-go-amd64 ./cmd/status

echo "  → Creating Universal Binary..."
lipo -create bin/status-go-arm64 bin/status-go-amd64 -output bin/status-go

rm bin/status-go-arm64 bin/status-go-amd64

echo ""
echo "✓ Build complete!"
echo ""
file bin/status-go
size_bytes=$(stat -f%z bin/status-go 2> /dev/null || echo 0)
size_mb=$((size_bytes / 1024 / 1024))
printf "Size: %d MB (%d bytes)\n" "$size_mb" "$size_bytes"
echo ""
echo "Binary supports: arm64 (Apple Silicon) + x86_64 (Intel)"

183 scripts/check.sh
@@ -1,126 +1,189 @@
#!/bin/bash
# Unified check script for Mole project
# Runs all quality checks in one command
# Code quality checks for Mole.
# Auto-formats code, then runs lint and syntax checks.

set -e
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# Colors
MODE="all"

usage() {
    cat << 'EOF'
Usage: ./scripts/check.sh [--format|--no-format]

Options:
  --format      Apply formatting fixes only (shfmt, gofmt)
  --no-format   Skip formatting and run checks only
  --help        Show this help
EOF
}

while [[ $# -gt 0 ]]; do
    case "$1" in
        --format)
            MODE="format"
            shift
            ;;
        --no-format)
            MODE="check"
            shift
            ;;
        --help | -h)
            usage
            exit 0
            ;;
        *)
            echo "Unknown option: $1"
            usage
            exit 1
            ;;
    esac
done

cd "$PROJECT_ROOT"

RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

cd "$PROJECT_ROOT"
readonly ICON_SUCCESS="✓"
readonly ICON_ERROR="☻"
readonly ICON_WARNING="●"
readonly ICON_LIST="•"

echo -e "${BLUE}=== Running Mole Quality Checks ===${NC}\n"
echo -e "${BLUE}=== Mole Check (${MODE}) ===${NC}\n"

# 1. Format check
echo -e "${YELLOW}1. Checking code formatting...${NC}"
if command -v shfmt > /dev/null 2>&1; then
    if ./scripts/format.sh --check; then
        echo -e "${GREEN}✓ Formatting check passed${NC}\n"
SHELL_FILES=$(find . -type f \( -name "*.sh" -o -name "mole" \) \
    -not -path "./.git/*" \
    -not -path "*/node_modules/*" \
    -not -path "*/tests/tmp-*/*" \
    -not -path "*/.*" \
    2> /dev/null)

if [[ "$MODE" == "format" ]]; then
    echo -e "${YELLOW}Formatting shell scripts...${NC}"
    if command -v shfmt > /dev/null 2>&1; then
        echo "$SHELL_FILES" | xargs shfmt -i 4 -ci -sr -w
        echo -e "${GREEN}${ICON_SUCCESS} Shell formatting complete${NC}\n"
    else
        echo -e "${RED}✗ Formatting check failed${NC}\n"
        echo -e "${RED}${ICON_ERROR} shfmt not installed${NC}"
        exit 1
    fi
else
    echo -e "${YELLOW}⚠ shfmt not installed, skipping format check${NC}\n"

    if command -v go > /dev/null 2>&1; then
        echo -e "${YELLOW}Formatting Go code...${NC}"
        gofmt -w ./cmd
        echo -e "${GREEN}${ICON_SUCCESS} Go formatting complete${NC}\n"
    else
        echo -e "${YELLOW}${ICON_WARNING} go not installed, skipping gofmt${NC}\n"
    fi

    echo -e "${GREEN}=== Format Completed ===${NC}"
    exit 0
fi

# 2. ShellCheck
echo -e "${YELLOW}2. Running ShellCheck...${NC}"
if [[ "$MODE" != "check" ]]; then
    echo -e "${YELLOW}1. Formatting shell scripts...${NC}"
    if command -v shfmt > /dev/null 2>&1; then
        echo "$SHELL_FILES" | xargs shfmt -i 4 -ci -sr -w
        echo -e "${GREEN}${ICON_SUCCESS} Shell formatting applied${NC}\n"
    else
        echo -e "${YELLOW}${ICON_WARNING} shfmt not installed, skipping${NC}\n"
    fi

    if command -v go > /dev/null 2>&1; then
        echo -e "${YELLOW}2. Formatting Go code...${NC}"
        gofmt -w ./cmd
        echo -e "${GREEN}${ICON_SUCCESS} Go formatting applied${NC}\n"
    fi
fi

echo -e "${YELLOW}3. Running ShellCheck...${NC}"
if command -v shellcheck > /dev/null 2>&1; then
    # Count total files
    SHELL_FILES=$(find . -type f \( -name "*.sh" -o -name "mole" \) -not -path "./tests/*" -not -path "./.git/*")
    FILE_COUNT=$(echo "$SHELL_FILES" | wc -l | tr -d ' ')

    if shellcheck mole bin/*.sh lib/*.sh scripts/*.sh 2>&1 | grep -q "SC[0-9]"; then
        echo -e "${YELLOW}⚠ ShellCheck found some issues (non-critical):${NC}"
        shellcheck mole bin/*.sh lib/*.sh scripts/*.sh 2>&1 | head -20
        echo -e "${GREEN}✓ ShellCheck completed (${FILE_COUNT} files checked)${NC}\n"
    if shellcheck mole bin/*.sh lib/*/*.sh scripts/*.sh; then
        echo -e "${GREEN}${ICON_SUCCESS} ShellCheck passed${NC}\n"
    else
        echo -e "${GREEN}✓ ShellCheck passed (${FILE_COUNT} files checked)${NC}\n"
    fi
else
    echo -e "${YELLOW}⚠ shellcheck not installed, skipping${NC}\n"
fi

# 3. Unit tests (if available)
echo -e "${YELLOW}3. Running tests...${NC}"
if command -v bats > /dev/null 2>&1 && [ -d "tests" ]; then
    if bats tests/*.bats; then
        echo -e "${GREEN}✓ Tests passed${NC}\n"
    else
        echo -e "${RED}✗ Tests failed (see output above)${NC}\n"
        echo -e "${RED}${ICON_ERROR} ShellCheck failed${NC}\n"
        exit 1
    fi
else
    echo -e "${YELLOW}⚠ bats not installed or no tests found, skipping${NC}\n"
    echo -e "${YELLOW}${ICON_WARNING} shellcheck not installed, skipping${NC}\n"
fi

# 4. Code optimization checks
echo -e "${YELLOW}4. Checking code optimizations...${NC}"
echo -e "${YELLOW}4. Running syntax check...${NC}"
if ! bash -n mole; then
    echo -e "${RED}${ICON_ERROR} Syntax check failed (mole)${NC}\n"
    exit 1
fi
for script in bin/*.sh; do
    if ! bash -n "$script"; then
        echo -e "${RED}${ICON_ERROR} Syntax check failed ($script)${NC}\n"
        exit 1
    fi
done
find lib -name "*.sh" | while read -r script; do
    if ! bash -n "$script"; then
        echo -e "${RED}${ICON_ERROR} Syntax check failed ($script)${NC}\n"
        exit 1
    fi
done
echo -e "${GREEN}${ICON_SUCCESS} Syntax check passed${NC}\n"

echo -e "${YELLOW}5. Checking optimizations...${NC}"
OPTIMIZATION_SCORE=0
TOTAL_CHECKS=0

# Check 1: Keyboard input handling (restored to 1s for reliability)
((TOTAL_CHECKS++))
if grep -q "read -r -s -n 1 -t 1" lib/core/ui.sh; then
    echo -e "${GREEN}  ✓ Keyboard timeout properly configured (1s)${NC}"
    echo -e "${GREEN}  ${ICON_SUCCESS} Keyboard timeout configured${NC}"
    ((OPTIMIZATION_SCORE++))
else
    echo -e "${YELLOW}  ⚠ Keyboard timeout may be misconfigured${NC}"
    echo -e "${YELLOW}  ${ICON_WARNING} Keyboard timeout may be misconfigured${NC}"
fi

# Check 2: Single-pass drain_pending_input
((TOTAL_CHECKS++))
DRAIN_PASSES=$(grep -c "while IFS= read -r -s -n 1" lib/core/ui.sh 2> /dev/null || true)
DRAIN_PASSES=${DRAIN_PASSES:-0}
if [[ $DRAIN_PASSES -eq 1 ]]; then
    echo -e "${GREEN}  ✓ drain_pending_input optimized (single-pass)${NC}"
    echo -e "${GREEN}  ${ICON_SUCCESS} drain_pending_input optimized${NC}"
    ((OPTIMIZATION_SCORE++))
else
    echo -e "${YELLOW}  ⚠ drain_pending_input has multiple passes${NC}"
    echo -e "${YELLOW}  ${ICON_WARNING} drain_pending_input has multiple passes${NC}"
fi

# Check 3: Log rotation once per session
((TOTAL_CHECKS++))
if grep -q "rotate_log_once" lib/core/log.sh; then
    echo -e "${GREEN}  ✓ Log rotation optimized (once per session)${NC}"
    echo -e "${GREEN}  ${ICON_SUCCESS} Log rotation optimized${NC}"
    ((OPTIMIZATION_SCORE++))
else
    echo -e "${YELLOW}  ⚠ Log rotation not optimized${NC}"
    echo -e "${YELLOW}  ${ICON_WARNING} Log rotation not optimized${NC}"
fi

# Check 4: Simplified cache validation
((TOTAL_CHECKS++))
if ! grep -q "cache_meta\|cache_dir_mtime" bin/uninstall.sh; then
    echo -e "${GREEN}  ✓ Cache validation simplified${NC}"
    echo -e "${GREEN}  ${ICON_SUCCESS} Cache validation simplified${NC}"
    ((OPTIMIZATION_SCORE++))
else
    echo -e "${YELLOW}  ⚠ Cache still uses redundant metadata${NC}"
    echo -e "${YELLOW}  ${ICON_WARNING} Cache still uses redundant metadata${NC}"
fi

# Check 5: Stricter path validation
((TOTAL_CHECKS++))
if grep -q "Consecutive slashes" bin/clean.sh; then
    echo -e "${GREEN}  ✓ Path validation enhanced${NC}"
    echo -e "${GREEN}  ${ICON_SUCCESS} Path validation enhanced${NC}"
    ((OPTIMIZATION_SCORE++))
else
    echo -e "${YELLOW}  ⚠ Path validation not enhanced${NC}"
    echo -e "${YELLOW}  ${ICON_WARNING} Path validation not enhanced${NC}"
fi

echo -e "${BLUE}  Optimization score: $OPTIMIZATION_SCORE/$TOTAL_CHECKS${NC}\n"

# Summary
echo -e "${GREEN}=== All Checks Completed ===${NC}"
echo -e "${GREEN}=== Checks Completed ===${NC}"
if [[ $OPTIMIZATION_SCORE -eq $TOTAL_CHECKS ]]; then
    echo -e "${GREEN}✓ Code quality checks passed!${NC}"
    echo -e "${GREEN}✓ All optimizations applied!${NC}"
    echo -e "${GREEN}${ICON_SUCCESS} All optimizations applied${NC}"
else
    echo -e "${YELLOW}⚠ Code quality checks passed, but some optimizations missing${NC}"
    echo -e "${YELLOW}${ICON_WARNING} Some optimizations missing${NC}"
fi

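Taken together, the rewritten check.sh folds the old format.sh workflow and the quality checks into a single entry point selected by MODE. Typical invocations, following the usage() text in the diff above:

./scripts/check.sh              # default: format, then ShellCheck, syntax, and optimization checks
./scripts/check.sh --format     # apply shfmt/gofmt fixes and exit
./scripts/check.sh --no-format  # checks only, e.g. for CI; no files are modified
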
@@ -1,73 +0,0 @@
#!/bin/bash
# Format all shell scripts in the Mole project
#
# Usage:
#   ./scripts/format.sh          # Format all scripts
#   ./scripts/format.sh --check  # Check only, don't modify

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

CHECK_ONLY=false

# Parse arguments
if [[ "${1:-}" == "--check" ]]; then
    CHECK_ONLY=true
elif [[ "${1:-}" == "--help" || "${1:-}" == "-h" ]]; then
    cat << 'EOF'
Usage: ./scripts/format.sh [--check]

Format shell scripts using shfmt.

Options:
  --check   Check formatting without modifying files
  --help    Show this help

Install: brew install shfmt
EOF
    exit 0
fi

# Check if shfmt is installed
if ! command -v shfmt > /dev/null 2>&1; then
    echo "Error: shfmt not installed"
    echo "Install: brew install shfmt"
    exit 1
fi

# Find all shell scripts (excluding temp directories and build artifacts)
cd "$PROJECT_ROOT"

# Build list of files to format (exclude .git, node_modules, tmp directories)
FILES=$(find . -type f \( -name "*.sh" -o -name "mole" \) \
    -not -path "./.git/*" \
    -not -path "*/node_modules/*" \
    -not -path "*/tests/tmp-*/*" \
    -not -path "*/.*" \
    2> /dev/null)

if [[ -z "$FILES" ]]; then
    echo "No shell scripts found"
    exit 0
fi

# shfmt options: -i 4 (4 spaces), -ci (indent switch cases), -sr (space after redirect)
if [[ "$CHECK_ONLY" == "true" ]]; then
    echo "Checking formatting..."
    if echo "$FILES" | xargs shfmt -i 4 -ci -sr -d > /dev/null 2>&1; then
        echo "✓ All scripts properly formatted"
        exit 0
    else
        echo "✗ Some scripts need formatting:"
        echo "$FILES" | xargs shfmt -i 4 -ci -sr -d
        echo ""
        echo "Run './scripts/format.sh' to fix"
        exit 1
    fi
else
    echo "Formatting scripts..."
    echo "$FILES" | xargs shfmt -i 4 -ci -sr -w
    echo "✓ Done"
fi

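For reference, the shfmt invocation used in both check.sh and the deleted format.sh carries three flags: -i 4 (indent with 4 spaces), -ci (indent case branches), and -sr (put a space after redirect operators). A small before/after sketch, illustrative only since shfmt's exact output can vary between versions:

# Before shfmt -i 4 -ci -sr:
case "$1" in
--check) CHECK_ONLY=true ;;
esac
echo done >/tmp/log

# After:
case "$1" in
    --check) CHECK_ONLY=true ;;
esac
echo done > /tmp/log
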
@@ -1,115 +0,0 @@
#!/bin/bash
# Quick test runner script
# Runs all tests before committing

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR/.."

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

echo "==============================="
echo "  Mole Test Runner"
echo "==============================="
echo ""

# Track failures
FAILED=0

# 1. ShellCheck
echo "1. Running ShellCheck..."
if command -v shellcheck > /dev/null 2>&1; then
    if shellcheck mole bin/*.sh 2> /dev/null &&
        find lib -name "*.sh" -type f -exec shellcheck {} + 2> /dev/null; then
        printf "${GREEN}✓ ShellCheck passed${NC}\n"
    else
        printf "${RED}✗ ShellCheck failed${NC}\n"
        ((FAILED++))
    fi
else
    printf "${YELLOW}⚠ ShellCheck not installed, skipping${NC}\n"
fi
echo ""

# 2. Syntax Check
echo "2. Running syntax check..."
if bash -n mole &&
    bash -n bin/*.sh 2> /dev/null &&
    find lib -name "*.sh" -type f -exec bash -n {} \; 2> /dev/null; then
    printf "${GREEN}✓ Syntax check passed${NC}\n"
else
    printf "${RED}✗ Syntax check failed${NC}\n"
    ((FAILED++))
fi
echo ""

# 3. Unit Tests
echo "3. Running unit tests..."
if command -v bats > /dev/null 2>&1; then
    # Note: bats might detect non-TTY and suppress color.
    # Adding --tap prevents spinner issues in background.
    if bats tests/*.bats; then
        printf "${GREEN}✓ Unit tests passed${NC}\n"
    else
        printf "${RED}✗ Unit tests failed${NC}\n"
        ((FAILED++))
    fi
else
    printf "${YELLOW}⚠ Bats not installed, skipping unit tests${NC}\n"
    echo "  Install with: brew install bats-core"
fi
echo ""

# 4. Go Tests
echo "4. Running Go tests..."
if command -v go > /dev/null 2>&1; then
    if go build ./... && go vet ./cmd/... && go test ./cmd/...; then
        printf "${GREEN}✓ Go tests passed${NC}\n"
    else
        printf "${RED}✗ Go tests failed${NC}\n"
        ((FAILED++))
    fi
else
    printf "${YELLOW}⚠ Go not installed, skipping Go tests${NC}\n"
fi
echo ""

# 5. Module Loading Test
echo "5. Testing module loading..."
if bash -c 'source lib/core/common.sh && echo "OK"' > /dev/null 2>&1; then
    printf "${GREEN}✓ Module loading passed${NC}\n"
else
    printf "${RED}✗ Module loading failed${NC}\n"
    ((FAILED++))
fi
echo ""

# 6. Integration Tests
echo "6. Running integration tests..."
export MOLE_MAX_PARALLEL_JOBS=30
if ./bin/clean.sh --dry-run > /dev/null 2>&1; then
    printf "${GREEN}✓ Clean dry-run passed${NC}\n"
else
    printf "${RED}✗ Clean dry-run failed${NC}\n"
    ((FAILED++))
fi
echo ""

# Summary
echo "==============================="
if [[ $FAILED -eq 0 ]]; then
    printf "${GREEN}All tests passed!${NC}\n"
    echo ""
    echo "You can now commit your changes."
    exit 0
else
    printf "${RED}$FAILED test(s) failed!${NC}\n"
    echo ""
    echo "Please fix the failing tests before committing."
    exit 1
fi

@@ -18,7 +18,16 @@ log_step() { echo -e "${BLUE}${ICON_STEP}${NC} $1"; }
log_success() { echo -e "${GREEN}${ICON_SUCCESS}${NC} $1"; }
log_warn() { echo -e "${YELLOW}${ICON_WARN}${NC} $1"; }
log_error() { echo -e "${RED}${ICON_ERR}${NC} $1"; }

log_header() { echo -e "\n${BLUE}==== $1 ====${NC}\n"; }
is_interactive() { [[ -t 1 && -r /dev/tty ]]; }
prompt_enter() {
    local prompt="$1"
    if is_interactive; then
        read -r -p "$prompt" < /dev/tty || true
    else
        echo "$prompt"
    fi
}
detect_mo() {
    if command -v mo > /dev/null 2>&1; then
        command -v mo
@@ -223,42 +232,44 @@ EOF
create_raycast_commands() {
    local mo_bin="$1"
    local default_dir="$HOME/Library/Application Support/Raycast/script-commands"
    local alt_dir="$HOME/Documents/Raycast/Scripts"
    local dirs=()

    if [[ -d "$default_dir" ]]; then
        dirs+=("$default_dir")
    fi
    if [[ -d "$alt_dir" ]]; then
        dirs+=("$alt_dir")
    fi
    if [[ ${#dirs[@]} -eq 0 ]]; then
        dirs+=("$default_dir")
    fi
    local dir="$default_dir"

    log_step "Installing Raycast commands..."
    for dir in "${dirs[@]}"; do
        mkdir -p "$dir"
        write_raycast_script "$dir/mole-clean.sh" "clean" "$mo_bin" "clean"
        write_raycast_script "$dir/mole-uninstall.sh" "uninstall" "$mo_bin" "uninstall"
        write_raycast_script "$dir/mole-optimize.sh" "optimize" "$mo_bin" "optimize"
        write_raycast_script "$dir/mole-analyze.sh" "analyze" "$mo_bin" "analyze"
        write_raycast_script "$dir/mole-status.sh" "status" "$mo_bin" "status"
        log_success "Scripts ready in: $dir"
    done
    mkdir -p "$dir"
    write_raycast_script "$dir/mole-clean.sh" "clean" "$mo_bin" "clean"
    write_raycast_script "$dir/mole-uninstall.sh" "uninstall" "$mo_bin" "uninstall"
    write_raycast_script "$dir/mole-optimize.sh" "optimize" "$mo_bin" "optimize"
    write_raycast_script "$dir/mole-analyze.sh" "analyze" "$mo_bin" "analyze"
    write_raycast_script "$dir/mole-status.sh" "status" "$mo_bin" "status"
    log_success "Scripts ready in: $dir"

    echo ""
    if open "raycast://extensions/script-commands" > /dev/null 2>&1; then
        log_step "Raycast settings opened."
    log_header "Raycast Configuration"
    if command -v open > /dev/null 2>&1; then
        if open "raycast://extensions/raycast/raycast-settings/extensions" > /dev/null 2>&1; then
            log_step "Raycast settings opened."
        else
            log_warn "Could not auto-open Raycast."
        fi
    else
        log_warn "Could not auto-open Raycast."
        log_warn "open command not available; please open Raycast manually."
    fi

    echo ""
    echo "Next steps to activate Raycast commands:"
    echo "  1. Open Raycast (⌘ Space)"
    echo "  2. Search for 'Reload Script Directories'"
    echo "  3. Press Enter to load new commands"
    echo "If Raycast asks to add a Script Directory, use:"
    echo "  $dir"

    if is_interactive; then
        log_header "Finalizing Setup"
        prompt_enter "Press [Enter] to reload script directories in Raycast..."
        if command -v open > /dev/null 2>&1 && open "raycast://extensions/raycast/raycast/reload-script-directories" > /dev/null 2>&1; then
            log_step "Raycast script directories reloaded."
        else
            log_warn "Could not auto-reload Raycast script directories."
        fi

        log_success "Raycast setup complete!"
    else
        log_warn "Non-interactive mode; skipping Raycast reload. Please run 'Reload Script Directories' in Raycast."
    fi
}

uuid() {
@@ -277,7 +288,6 @@ create_alfred_workflow() {
    local workflows_dir="$prefs_dir/workflows"

    if [[ ! -d "$workflows_dir" ]]; then
        log_warn "Alfred preferences not found at $workflows_dir. Skipping Alfred workflow."
        return
    fi

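For context on the hunk above: Raycast discovers script commands through commented metadata at the top of each generated file. The exact template lives in write_raycast_script, which is outside this hunk, so the values below are hypothetical; a minimal hand-written equivalent would look roughly like:

#!/bin/bash
# Required parameters:
# @raycast.schemaVersion 1
# @raycast.title Mole Clean
# @raycast.mode fullOutput
#
# Optional parameters:
# @raycast.packageName Mole

exec "$HOME/.local/bin/mo" clean # hypothetical path; the generator substitutes $mo_bin
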
128 scripts/test.sh Executable file
@@ -0,0 +1,128 @@
#!/bin/bash
# Test runner for Mole.
# Runs unit, Go, and integration tests.
# Exits non-zero on failures.

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

cd "$PROJECT_ROOT"

# shellcheck source=lib/core/file_ops.sh
source "$PROJECT_ROOT/lib/core/file_ops.sh"

echo "==============================="
echo "Mole Test Runner"
echo "==============================="
echo ""

FAILED=0

echo "1. Linting test scripts..."
if command -v shellcheck > /dev/null 2>&1; then
    TEST_FILES=()
    while IFS= read -r file; do
        TEST_FILES+=("$file")
    done < <(find tests -type f \( -name '*.bats' -o -name '*.sh' \) | sort)
    if [[ ${#TEST_FILES[@]} -gt 0 ]]; then
        if shellcheck --rcfile "$PROJECT_ROOT/.shellcheckrc" "${TEST_FILES[@]}"; then
            printf "${GREEN}${ICON_SUCCESS} Test script lint passed${NC}\n"
        else
            printf "${RED}${ICON_ERROR} Test script lint failed${NC}\n"
            ((FAILED++))
        fi
    else
        printf "${YELLOW}${ICON_WARNING} No test scripts found, skipping${NC}\n"
    fi
else
    printf "${YELLOW}${ICON_WARNING} shellcheck not installed, skipping${NC}\n"
fi
echo ""

echo "2. Running unit tests..."
if command -v bats > /dev/null 2>&1 && [ -d "tests" ]; then
    if [[ -z "${TERM:-}" ]]; then
        export TERM="xterm-256color"
    fi
    if [[ $# -eq 0 ]]; then
        set -- tests
    fi
    if [[ -t 1 ]]; then
        if bats -p "$@" | sed -e 's/^ok /OK /' -e 's/^not ok /FAIL /'; then
            printf "${GREEN}${ICON_SUCCESS} Unit tests passed${NC}\n"
        else
            printf "${RED}${ICON_ERROR} Unit tests failed${NC}\n"
            ((FAILED++))
        fi
    else
        if TERM="${TERM:-xterm-256color}" bats --tap "$@" | sed -e 's/^ok /OK /' -e 's/^not ok /FAIL /'; then
            printf "${GREEN}${ICON_SUCCESS} Unit tests passed${NC}\n"
        else
            printf "${RED}${ICON_ERROR} Unit tests failed${NC}\n"
            ((FAILED++))
        fi
    fi
else
    printf "${YELLOW}${ICON_WARNING} bats not installed or no tests found, skipping${NC}\n"
fi
echo ""

echo "3. Running Go tests..."
if command -v go > /dev/null 2>&1; then
    if go build ./... > /dev/null 2>&1 && go vet ./cmd/... > /dev/null 2>&1 && go test ./cmd/... > /dev/null 2>&1; then
        printf "${GREEN}${ICON_SUCCESS} Go tests passed${NC}\n"
    else
        printf "${RED}${ICON_ERROR} Go tests failed${NC}\n"
        ((FAILED++))
    fi
else
    printf "${YELLOW}${ICON_WARNING} Go not installed, skipping Go tests${NC}\n"
fi
echo ""

echo "4. Testing module loading..."
if bash -c 'source lib/core/common.sh && echo "OK"' > /dev/null 2>&1; then
    printf "${GREEN}${ICON_SUCCESS} Module loading passed${NC}\n"
else
    printf "${RED}${ICON_ERROR} Module loading failed${NC}\n"
    ((FAILED++))
fi
echo ""

echo "5. Running integration tests..."
# Quick syntax check for main scripts
if bash -n mole && bash -n bin/clean.sh && bash -n bin/optimize.sh; then
    printf "${GREEN}${ICON_SUCCESS} Integration tests passed${NC}\n"
else
    printf "${RED}${ICON_ERROR} Integration tests failed${NC}\n"
    ((FAILED++))
fi
echo ""

echo "6. Testing installation..."
# Skip if Homebrew mole is installed (install.sh will refuse to overwrite)
if brew list mole &> /dev/null; then
    printf "${GREEN}${ICON_SUCCESS} Installation test skipped (Homebrew)${NC}\n"
elif ./install.sh --prefix /tmp/mole-test > /dev/null 2>&1; then
    if [ -f /tmp/mole-test/mole ]; then
        printf "${GREEN}${ICON_SUCCESS} Installation test passed${NC}\n"
    else
        printf "${RED}${ICON_ERROR} Installation test failed${NC}\n"
        ((FAILED++))
    fi
else
    printf "${RED}${ICON_ERROR} Installation test failed${NC}\n"
    ((FAILED++))
fi
safe_remove "/tmp/mole-test" true || true
echo ""

echo "==============================="
if [[ $FAILED -eq 0 ]]; then
    printf "${GREEN}${ICON_SUCCESS} All tests passed!${NC}\n"
    exit 0
fi
printf "${RED}${ICON_ERROR} $FAILED test(s) failed!${NC}\n"
exit 1

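One subtlety in the unit-test step of the new runner above: the result of `bats ... | sed ...` drives an `if`, which normally tests only the last command in the pipeline (sed). It works here because the script sets `set -euo pipefail` at the top, so a bats failure still propagates through the pipe. A minimal sketch of the same pattern in isolation:

#!/bin/bash
set -o pipefail # without this, the `if` would test sed's status, not bats'
if bats --tap tests | sed -e 's/^ok /OK /' -e 's/^not ok /FAIL /'; then
    echo "unit tests passed"
else
    echo "unit tests failed"
fi
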
72 tests/app_caches.bats Normal file
@@ -0,0 +1,72 @@
#!/usr/bin/env bats

setup_file() {
    PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
    export PROJECT_ROOT

    ORIGINAL_HOME="${HOME:-}"
    export ORIGINAL_HOME

    HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-app-caches.XXXXXX")"
    export HOME

    mkdir -p "$HOME"
}

teardown_file() {
    rm -rf "$HOME"
    if [[ -n "${ORIGINAL_HOME:-}" ]]; then
        export HOME="$ORIGINAL_HOME"
    fi
}

@test "clean_xcode_tools skips derived data when Xcode running" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" /bin/bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
pgrep() { return 0; }
safe_clean() { echo "$2"; }
clean_xcode_tools
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"Xcode is running"* ]]
    [[ "$output" != *"derived data"* ]]
    [[ "$output" != *"archives"* ]]
}

@test "clean_media_players protects spotify offline cache" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" /bin/bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
mkdir -p "$HOME/Library/Application Support/Spotify/PersistentCache/Storage"
touch "$HOME/Library/Application Support/Spotify/PersistentCache/Storage/offline.bnk"
safe_clean() { echo "CLEAN:$2"; }
clean_media_players
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"Spotify cache protected"* ]]
    [[ "$output" != *"CLEAN: Spotify cache"* ]]
}

@test "clean_user_gui_applications calls all sections" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" /bin/bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
stop_section_spinner() { :; }
safe_clean() { :; }
clean_xcode_tools() { echo "xcode"; }
clean_code_editors() { echo "editors"; }
clean_communication_apps() { echo "comm"; }
clean_user_gui_applications
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"xcode"* ]]
    [[ "$output" == *"editors"* ]]
    [[ "$output" == *"comm"* ]]
}

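The bats files added in this commit all follow the same isolation pattern: each @test pipes a heredoc into a fresh non-interactive bash, sources only the module under test, and shadows collaborators (safe_clean, pgrep, spinner helpers) with shell functions so assertions can observe intended side effects without touching the real disk. A stripped-down sketch of that pattern, independent of Mole's modules:

#!/usr/bin/env bats

@test "sketch: a function override intercepts the side effect" {
    run bash --noprofile --norc <<'EOF'
# Shell functions take precedence over external commands,
# so this shadows /bin/rm inside the subshell only.
rm() { echo "would remove: $*"; }
rm -rf /tmp/example
EOF
    [ "$status" -eq 0 ]
    [[ "$output" == *"would remove: -rf /tmp/example"* ]]
}
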
113 tests/app_caches_more.bats Normal file
@@ -0,0 +1,113 @@
#!/usr/bin/env bats

setup_file() {
    PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
    export PROJECT_ROOT

    ORIGINAL_HOME="${HOME:-}"
    export ORIGINAL_HOME

    HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-app-caches-more.XXXXXX")"
    export HOME

    mkdir -p "$HOME"
}

teardown_file() {
    rm -rf "$HOME"
    if [[ -n "${ORIGINAL_HOME:-}" ]]; then
        export HOME="$ORIGINAL_HOME"
    fi
}

@test "clean_ai_apps calls expected caches" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_ai_apps
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"ChatGPT cache"* ]]
    [[ "$output" == *"Claude desktop cache"* ]]
}

@test "clean_design_tools calls expected caches" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_design_tools
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"Sketch cache"* ]]
    [[ "$output" == *"Figma cache"* ]]
}

@test "clean_dingtalk calls expected caches" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_dingtalk
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"DingTalk iDingTalk cache"* ]]
    [[ "$output" == *"DingTalk logs"* ]]
}

@test "clean_download_managers calls expected caches" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_download_managers
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"Aria2 cache"* ]]
    [[ "$output" == *"qBittorrent cache"* ]]
}

@test "clean_productivity_apps calls expected caches" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_productivity_apps
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"MiaoYan cache"* ]]
    [[ "$output" == *"Flomo cache"* ]]
}

@test "clean_screenshot_tools calls expected caches" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_screenshot_tools
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"CleanShot cache"* ]]
    [[ "$output" == *"Xnip cache"* ]]
}

@test "clean_office_applications calls expected caches" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/user.sh"
stop_section_spinner() { :; }
safe_clean() { echo "$2"; }
clean_office_applications
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"Microsoft Word cache"* ]]
    [[ "$output" == *"Apple iWork cache"* ]]
}

32 tests/app_protection.bats Normal file
@@ -0,0 +1,32 @@
#!/usr/bin/env bats

setup_file() {
    PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
    export PROJECT_ROOT
}

@test "is_critical_system_component matches known system services" {
    run bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/app_protection.sh"
is_critical_system_component "backgroundtaskmanagement" && echo "yes"
is_critical_system_component "SystemSettings" && echo "yes"
EOF
    [ "$status" -eq 0 ]
    [[ "${lines[0]}" == "yes" ]]
    [[ "${lines[1]}" == "yes" ]]
}

@test "is_critical_system_component ignores non-system names" {
    run bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/app_protection.sh"
if is_critical_system_component "myapp"; then
    echo "bad"
else
    echo "ok"
fi
EOF
    [ "$status" -eq 0 ]
    [[ "$output" == "ok" ]]
}

90 tests/apps_module.bats Normal file
@@ -0,0 +1,90 @@
#!/usr/bin/env bats

setup_file() {
    PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
    export PROJECT_ROOT

    ORIGINAL_HOME="${HOME:-}"
    export ORIGINAL_HOME

    HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-apps-module.XXXXXX")"
    export HOME

    mkdir -p "$HOME"
}

teardown_file() {
    rm -rf "$HOME"
    if [[ -n "${ORIGINAL_HOME:-}" ]]; then
        export HOME="$ORIGINAL_HOME"
    fi
}

@test "clean_ds_store_tree reports dry-run summary" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=true /bin/bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/apps.sh"
start_inline_spinner() { :; }
stop_section_spinner() { :; }
note_activity() { :; }
get_file_size() { echo 10; }
bytes_to_human() { echo "0B"; }
files_cleaned=0
total_size_cleaned=0
total_items=0
mkdir -p "$HOME/test_ds"
touch "$HOME/test_ds/.DS_Store"
clean_ds_store_tree "$HOME/test_ds" "DS test"
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"DS test"* ]]
}

@test "scan_installed_apps uses cache when fresh" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/apps.sh"
mkdir -p "$HOME/.cache/mole"
echo "com.example.App" > "$HOME/.cache/mole/installed_apps_cache"
get_file_mtime() { date +%s; }
debug_log() { :; }
scan_installed_apps "$HOME/installed.txt"
cat "$HOME/installed.txt"
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"com.example.App"* ]]
}

@test "is_bundle_orphaned returns true for old uninstalled bundle" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" ORPHAN_AGE_THRESHOLD=60 bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/apps.sh"
should_protect_data() { return 1; }
get_file_mtime() { echo 0; }
if is_bundle_orphaned "com.example.Old" "$HOME/old" "$HOME/installed.txt"; then
    echo "orphan"
fi
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"orphan"* ]]
}

@test "clean_orphaned_app_data skips when no permission" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/apps.sh"
ls() { return 1; }
stop_section_spinner() { :; }
clean_orphaned_app_data
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"Skipped: No permission"* ]]
}

@@ -76,6 +76,7 @@ sudo() {
            echo "Installing Rosetta 2 stub output"
            return 0
            ;;
        /usr/libexec/ApplicationFirewall/socketfilterfw) return 0 ;;
        *) return 0 ;;
    esac
}

289 tests/browser_version_cleanup.bats Normal file
@@ -0,0 +1,289 @@
#!/usr/bin/env bats

setup_file() {
    PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
    export PROJECT_ROOT

    ORIGINAL_HOME="${HOME:-}"
    export ORIGINAL_HOME

    HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-browser-cleanup.XXXXXX")"
    export HOME

    mkdir -p "$HOME"
}

teardown_file() {
    rm -rf "$HOME"
    if [[ -n "${ORIGINAL_HOME:-}" ]]; then
        export HOME="$ORIGINAL_HOME"
    fi
}

@test "clean_chrome_old_versions skips when Chrome is running" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"

# Mock pgrep to simulate Chrome running
pgrep() { return 0; }
export -f pgrep

clean_chrome_old_versions
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"Google Chrome running"* ]]
    [[ "$output" == *"old versions cleanup skipped"* ]]
}

@test "clean_chrome_old_versions removes old versions but keeps current" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=true bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"

# Mock pgrep to simulate Chrome not running
pgrep() { return 1; }
export -f pgrep

# Create mock Chrome directory structure
CHROME_APP="$HOME/Applications/Google Chrome.app"
VERSIONS_DIR="$CHROME_APP/Contents/Frameworks/Google Chrome Framework.framework/Versions"
mkdir -p "$VERSIONS_DIR"/{128.0.0.0,129.0.0.0,130.0.0.0}

# Create Current symlink pointing to 130.0.0.0
ln -s "130.0.0.0" "$VERSIONS_DIR/Current"

# Mock functions
is_path_whitelisted() { return 1; }
get_path_size_kb() { echo "10240"; }
bytes_to_human() { echo "10M"; }
note_activity() { :; }
export -f is_path_whitelisted get_path_size_kb bytes_to_human note_activity

# Initialize counters
files_cleaned=0
total_size_cleaned=0
total_items=0

clean_chrome_old_versions

# Verify output mentions old versions cleanup
echo "Cleaned: $files_cleaned items"
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"Chrome old versions"* ]]
    [[ "$output" == *"dry"* ]]
    [[ "$output" == *"Cleaned: 2 items"* ]]
}

@test "clean_chrome_old_versions respects whitelist" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=true bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"

# Mock pgrep to simulate Chrome not running
pgrep() { return 1; }
export -f pgrep

# Create mock Chrome directory structure
CHROME_APP="$HOME/Applications/Google Chrome.app"
VERSIONS_DIR="$CHROME_APP/Contents/Frameworks/Google Chrome Framework.framework/Versions"
mkdir -p "$VERSIONS_DIR"/{128.0.0.0,129.0.0.0,130.0.0.0}

# Create Current symlink pointing to 130.0.0.0
ln -s "130.0.0.0" "$VERSIONS_DIR/Current"

# Mock is_path_whitelisted to protect version 128.0.0.0
is_path_whitelisted() {
    [[ "$1" == *"128.0.0.0"* ]] && return 0
    return 1
}
get_path_size_kb() { echo "10240"; }
bytes_to_human() { echo "10M"; }
note_activity() { :; }
export -f is_path_whitelisted get_path_size_kb bytes_to_human note_activity

# Initialize counters
files_cleaned=0
total_size_cleaned=0
total_items=0

clean_chrome_old_versions

# Should only clean 129.0.0.0 (not 128.0.0.0 which is whitelisted)
echo "Cleaned: $files_cleaned items"
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"Cleaned: 1 items"* ]]
}

@test "clean_chrome_old_versions DRY_RUN mode does not delete files" {
    # Create test directory
    CHROME_APP="$HOME/Applications/Google Chrome.app"
    VERSIONS_DIR="$CHROME_APP/Contents/Frameworks/Google Chrome Framework.framework/Versions"
    mkdir -p "$VERSIONS_DIR"/{128.0.0.0,130.0.0.0}

    # Remove Current if it exists as a directory, then create symlink
    rm -rf "$VERSIONS_DIR/Current"
    ln -s "130.0.0.0" "$VERSIONS_DIR/Current"

    # Create a marker file in old version
    touch "$VERSIONS_DIR/128.0.0.0/marker.txt"

    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=true bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"

pgrep() { return 1; }
is_path_whitelisted() { return 1; }
get_path_size_kb() { echo "10240"; }
bytes_to_human() { echo "10M"; }
note_activity() { :; }
export -f pgrep is_path_whitelisted get_path_size_kb bytes_to_human note_activity

files_cleaned=0
total_size_cleaned=0
total_items=0

clean_chrome_old_versions
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"dry"* ]]
    # Verify marker file still exists (not deleted in dry run)
    [ -f "$VERSIONS_DIR/128.0.0.0/marker.txt" ]
}

@test "clean_chrome_old_versions handles missing Current symlink gracefully" {
    # Use a fresh temp directory for this test
    TEST_HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-test5.XXXXXX")"

    run env HOME="$TEST_HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"

pgrep() { return 1; }
is_path_whitelisted() { return 1; }
get_path_size_kb() { echo "10240"; }
bytes_to_human() { echo "10M"; }
note_activity() { :; }
export -f pgrep is_path_whitelisted get_path_size_kb bytes_to_human note_activity

# Initialize counters to prevent unbound variable errors
files_cleaned=0
total_size_cleaned=0
total_items=0

# Create Chrome app without Current symlink
CHROME_APP="$HOME/Applications/Google Chrome.app"
VERSIONS_DIR="$CHROME_APP/Contents/Frameworks/Google Chrome Framework.framework/Versions"
mkdir -p "$VERSIONS_DIR"/{128.0.0.0,129.0.0.0}
# No Current symlink created

clean_chrome_old_versions
EOF

    rm -rf "$TEST_HOME"
    [ "$status" -eq 0 ]
    # Should exit gracefully with no output
}

@test "clean_edge_old_versions skips when Edge is running" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"

# Mock pgrep to simulate Edge running
pgrep() { return 0; }
export -f pgrep

clean_edge_old_versions
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"Microsoft Edge running"* ]]
    [[ "$output" == *"old versions cleanup skipped"* ]]
}

@test "clean_edge_old_versions removes old versions but keeps current" {
    run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=true bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"

pgrep() { return 1; }
export -f pgrep

# Create mock Edge directory structure
EDGE_APP="$HOME/Applications/Microsoft Edge.app"
VERSIONS_DIR="$EDGE_APP/Contents/Frameworks/Microsoft Edge Framework.framework/Versions"
mkdir -p "$VERSIONS_DIR"/{120.0.0.0,121.0.0.0,122.0.0.0}

# Create Current symlink pointing to 122.0.0.0
ln -s "122.0.0.0" "$VERSIONS_DIR/Current"

is_path_whitelisted() { return 1; }
get_path_size_kb() { echo "10240"; }
bytes_to_human() { echo "10M"; }
note_activity() { :; }
export -f is_path_whitelisted get_path_size_kb bytes_to_human note_activity

files_cleaned=0
total_size_cleaned=0
total_items=0

clean_edge_old_versions

echo "Cleaned: $files_cleaned items"
EOF

    [ "$status" -eq 0 ]
    [[ "$output" == *"Edge old versions"* ]]
    [[ "$output" == *"dry"* ]]
    [[ "$output" == *"Cleaned: 2 items"* ]]
}

@test "clean_edge_old_versions handles no old versions gracefully" {
    # Use a fresh temp directory for this test
    TEST_HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-test8.XXXXXX")"

    run env HOME="$TEST_HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"

pgrep() { return 1; }
is_path_whitelisted() { return 1; }
get_path_size_kb() { echo "10240"; }
bytes_to_human() { echo "10M"; }
note_activity() { :; }
export -f pgrep is_path_whitelisted get_path_size_kb bytes_to_human note_activity

# Initialize counters
files_cleaned=0
total_size_cleaned=0
total_items=0

# Create Edge with only current version
EDGE_APP="$HOME/Applications/Microsoft Edge.app"
VERSIONS_DIR="$EDGE_APP/Contents/Frameworks/Microsoft Edge Framework.framework/Versions"
mkdir -p "$VERSIONS_DIR/122.0.0.0"
ln -s "122.0.0.0" "$VERSIONS_DIR/Current"

clean_edge_old_versions
EOF

    rm -rf "$TEST_HOME"
    [ "$status" -eq 0 ]
    # Should exit gracefully with no cleanup output
    [[ "$output" != *"Edge old versions"* ]]
}

177
tests/clean.bats
177
tests/clean.bats
@@ -28,7 +28,7 @@ setup() {
|
||||
}
|
||||
|
||||
@test "mo clean --dry-run skips system cleanup in non-interactive mode" {
|
||||
run env HOME="$HOME" "$PROJECT_ROOT/mole" clean --dry-run
|
||||
run env HOME="$HOME" MOLE_TEST_MODE=1 "$PROJECT_ROOT/mole" clean --dry-run
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"Dry Run Mode"* ]]
|
||||
[[ "$output" != *"Deep system-level cleanup"* ]]
|
||||
@@ -38,7 +38,7 @@ setup() {
|
||||
mkdir -p "$HOME/Library/Caches/TestApp"
|
||||
echo "cache data" > "$HOME/Library/Caches/TestApp/cache.tmp"
|
||||
|
||||
run env HOME="$HOME" "$PROJECT_ROOT/mole" clean --dry-run
|
||||
run env HOME="$HOME" MOLE_TEST_MODE=1 "$PROJECT_ROOT/mole" clean --dry-run
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"User app cache"* ]]
|
||||
[[ "$output" == *"Potential space"* ]]
|
||||
@@ -53,7 +53,7 @@ setup() {
|
||||
$HOME/Library/Caches/WhitelistedApp*
|
||||
EOF
|
||||
|
||||
run env HOME="$HOME" "$PROJECT_ROOT/mole" clean --dry-run
|
||||
run env HOME="$HOME" MOLE_TEST_MODE=1 "$PROJECT_ROOT/mole" clean --dry-run
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"Protected"* ]]
|
||||
[ -f "$HOME/Library/Caches/WhitelistedApp/data.tmp" ]
|
||||
@@ -63,7 +63,7 @@ EOF
|
||||
mkdir -p "$HOME/.m2/repository/org/example"
|
||||
echo "dependency" > "$HOME/.m2/repository/org/example/lib.jar"
|
||||
|
||||
run env HOME="$HOME" "$PROJECT_ROOT/mole" clean --dry-run
|
||||
run env HOME="$HOME" MOLE_TEST_MODE=1 "$PROJECT_ROOT/mole" clean --dry-run
|
||||
[ "$status" -eq 0 ]
|
||||
[ -f "$HOME/.m2/repository/org/example/lib.jar" ]
|
||||
[[ "$output" != *"Maven repository cache"* ]]
|
||||
@@ -86,8 +86,175 @@ EOF
|
||||
FINDER_METADATA_SENTINEL
|
||||
EOF
|
||||
|
||||
run env HOME="$HOME" "$PROJECT_ROOT/mole" clean --dry-run
|
||||
# Test whitelist logic directly instead of running full clean
|
||||
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
|
||||
set -euo pipefail
|
||||
source "$PROJECT_ROOT/lib/core/common.sh"
|
||||
source "$PROJECT_ROOT/lib/manage/whitelist.sh"
|
||||
load_whitelist
|
||||
if is_whitelisted "$HOME/Documents/.DS_Store"; then
|
||||
echo "protected by whitelist"
|
||||
fi
|
||||
EOF
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"protected by whitelist"* ]]
|
||||
[ -f "$HOME/Documents/.DS_Store" ]
|
||||
}
|
||||
|
||||
@test "clean_recent_items removes shared file lists" {
|
||||
local shared_dir="$HOME/Library/Application Support/com.apple.sharedfilelist"
|
||||
mkdir -p "$shared_dir"
|
||||
touch "$shared_dir/com.apple.LSSharedFileList.RecentApplications.sfl2"
|
||||
touch "$shared_dir/com.apple.LSSharedFileList.RecentDocuments.sfl2"
|
||||
|
||||
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
|
||||
set -euo pipefail
|
||||
source "$PROJECT_ROOT/lib/core/common.sh"
|
||||
source "$PROJECT_ROOT/lib/clean/user.sh"
|
||||
safe_clean() {
|
||||
echo "safe_clean $1"
|
||||
}
|
||||
clean_recent_items
|
||||
EOF
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"Recent"* ]]
|
||||
}

@test "clean_recent_items handles missing shared directory" {
rm -rf "$HOME/Library/Application Support/com.apple.sharedfilelist"

run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
safe_clean() {
echo "safe_clean $1"
}
clean_recent_items
EOF

[ "$status" -eq 0 ]
}

@test "clean_mail_downloads skips cleanup when size below threshold" {
mkdir -p "$HOME/Library/Mail Downloads"
echo "test" > "$HOME/Library/Mail Downloads/small.txt"

run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
clean_mail_downloads
EOF

[ "$status" -eq 0 ]
[ -f "$HOME/Library/Mail Downloads/small.txt" ]
}

@test "clean_mail_downloads removes old attachments" {
mkdir -p "$HOME/Library/Mail Downloads"
touch "$HOME/Library/Mail Downloads/old.pdf"
touch -t 202301010000 "$HOME/Library/Mail Downloads/old.pdf"

dd if=/dev/zero of="$HOME/Library/Mail Downloads/dummy.dat" bs=1024 count=6000 2>/dev/null

[ -f "$HOME/Library/Mail Downloads/old.pdf" ]

run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
clean_mail_downloads
EOF

[ "$status" -eq 0 ]
[ ! -f "$HOME/Library/Mail Downloads/old.pdf" ]
}

@test "clean_time_machine_failed_backups detects running backup correctly" {
if ! command -v tmutil > /dev/null 2>&1; then
skip "tmutil not available"
fi

local mock_bin="$HOME/bin"
mkdir -p "$mock_bin"

cat > "$mock_bin/tmutil" << 'MOCK_TMUTIL'
#!/bin/bash
if [[ "$1" == "status" ]]; then
cat << 'TMUTIL_OUTPUT'
Backup session status:
{
ClientID = "com.apple.backupd";
Running = 0;
}
TMUTIL_OUTPUT
elif [[ "$1" == "destinationinfo" ]]; then
cat << 'DEST_OUTPUT'
====================================================
Name : TestBackup
Kind : Local
Mount Point : /Volumes/TestBackup
ID : 12345678-1234-1234-1234-123456789012
====================================================
DEST_OUTPUT
fi
MOCK_TMUTIL
chmod +x "$mock_bin/tmutil"

run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" PATH="$mock_bin:$PATH" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/system.sh"

clean_time_machine_failed_backups
EOF

[ "$status" -eq 0 ]
[[ "$output" != *"Time Machine backup in progress, skipping cleanup"* ]]
}

@test "clean_time_machine_failed_backups skips when backup is actually running" {
if ! command -v tmutil > /dev/null 2>&1; then
skip "tmutil not available"
fi

local mock_bin="$HOME/bin"
mkdir -p "$mock_bin"

cat > "$mock_bin/tmutil" << 'MOCK_TMUTIL'
#!/bin/bash
if [[ "$1" == "status" ]]; then
cat << 'TMUTIL_OUTPUT'
Backup session status:
{
ClientID = "com.apple.backupd";
Running = 1;
}
TMUTIL_OUTPUT
elif [[ "$1" == "destinationinfo" ]]; then
cat << 'DEST_OUTPUT'
====================================================
Name : TestBackup
Kind : Local
Mount Point : /Volumes/TestBackup
ID : 12345678-1234-1234-1234-123456789012
====================================================
DEST_OUTPUT
fi
MOCK_TMUTIL
chmod +x "$mock_bin/tmutil"

run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" PATH="$mock_bin:$PATH" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/system.sh"

clean_time_machine_failed_backups
EOF

[ "$status" -eq 0 ]
[[ "$output" == *"Time Machine backup in progress, skipping cleanup"* ]]
}
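Both Time Machine tests rely on the same mocking technique: a fake tmutil is written into $HOME/bin and that directory is prepended to PATH, so the mock shadows the real binary for every process the test spawns. The pattern generalizes to any external CLI dependency:

# PATH-shadowing mock: lookup order makes the fake win over /usr/bin/tmutil.
mkdir -p "$HOME/bin"
cat > "$HOME/bin/tmutil" << 'MOCK'
#!/bin/bash
echo 'Running = 1;'   # canned output; never touches real backups
MOCK
chmod +x "$HOME/bin/tmutil"
PATH="$HOME/bin:$PATH" tmutil status   # resolves to the mock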

@@ -27,77 +27,84 @@ setup() {
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/caches.sh"

# Clean permission flag for each test
# Mock run_with_timeout to skip timeout overhead in tests
# shellcheck disable=SC2329
run_with_timeout() {
shift # Remove timeout argument
"$@"
}
export -f run_with_timeout

rm -f "$HOME/.cache/mole/permissions_granted"
}
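The key detail in this setup block is export -f: a plain function definition would vanish in child shells, but exporting it places the function in the environment, so the run_with_timeout mock survives into every `run bash -c ...` subprocess the tests spawn. A minimal demonstration:

# export -f propagates a bash function to child bash processes.
run_with_timeout() { shift; "$@"; }    # drop the timeout argument, run directly
export -f run_with_timeout
bash -c 'run_with_timeout 5 echo ok'   # the child shell still sees the mock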

# Test check_tcc_permissions in non-interactive mode
@test "check_tcc_permissions skips in non-interactive mode" {
# Redirect stdin to simulate non-TTY
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; source '$PROJECT_ROOT/lib/clean/caches.sh'; check_tcc_permissions" < /dev/null
[ "$status" -eq 0 ]
# Should not create permission flag in non-interactive mode
[[ ! -f "$HOME/.cache/mole/permissions_granted" ]]
}

# Test check_tcc_permissions with existing permission flag
@test "check_tcc_permissions skips when permissions already granted" {
# Create permission flag
mkdir -p "$HOME/.cache/mole"
touch "$HOME/.cache/mole/permissions_granted"

# Even in TTY mode, should skip if flag exists
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; source '$PROJECT_ROOT/lib/clean/caches.sh'; [[ -t 1 ]] || true; check_tcc_permissions"
[ "$status" -eq 0 ]
}

# Test check_tcc_permissions directory checks
@test "check_tcc_permissions validates protected directories" {
# The function checks these directories exist:
# - ~/Library/Caches
# - ~/Library/Logs
# - ~/Library/Application Support
# - ~/Library/Containers
# - ~/.cache

# Ensure test environment has these directories
[[ -d "$HOME/Library/Caches" ]]
[[ -d "$HOME/Library/Logs" ]]
[[ -d "$HOME/.cache/mole" ]]

# Function should handle missing directories gracefully
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; source '$PROJECT_ROOT/lib/clean/caches.sh'; check_tcc_permissions < /dev/null"
[ "$status" -eq 0 ]
}

# Test clean_service_worker_cache with non-existent path
@test "clean_service_worker_cache returns early when path doesn't exist" {
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; source '$PROJECT_ROOT/lib/clean/caches.sh'; clean_service_worker_cache 'TestBrowser' '/nonexistent/path'"
[ "$status" -eq 0 ]
}

# Test clean_service_worker_cache with empty directory
@test "clean_service_worker_cache handles empty cache directory" {
local test_cache="$HOME/test_sw_cache"
mkdir -p "$test_cache"

run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; source '$PROJECT_ROOT/lib/clean/caches.sh'; clean_service_worker_cache 'TestBrowser' '$test_cache'"
run bash -c "
run_with_timeout() { shift; \"\$@\"; }
export -f run_with_timeout
source '$PROJECT_ROOT/lib/core/common.sh'
source '$PROJECT_ROOT/lib/clean/caches.sh'
clean_service_worker_cache 'TestBrowser' '$test_cache'
"
[ "$status" -eq 0 ]

rm -rf "$test_cache"
}

# Test clean_service_worker_cache domain protection
@test "clean_service_worker_cache protects specified domains" {
local test_cache="$HOME/test_sw_cache"
mkdir -p "$test_cache/abc123_https_capcut.com_0"
mkdir -p "$test_cache/def456_https_example.com_0"

# Mock PROTECTED_SW_DOMAINS
export PROTECTED_SW_DOMAINS=("capcut.com" "photopea.com")

# Dry run to check protection logic
run bash -c "
run_with_timeout() {
local timeout=\"\$1\"
shift
if [[ \"\$1\" == \"get_path_size_kb\" ]]; then
echo 0
return 0
fi
if [[ \"\$1\" == \"sh\" ]]; then
printf '%s\n' \
'$test_cache/abc123_https_capcut.com_0' \
'$test_cache/def456_https_example.com_0'
return 0
fi
\"\$@\"
}
export -f run_with_timeout
export DRY_RUN=true
export PROTECTED_SW_DOMAINS=(capcut.com photopea.com)
source '$PROJECT_ROOT/lib/core/common.sh'
@@ -106,19 +113,15 @@ setup() {
"
[ "$status" -eq 0 ]

# Protected domain directory should still exist
[[ -d "$test_cache/abc123_https_capcut.com_0" ]]

rm -rf "$test_cache"
}
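The fixture names seeded above embed the origin as <hash>_<scheme>_<domain>_<n>, so protection can be decided from the directory name alone. A hypothetical sketch of that check, derived from the test fixtures rather than mole's sources:

# Hypothetical name-based protection check; mole's actual parsing may differ.
is_protected_sw_dir() {
  local dir_name="$1" domain
  for domain in "${PROTECTED_SW_DOMAINS[@]}"; do
    [[ "$dir_name" == *"_${domain}_"* ]] && return 0
  done
  return 1
}
is_protected_sw_dir "abc123_https_capcut.com_0" && echo kept   # matches capcut.com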

# Test clean_project_caches function
@test "clean_project_caches completes without errors" {
# Create test project structures
mkdir -p "$HOME/projects/test-app/.next/cache"
mkdir -p "$HOME/projects/python-app/__pycache__"

# Create some dummy cache files
touch "$HOME/projects/test-app/.next/cache/test.cache"
touch "$HOME/projects/python-app/__pycache__/module.pyc"

@@ -133,39 +136,30 @@ setup() {
rm -rf "$HOME/projects"
}

# Test clean_project_caches timeout protection
@test "clean_project_caches handles timeout gracefully" {
# Create a test directory structure
mkdir -p "$HOME/test-project/.next"

# Mock find to simulate slow operation
function find() {
sleep 2 # Simulate slow find
echo "$HOME/test-project/.next"
}
export -f find

# Should complete within reasonable time even with slow find
run timeout 15 bash -c "
source '$PROJECT_ROOT/lib/core/common.sh'
source '$PROJECT_ROOT/lib/clean/caches.sh'
clean_project_caches
"
# Either succeeds or times out gracefully (both acceptable)
[ "$status" -eq 0 ] || [ "$status" -eq 124 ]

rm -rf "$HOME/test-project"
}

# Test clean_project_caches exclusions
@test "clean_project_caches excludes Library and Trash directories" {
# These directories should be excluded from scan
mkdir -p "$HOME/Library/.next/cache"
mkdir -p "$HOME/.Trash/.next/cache"
mkdir -p "$HOME/projects/.next/cache"

# Only non-excluded directories should be scanned
# We can't easily test this without mocking, but we can verify no crashes
run bash -c "
export DRY_RUN=true
source '$PROJECT_ROOT/lib/core/common.sh'
@@ -176,4 +170,3 @@ setup() {

rm -rf "$HOME/projects"
}

102
tests/clean_extras.bats
Normal file
@@ -0,0 +1,102 @@
#!/usr/bin/env bats

setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT

ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME

HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-clean-extras.XXXXXX")"
export HOME

mkdir -p "$HOME"
}

teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}

@test "clean_cloud_storage calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
stop_section_spinner() { :; }
safe_clean() { echo "$2"; }
clean_cloud_storage
EOF

[ "$status" -eq 0 ]
[[ "$output" == *"Dropbox cache"* ]]
[[ "$output" == *"Google Drive cache"* ]]
}
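Every test in this new file shares one stub: safe_clean() { echo "$2"; }. Echoing the second argument and then asserting on labels like "Dropbox cache" implies a (path, description) calling convention for safe_clean — an assumption read off these tests, not confirmed against the library sources. Under that assumption the stub reduces each cleanup call to a log line:

# Assumed signature: safe_clean <path> <description> (inferred, not verified).
safe_clean() { echo "$2"; }
safe_clean "$HOME/Library/Caches/Dropbox" "Dropbox cache"   # prints: Dropbox cache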

@test "clean_virtualization_tools hits cache paths" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
stop_section_spinner() { :; }
safe_clean() { echo "$2"; }
clean_virtualization_tools
EOF

[ "$status" -eq 0 ]
[[ "$output" == *"VMware Fusion cache"* ]]
[[ "$output" == *"Parallels cache"* ]]
}

@test "clean_email_clients calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_email_clients
EOF

[ "$status" -eq 0 ]
[[ "$output" == *"Spark cache"* ]]
[[ "$output" == *"Airmail cache"* ]]
}

@test "clean_note_apps calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_note_apps
EOF

[ "$status" -eq 0 ]
[[ "$output" == *"Notion cache"* ]]
[[ "$output" == *"Obsidian cache"* ]]
}

@test "clean_task_apps calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_task_apps
EOF

[ "$status" -eq 0 ]
[[ "$output" == *"Todoist cache"* ]]
[[ "$output" == *"Any.do cache"* ]]
}

@test "scan_external_volumes skips when no volumes" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
run_with_timeout() { return 1; }
scan_external_volumes
EOF

[ "$status" -eq 0 ]
}

138
tests/clean_extras_more.bats
Normal file
@@ -0,0 +1,138 @@
#!/usr/bin/env bats

setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT

ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME

HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-clean-extras-more.XXXXXX")"
export HOME

mkdir -p "$HOME"
}

teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}

@test "clean_video_tools calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_video_tools
EOF

[ "$status" -eq 0 ]
[[ "$output" == *"ScreenFlow cache"* ]]
[[ "$output" == *"Final Cut Pro cache"* ]]
}

@test "clean_video_players calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_video_players
EOF

[ "$status" -eq 0 ]
[[ "$output" == *"IINA cache"* ]]
[[ "$output" == *"VLC cache"* ]]
}

@test "clean_3d_tools calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_3d_tools
EOF

[ "$status" -eq 0 ]
[[ "$output" == *"Blender cache"* ]]
[[ "$output" == *"Cinema 4D cache"* ]]
}

@test "clean_gaming_platforms calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_gaming_platforms
EOF

[ "$status" -eq 0 ]
[[ "$output" == *"Steam cache"* ]]
[[ "$output" == *"Epic Games cache"* ]]
}

@test "clean_translation_apps calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_translation_apps
EOF

[ "$status" -eq 0 ]
[[ "$output" == *"Youdao Dictionary cache"* ]]
[[ "$output" == *"Eudict cache"* ]]
}

@test "clean_launcher_apps calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_launcher_apps
EOF

[ "$status" -eq 0 ]
[[ "$output" == *"Alfred cache"* ]]
[[ "$output" == *"The Unarchiver cache"* ]]
}

@test "clean_remote_desktop calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_remote_desktop
EOF

[ "$status" -eq 0 ]
[[ "$output" == *"TeamViewer cache"* ]]
[[ "$output" == *"AnyDesk cache"* ]]
}

@test "clean_system_utils calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_system_utils
EOF

[ "$status" -eq 0 ]
[[ "$output" == *"Input Source Pro cache"* ]]
[[ "$output" == *"WakaTime cache"* ]]
}

@test "clean_shell_utils calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_shell_utils
EOF

[ "$status" -eq 0 ]
[[ "$output" == *"Zsh completion cache"* ]]
[[ "$output" == *"wget HSTS cache"* ]]
}
@@ -46,24 +46,18 @@ setup() {
}

@test "touchid status reports current configuration" {
# Don't test actual Touch ID config (system-dependent, may trigger prompts)
# Just verify the command exists and can run
run env HOME="$HOME" "$PROJECT_ROOT/mole" touchid status
[ "$status" -eq 0 ]
# Should output either "enabled" or "not configured" message
[[ "$output" == *"Touch ID"* ]]
}

@test "mo optimize command is recognized" {
# Test that optimize command exists without actually running it
# Running full optimize in tests is too slow (waits for sudo, runs health checks)
run bash -c "grep -q '\"optimize\")' '$PROJECT_ROOT/mole'"
[ "$status" -eq 0 ]
}

@test "mo analyze binary is valid" {
if [[ -f "$PROJECT_ROOT/bin/analyze-go" ]]; then
# Verify binary is executable and valid Universal Binary
[ -x "$PROJECT_ROOT/bin/analyze-go" ]
run file "$PROJECT_ROOT/bin/analyze-go"
[[ "$output" == *"Mach-O"* ]] || [[ "$output" == *"executable"* ]]

Some files were not shown because too many files have changed in this diff