Mirror of https://github.com/tw93/Mole.git (synced 2026-02-06 11:22:57 +00:00)
Merge branch 'main' into dev
8
.github/workflows/check.yml
vendored
@@ -21,7 +21,7 @@ jobs:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Cache Homebrew
|
||||
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v4
|
||||
uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v4
|
||||
with:
|
||||
path: |
|
||||
~/Library/Caches/Homebrew
|
||||
@@ -36,7 +36,7 @@ jobs:
|
||||
run: brew install shfmt shellcheck golangci-lint
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v5
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v5
|
||||
with:
|
||||
go-version: '1.24.6'
|
||||
|
||||
@@ -74,7 +74,7 @@ jobs:
|
||||
ref: ${{ (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository && github.head_ref) || github.ref }}
|
||||
|
||||
- name: Cache Homebrew
|
||||
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v4
|
||||
uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v4
|
||||
with:
|
||||
path: |
|
||||
~/Library/Caches/Homebrew
|
||||
@@ -89,7 +89,7 @@ jobs:
|
||||
run: brew install shfmt shellcheck golangci-lint
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v5
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v5
|
||||
with:
|
||||
go-version: '1.24.6'
|
||||
|
||||
|
||||
2
.github/workflows/release.yml
vendored
@@ -26,7 +26,7 @@ jobs:
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v5
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v5
|
||||
with:
|
||||
go-version: "1.24.6"
|
||||
|
||||
|
||||
4
.github/workflows/test.yml
vendored
@@ -17,7 +17,7 @@ jobs:
|
||||
run: brew install bats-core shellcheck
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v5
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v5
|
||||
with:
|
||||
go-version: "1.24.6"
|
||||
|
||||
@@ -79,7 +79,7 @@ jobs:
|
||||
echo "Checking for hardcoded secrets..."
|
||||
matches=$(grep -r "password\|secret\|api_key" --include="*.sh" . \
|
||||
| grep -v "# \|test" \
|
||||
| grep -v -E "lib/core/sudo\.sh|lib/core/app_protection\.sh|lib/clean/user\.sh|lib/clean/brew\.sh|bin/optimize\.sh|lib/clean/apps\.sh" || true)
|
||||
| grep -v -E "lib/core/sudo\.sh|lib/core/app_protection\.sh|lib/clean/user\.sh|lib/clean/brew\.sh|bin/optimize\.sh|lib/clean/apps\.sh|lib/uninstall/batch\.sh" || true)
|
||||
if [[ -n "$matches" ]]; then
|
||||
echo "$matches"
|
||||
echo "✗ Potential secrets found"
|
||||
|
||||
2
.gitignore
vendored
@@ -48,6 +48,8 @@ tests/tmp-*
|
||||
CLAUDE.md
|
||||
GEMINI.md
|
||||
ANTIGRAVITY.md
|
||||
WARP.md
|
||||
AGENTS.md
|
||||
.cursorrules
|
||||
|
||||
# Go build artifacts (development)
|
||||
|
||||
412
AGENTS.md
@@ -1,412 +0,0 @@
|
||||
# AGENTS.md - Development Guide for Mole
|
||||
|
||||
This guide provides AI coding assistants with essential commands, patterns, and conventions for working in the Mole codebase.
|
||||
|
||||
**Quick reference**: Build/test commands • Safety rules • Architecture map • Code style
|
||||
|
||||
---
|
||||
|
||||
## Safety Checklist
|
||||
|
||||
Before any operation:
|
||||
|
||||
- Use `safe_*` helpers (never raw `rm -rf` or `find -delete`)
|
||||
- Check protection: `is_protected()`, `is_whitelisted()`
|
||||
- Test first: `MO_DRY_RUN=1 ./mole clean`
|
||||
- Validate syntax: `bash -n <file>`
|
||||
- Run tests: `./scripts/test.sh`
|
||||
|
||||
## NEVER Do These
|
||||
|
||||
- Run `rm -rf` or any raw deletion commands
|
||||
- Delete files without checking protection lists
|
||||
- Modify system-critical paths (e.g., `/System`, `/Library/Apple`)
|
||||
- Remove installer flags `--prefix`/`--config` from `install.sh`
|
||||
- **Commit code changes or run `git commit` unless the user explicitly asks you to commit**
|
||||
- **Reply to GitHub issues or PRs on behalf of the user** - only prepare responses for user review
|
||||
- Run destructive operations without dry-run validation
|
||||
- Use raw `git` commands when `gh` CLI is available
|
||||
|
||||
## ALWAYS Do These
|
||||
|
||||
- Use `safe_*` helper functions for deletions (`safe_rm`, `safe_find_delete`)
|
||||
- Respect whitelist files (e.g., `~/.config/mole/whitelist`)
|
||||
- Check protection logic before cleanup operations
|
||||
- Test with dry-run modes first
|
||||
- Validate syntax before suggesting changes: `bash -n <file>`
|
||||
- **Prioritize `gh` CLI for ALL GitHub operations** - Always use `gh` to fetch and manipulate GitHub data (issues, PRs, releases, comments, etc.) instead of raw git commands or web scraping
|
||||
- **Document fixes in AGENTS.md instead of committing or replying** - Prepare commit messages and GitHub responses for user review, don't execute them
|
||||
- Review and update `SECURITY_AUDIT.md` when modifying `clean` or `optimize` logic
|
||||
|
||||
---
|
||||
|
||||
## Quick Reference
|
||||
|
||||
### Build Commands
|
||||
|
||||
```bash
|
||||
# Build Go binaries for current platform
|
||||
make build
|
||||
|
||||
# Build release binaries (cross-platform)
|
||||
make release-amd64 # macOS Intel
|
||||
make release-arm64 # macOS Apple Silicon
|
||||
|
||||
# Clean build artifacts
|
||||
make clean
|
||||
```
|
||||
|
||||
### Test Commands
|
||||
|
||||
```bash
|
||||
# Run full test suite (recommended before commits)
|
||||
./scripts/test.sh
|
||||
|
||||
# Run specific BATS test file
|
||||
bats tests/clean.bats
|
||||
|
||||
# Run specific test case by name
|
||||
bats tests/clean.bats -f "should respect whitelist"
|
||||
|
||||
# Run Go tests only
|
||||
go test -v ./cmd/...
|
||||
|
||||
# Run Go tests for specific package
|
||||
go test -v ./cmd/analyze
|
||||
|
||||
# Shell syntax check
|
||||
bash -n lib/clean/user.sh
|
||||
bash -n mole
|
||||
|
||||
# Lint shell scripts
|
||||
shellcheck --rcfile .shellcheckrc lib/**/*.sh bin/**/*.sh
|
||||
```
|
||||
|
||||
### Development Commands
|
||||
|
||||
```bash
|
||||
# Test cleanup in dry-run mode
|
||||
MO_DRY_RUN=1 ./mole clean
|
||||
|
||||
# Enable debug logging
|
||||
MO_DEBUG=1 ./mole clean
|
||||
|
||||
# Test Go tool directly
|
||||
go run ./cmd/analyze
|
||||
|
||||
# Test installation locally
|
||||
./install.sh --prefix /usr/local/bin --config ~/.config/mole
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Architecture Quick Map
|
||||
|
||||
```
|
||||
mole/ # Main CLI entrypoint (menu + routing)
|
||||
├── mo # CLI alias wrapper
|
||||
├── install.sh # Manual installer/updater (preserves --prefix/--config)
|
||||
├── bin/ # Command entry points (thin wrappers)
|
||||
│ ├── clean.sh # Deep cleanup orchestrator
|
||||
│ ├── uninstall.sh # App removal with leftover detection
|
||||
│ ├── optimize.sh # Cache rebuild + service refresh
|
||||
│ ├── purge.sh # Aggressive cleanup mode
|
||||
│ ├── touchid.sh # Touch ID sudo enabler
|
||||
│ ├── analyze.sh # Disk usage explorer wrapper
|
||||
│ ├── status.sh # System health dashboard wrapper
|
||||
│ ├── installer.sh # Core installation logic
|
||||
│ └── completion.sh # Shell completion support
|
||||
├── lib/ # Reusable shell logic
|
||||
│ ├── core/ # base.sh, log.sh, sudo.sh, ui.sh
|
||||
│ ├── clean/ # Cleanup modules (user, apps, brew, system...)
|
||||
│ ├── optimize/ # Optimization modules
|
||||
│ ├── check/ # Health check modules
|
||||
│ ├── manage/ # Management utilities
|
||||
│ ├── ui/ # UI components (balloons, spinners)
|
||||
│ └── uninstall/ # Uninstallation logic
|
||||
├── cmd/ # Go applications
|
||||
│ ├── analyze/ # Disk analysis tool
|
||||
│ └── status/ # Real-time monitoring
|
||||
├── scripts/ # Build and test automation
|
||||
│ └── test.sh # Main test runner (shell + go + BATS)
|
||||
└── tests/ # BATS integration tests
|
||||
```
|
||||
|
||||
**Decision Tree**:
|
||||
|
||||
- User cleanup logic → `lib/clean/<module>.sh`
|
||||
- Command entry → `bin/<command>.sh`
|
||||
- Core utils → `lib/core/<util>.sh`
|
||||
- Performance tool → `cmd/<tool>/*.go`
|
||||
- Tests → `tests/<test>.bats`
|
||||
|
||||
### Language Stack
|
||||
|
||||
- **Shell (Bash 3.2)**: Core cleanup and system operations (`lib/`, `bin/`)
|
||||
- **Go**: Performance-critical tools (`cmd/analyze/`, `cmd/status/`)
|
||||
- **BATS**: Integration testing (`tests/`)
|
||||
|
||||
---
|
||||
|
||||
## Code Style Guidelines
|
||||
|
||||
### Shell Scripts
|
||||
|
||||
- **Indentation**: 4 spaces (configured in .editorconfig)
|
||||
- **Variables**: `lowercase_with_underscores`
|
||||
- **Functions**: `verb_noun` format (e.g., `clean_caches`, `get_size`)
|
||||
- **Constants**: `UPPERCASE_WITH_UNDERSCORES`
|
||||
- **Quoting**: Always quote variables: `"$var"` not `$var`
|
||||
- **Tests**: Use `[[` instead of `[`
|
||||
- **Command substitution**: Use `$(command)` not backticks
|
||||
- **Error handling**: Use `set -euo pipefail` at top of files
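The conventions above are easiest to see together. Here is a small illustrative function (not an actual Mole function) that follows them:

```bash
#!/usr/bin/env bash
# Illustrative only - demonstrates the conventions above, not code from lib/.
set -euo pipefail

readonly MAX_LOG_AGE_DAYS=30                  # constants: UPPERCASE_WITH_UNDERSCORES

count_old_logs() {                            # functions: verb_noun
    local log_dir="$1"                        # variables: lowercase_with_underscores, always quoted
    [[ -d "$log_dir" ]] || { echo 0; return 0; }   # [[ ]] rather than [ ]
    local old_count
    old_count=$(find "$log_dir" -name '*.log' -mtime "+$MAX_LOG_AGE_DAYS" | wc -l)   # $( ) not backticks
    echo "$old_count"
}
```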
|
||||
|
||||
### Go Code
|
||||
|
||||
- **Formatting**: Follow standard Go conventions (`gofmt`, `go vet`)
|
||||
- **Package docs**: Add package-level documentation for exported functions
|
||||
- **Error handling**: Never ignore errors, always handle them explicitly
|
||||
- **Build tags**: Use `//go:build darwin` for macOS-specific code
|
||||
|
||||
### Comments
|
||||
|
||||
- **Language**: English only
|
||||
- **Focus**: Explain "why" not "what" (code should be self-documenting)
|
||||
- **Safety**: Document safety boundaries explicitly
|
||||
- **Non-obvious logic**: Explain workarounds or complex patterns
|
||||
|
||||
---
|
||||
|
||||
## Key Helper Functions
|
||||
|
||||
### Safety Helpers (lib/core/base.sh)
|
||||
|
||||
- `safe_rm <path>`: Safe deletion with validation
|
||||
- `safe_find_delete <base> <pattern> <days> <type>`: Protected find+delete
|
||||
- `is_protected <path>`: Check if path is system-protected
|
||||
- `is_whitelisted <name>`: Check user whitelist
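A minimal sketch of how a cleanup module might combine these helpers. The target path and app name are made up, and the exact helper signatures in `lib/core/base.sh` may differ:

```bash
# Illustrative guard-then-delete pattern; path and app name are hypothetical.
clean_example_cache() {
    local target="$HOME/Library/Caches/example-app"

    # Never touch protected or whitelisted locations.
    if is_protected "$target" || is_whitelisted "example-app"; then
        return 0
    fi

    # Purge temp files older than 7 days, then one known stale index file.
    safe_find_delete "$target" "*.tmp" 7 f
    safe_rm "$target/stale-index.db"
}
```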
|
||||
|
||||
### Logging (lib/core/log.sh)
|
||||
|
||||
- `log_info <msg>`: Informational messages
|
||||
- `log_success <msg>`: Success notifications
|
||||
- `log_warn <msg>`: Warnings
|
||||
- `log_error <msg>`: Error messages
|
||||
- `debug <msg>`: Debug output (requires MO_DEBUG=1)
|
||||
|
||||
### UI Helpers (lib/core/ui.sh)
|
||||
|
||||
- `confirm <prompt>`: Yes/no confirmation
|
||||
- `show_progress <current> <total> <msg>`: Progress display
|
||||
|
||||
---
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Test Types
|
||||
|
||||
1. **Syntax Validation**: `bash -n <file>` - catches basic errors
|
||||
2. **Unit Tests**: BATS tests for individual functions
|
||||
3. **Integration Tests**: Full command execution with BATS
|
||||
4. **Dry-run Tests**: `MO_DRY_RUN=1` to validate without deletion
|
||||
5. **Go Tests**: `go test -v ./cmd/...`
|
||||
|
||||
### Test Environment Variables
|
||||
|
||||
- `MO_DRY_RUN=1`: Preview changes without execution
|
||||
- `MO_DEBUG=1`: Enable detailed debug logging
|
||||
- `BATS_FORMATTER=pretty`: Use pretty output for BATS (default)
|
||||
- `BATS_FORMATTER=tap`: Use TAP output for CI
|
||||
|
||||
---
|
||||
|
||||
## Common Development Tasks
|
||||
|
||||
### Adding New Cleanup Module
|
||||
|
||||
1. Create `lib/clean/new_module.sh`
|
||||
2. Implement cleanup logic using `safe_*` helpers
|
||||
3. Source it in `bin/clean.sh`
|
||||
4. Add protection checks for critical paths
|
||||
5. Write BATS test in `tests/clean.bats`
|
||||
6. Test with `MO_DRY_RUN=1` first
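For step 5, a minimal BATS case could look like the sketch below. The test name and assertions are illustrative, not copied from `tests/clean.bats`:

```bash
# tests/example.bats (hypothetical) - illustrative test shape only.

@test "clean runs safely in dry-run mode" {
    # MO_DRY_RUN=1 must prevent any deletion and still exit cleanly.
    run env MO_DRY_RUN=1 ./mole clean
    [ "$status" -eq 0 ]
    [[ "$output" == *"Dry Run"* ]]
}
```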
|
||||
|
||||
### Modifying Go Tools
|
||||
|
||||
1. Navigate to `cmd/<tool>/`
|
||||
2. Make changes to Go files
|
||||
3. Test with `go run .` or `make build && ./bin/<tool>-go`
|
||||
4. Run `go test -v` for unit tests
|
||||
5. Check integration: `./mole <command>`
|
||||
|
||||
### Debugging Issues
|
||||
|
||||
1. Enable debug mode: `MO_DEBUG=1 ./mole clean`
|
||||
2. Check logs for error messages
|
||||
3. Verify sudo permissions: `sudo -n true` or `./mole touchid`
|
||||
4. Test individual functions in isolation
|
||||
5. Use `shellcheck` for shell script issues
|
||||
|
||||
---
|
||||
|
||||
## Linting and Quality
|
||||
|
||||
### Shell Script Linting
|
||||
|
||||
- **Tool**: shellcheck with custom `.shellcheckrc`
|
||||
- **Disabled rules**: SC2155, SC2034, SC2059, SC1091, SC2038
|
||||
- **Command**: `shellcheck --rcfile .shellcheckrc lib/**/*.sh bin/**/*.sh`
|
||||
|
||||
### Go Code Quality
|
||||
|
||||
- **Tools**: `go vet`, `go fmt`, `go test`
|
||||
- **Command**: `go vet ./cmd/... && go test ./cmd/...`
|
||||
|
||||
### CI/CD Pipeline
|
||||
|
||||
- **Triggers**: Push/PR to main, dev branches
|
||||
- **Platforms**: macOS 14, macOS 15
|
||||
- **Tools**: bats-core, shellcheck, Go 1.24.6
|
||||
- **Security checks**: Unsafe rm usage, app protection, secret scanning
|
||||
|
||||
---
|
||||
|
||||
## File Organization Patterns
|
||||
|
||||
### Shell Modules
|
||||
|
||||
- Entry scripts in `bin/` should be thin wrappers
|
||||
- Reusable logic goes in `lib/`
|
||||
- Core utilities in `lib/core/`
|
||||
- Feature-specific modules in `lib/clean/`, `lib/ui/`, etc.
|
||||
|
||||
### Go Packages
|
||||
|
||||
- Each tool in its own `cmd/<tool>/` directory
|
||||
- Main entry point in `main.go`
|
||||
- Use standard Go project layout
|
||||
- macOS-specific code guarded with build tags
|
||||
|
||||
---
|
||||
|
||||
## GitHub Operations
|
||||
|
||||
### ⚡ ALWAYS Use gh CLI for GitHub Information
|
||||
|
||||
**Golden Rule**: Whenever you need to fetch or manipulate GitHub data (issues, PRs, commits, releases, comments, etc.), **ALWAYS use `gh` CLI first**. It's more reliable, authenticated, and provides structured output compared to web scraping or raw git commands.
|
||||
|
||||
**Preferred Commands**:
|
||||
|
||||
```bash
|
||||
# Issues
|
||||
gh issue view 123 # View issue details
|
||||
gh issue list # List issues
|
||||
gh issue comment 123 "message" # Comment on issue
|
||||
|
||||
# Pull Requests
|
||||
gh pr view # View current PR
|
||||
gh pr diff # Show diff
|
||||
gh pr list # List PRs
|
||||
gh pr checkout 123 # Checkout PR branch
|
||||
gh pr merge # Merge current PR
|
||||
|
||||
# Repository operations
|
||||
gh release create v1.0.0 # Create release
|
||||
gh repo view # Repository info
|
||||
gh api repos/owner/repo/issues # Raw API access
|
||||
```
|
||||
|
||||
**NEVER use raw git commands for GitHub operations** when `gh` is available:
|
||||
|
||||
- ❌ `git log --oneline origin/main..HEAD` → ✅ `gh pr view`
|
||||
- ❌ `git remote get-url origin` → ✅ `gh repo view`
|
||||
- ❌ Manual GitHub API curl commands → ✅ `gh api`
|
||||
|
||||
## Error Handling Patterns
|
||||
|
||||
### Shell Scripts
|
||||
|
||||
- Use `set -euo pipefail` for strict error handling
|
||||
- Check command exit codes: `if command; then ...`
|
||||
- Provide meaningful error messages with `log_error`
|
||||
- Use cleanup traps for temporary resources
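As a sketch of the trap pattern (the measured path and messages are illustrative):

```bash
# Illustrative trap-based cleanup for a temporary file.
set -euo pipefail

tmp_file=$(mktemp)
cleanup() {
    rm -f "$tmp_file"   # temp workspace only; user data always goes through safe_* helpers
}
trap cleanup EXIT

# Check the exit code explicitly and report a meaningful error.
if ! du -sk "$HOME/Library/Caches" > "$tmp_file" 2>/dev/null; then
    echo "Error: could not measure cache size" >&2   # or log_error when lib/core/log.sh is sourced
    exit 1
fi
```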
|
||||
|
||||
### Go Code
|
||||
|
||||
- Never ignore errors: `if err != nil { return err }`
|
||||
- Use structured error messages
|
||||
- Handle context cancellation appropriately
|
||||
- Log errors with context information
|
||||
|
||||
---
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
### Shell Optimization
|
||||
|
||||
- Use built-in shell operations over external commands
|
||||
- Prefer `find -delete` over `-exec rm`
|
||||
- Minimize subprocess creation
|
||||
- Use appropriate timeout mechanisms
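For instance, replacing an external command with a parameter expansion avoids one fork per call (a generic illustration, not code from the repository):

```bash
path="$HOME/Library/Logs/app.log"

# External command: one subprocess per call
name=$(basename "$path")

# Bash parameter expansion: no subprocess, same result for simple paths
name="${path##*/}"
```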
|
||||
|
||||
### Go Optimization
|
||||
|
||||
- Use concurrency for I/O-bound operations
|
||||
- Implement proper caching for expensive operations
|
||||
- Profile memory usage in scanning operations
|
||||
- Use efficient data structures for large datasets
|
||||
|
||||
---
|
||||
|
||||
## Security Best Practices
|
||||
|
||||
### Path Validation
|
||||
|
||||
- Always validate user-provided paths
|
||||
- Check against protection lists before operations
|
||||
- Use absolute paths to prevent directory traversal
|
||||
- Implement proper sandboxing for destructive operations
|
||||
|
||||
### Permission Management
|
||||
|
||||
- Request sudo only when necessary
|
||||
- Use `sudo -n true` to check sudo availability
|
||||
- Implement proper Touch ID integration
|
||||
- Respect user whitelist configurations
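The non-prompting check mentioned above is a one-liner; the surrounding messages here are illustrative:

```bash
# Detect an existing sudo session without triggering a password prompt.
if sudo -n true 2>/dev/null; then
    echo "Admin access already available"
else
    echo "No cached sudo session; system-level steps will ask before escalating"
fi
```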
|
||||
|
||||
---
|
||||
|
||||
## Common Pitfalls to Avoid
|
||||
|
||||
1. **Over-engineering**: Keep solutions simple. Don't add abstractions for one-time operations.
|
||||
2. **Premature optimization**: Focus on correctness first, performance second.
|
||||
3. **Assuming paths exist**: Always check before operating on files/directories.
|
||||
4. **Ignoring protection logic**: User data loss is unacceptable.
|
||||
5. **Breaking updates**: Keep `--prefix`/`--config` flags in `install.sh`.
|
||||
6. **Platform assumptions**: Code must work on all supported macOS versions (10.13+).
|
||||
7. **Silent failures**: Always log errors and provide actionable messages.
|
||||
|
||||
---
|
||||
|
||||
## Communication Style
|
||||
|
||||
- Be concise and technical
|
||||
- Explain safety implications upfront
|
||||
- Show before/after for significant changes
|
||||
- Provide file:line references for code locations
|
||||
- Suggest testing steps for validation
|
||||
|
||||
---
|
||||
|
||||
## Resources
|
||||
|
||||
- Main script: `mole` (menu + routing logic)
|
||||
- Protection lists: Check `is_protected()` implementations
|
||||
- User config: `~/.config/mole/`
|
||||
- Test directory: `tests/`
|
||||
- Build scripts: `scripts/`
|
||||
- Documentation: `README.md`, `CONTRIBUTING.md`, `SECURITY_AUDIT.md`
|
||||
|
||||
---
|
||||
|
||||
**Remember**: When in doubt, err on the side of safety. It's better to clean less than to risk user data.
|
||||
22
README.md
@@ -39,7 +39,7 @@ brew install mole
|
||||
curl -fsSL https://raw.githubusercontent.com/tw93/mole/main/install.sh | bash
|
||||
```
|
||||
|
||||
**Windows:** Mole is designed for macOS, but we offer an experimental Windows version based on user demand. See the [windows branch](https://github.com/tw93/Mole/tree/windows) — for early adopters only.
|
||||
**Windows:** Mole is designed for macOS, but we offer an experimental Windows version based on user demand. See the [windows branch](https://github.com/tw93/Mole/tree/windows), for early adopters only.
|
||||
|
||||
**Run:**
|
||||
|
||||
@@ -76,6 +76,7 @@ mo purge --paths # Configure project scan directories
|
||||
- **Safety**: Built with strict protections. See [Security Audit](SECURITY_AUDIT.md). Preview changes with `mo clean --dry-run`.
|
||||
- **Be Careful**: Although safe by design, file deletion is permanent. Please review operations carefully.
|
||||
- **Debug Mode**: Use `--debug` for detailed logs (e.g., `mo clean --debug`). Combine with `--dry-run` for comprehensive preview including risk levels and file details.
|
||||
- **Operation Log**: File operations are logged to `~/.config/mole/operations.log` for troubleshooting. Disable with `MO_NO_OPLOG=1`.
|
||||
- **Navigation**: Supports arrow keys and Vim bindings (`h/j/k/l`).
|
||||
- **Status Shortcuts**: In `mo status`, press `k` to toggle cat visibility and save preference, `q` to quit.
|
||||
- **Configuration**: Run `mo touchid` for Touch ID sudo, `mo completion` for shell tab completion, `mo clean --whitelist` to manage protected paths.
|
||||
@@ -184,8 +185,8 @@ Read ▮▯▯▯▯ 2.1 MB/s Health Normal · 423 cycles
|
||||
Write ▮▮▮▯▯ 18.3 MB/s Temp 58°C · 1200 RPM
|
||||
|
||||
⇅ Network ▶ Processes
|
||||
Down ▮▮▯▯▯ 3.2 MB/s Code ▮▮▮▮▯ 42.1%
|
||||
Up ▮▯▯▯▯ 0.8 MB/s Chrome ▮▮▮▯▯ 28.3%
|
||||
Down ▁▁█▂▁▁▁▁▁▁▁▁▇▆▅▂ 0.54 MB/s Code ▮▮▮▮▯ 42.1%
|
||||
Up ▄▄▄▃▃▃▄▆▆▇█▁▁▁▁▁ 0.02 MB/s Chrome ▮▮▮▯▯ 28.3%
|
||||
Proxy HTTP · 192.168.1.100 Terminal ▮▯▯▯▯ 12.5%
|
||||
```
|
||||
|
||||
@@ -210,7 +211,7 @@ Select Categories to Clean - 18.5GB (8 selected)
|
||||
● backend-service 2.5GB | node_modules
|
||||
```
|
||||
|
||||
> **Use with caution:** This will permanently delete selected artifacts. Review carefully before confirming. Recent projects — less than 7 days old — are marked and unselected by default.
|
||||
> **Use with caution:** This will permanently delete selected artifacts. Review carefully before confirming. Recent projects, less than 7 days old, are marked and unselected by default.
|
||||
|
||||
<details>
|
||||
<summary><strong>Custom Scan Paths</strong></summary>
|
||||
@@ -258,21 +259,22 @@ Mole automatically detects your terminal, or set `MO_LAUNCHER_APP=<name>` to ove
|
||||
|
||||
## Community Love
|
||||
|
||||
Mole wouldn't be possible without these amazing contributors. They've built countless features that make Mole what it is today. Go follow them! ❤️
|
||||
Big thanks to all contributors who helped build Mole. Go follow them! ❤️
|
||||
|
||||
<a href="https://github.com/tw93/Mole/graphs/contributors">
|
||||
<img src="./CONTRIBUTORS.svg?v=2" width="1000" />
|
||||
</a>
|
||||
|
||||
Join thousands of users worldwide who trust Mole to keep their Macs clean and optimized.
|
||||
<br/><br/>
|
||||
Real feedback from users who shared Mole on X.
|
||||
|
||||
<img src="https://cdn.tw93.fun/pic/lovemole.jpeg" alt="Community feedback on Mole" width="1000" />
|
||||
|
||||
## Support
|
||||
|
||||
- If Mole saved you disk space, consider starring the repo or [sharing it](https://twitter.com/intent/tweet?url=https://github.com/tw93/Mole&text=Mole%20-%20Deep%20clean%20and%20optimize%20your%20Mac.) with friends.
|
||||
- Have ideas or fixes? Check our [Contributing Guide](CONTRIBUTING.md), then open an issue or PR to help shape Mole's future.
|
||||
- Love Mole? <a href="https://miaoyan.app/cats.html?name=Mole" target="_blank">Buy Tw93 an ice-cold Coke</a> to keep the project alive and kicking! 🥤
|
||||
- If Mole helped you, star the repo or [share it](https://twitter.com/intent/tweet?url=https://github.com/tw93/Mole&text=Mole%20-%20Deep%20clean%20and%20optimize%20your%20Mac.) with friends.
|
||||
- Got ideas or found bugs? Check the [Contributing Guide](CONTRIBUTING.md) and open an issue or PR.
|
||||
- Like Mole? <a href="https://miaoyan.app/cats.html?name=Mole" target="_blank">Buy Tw93 a Coke</a> to support the project! 🥤
|
||||
|
||||
<details>
|
||||
<summary><strong>Friends who bought me Coke</strong></summary>
|
||||
@@ -282,4 +284,4 @@ Join thousands of users worldwide who trust Mole to keep their Macs clean and op
|
||||
|
||||
## License
|
||||
|
||||
MIT License — feel free to enjoy and participate in open source.
|
||||
MIT License, feel free to enjoy and participate in open source.
|
||||
|
||||
@@ -1,356 +1,169 @@
|
||||
# Mole Security Audit Report
|
||||
# Mole Security Reference
|
||||
|
||||
<div align="center">
|
||||
Version 1.23.2 | 2026-01-26
|
||||
|
||||
**Status:** PASSED | **Risk Level:** LOW | **Version:** 1.21.0 (2026-01-15)
|
||||
## Recent Fixes
|
||||
|
||||
</div>
|
||||
**Uninstall audit, Jan 2026:**
|
||||
|
||||
---
|
||||
- `stop_launch_services()` now checks bundle_id is valid reverse-DNS before using it in find patterns. This stops glob injection.
|
||||
- `find_app_files()` skips LaunchAgents named after common words like Music or Notes.
|
||||
- Added comments explaining why `remove_file_list()` bypasses TOCTOU checks for symlinks.
|
||||
- `brew_uninstall_cask()` treats exit code 124 as timeout failure, returns immediately.
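The reverse-DNS check in the first item can be pictured as a simple pattern test; the actual validation inside `stop_launch_services()` may be stricter:

```bash
# Illustrative reverse-DNS check; the real validation may be stricter.
is_valid_bundle_id() {
    local bundle_id="$1"
    local re='^[A-Za-z0-9-]+(\.[A-Za-z0-9-]+)+$'   # e.g. com.example.app
    [[ "$bundle_id" =~ $re ]]
}
```

When the check fails, the caller skips the lookup instead of interpolating the value into a `find` pattern.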
|
||||
|
||||
## Audit Overview
|
||||
Other changes:
|
||||
|
||||
| Attribute | Details |
|
||||
|-----------|---------|
|
||||
| Audit Date | January 15, 2026 |
|
||||
| Audit Conclusion | **PASSED** |
|
||||
| Mole Version | V1.21.0 |
|
||||
| Audited Branch | `main` (HEAD) |
|
||||
| Scope | Shell scripts, Go binaries, Configuration |
|
||||
| Methodology | Static analysis, Threat modeling, Code review |
|
||||
| Review Cycle | Every 6 months or after major feature additions |
|
||||
| Next Review | June 2026 |
|
||||
- Symlink cleanup in `bin/clean.sh` goes through `safe_remove` now
|
||||
- Orphaned helper cleanup in `lib/clean/apps.sh` switched to `safe_sudo_remove`
|
||||
- ByHost pref cleanup checks bundle ID format first
|
||||
|
||||
**Key Findings:**
|
||||
## Path Validation
|
||||
|
||||
- Multi-layer validation effectively blocks risky system modifications.
|
||||
- Conservative cleaning logic ensures safety (e.g., 60-day dormancy rule).
|
||||
- Comprehensive protection for VPNs, AI tools, and core system components.
|
||||
- Atomic operations prevent state corruption during crashes.
|
||||
- Dry-run and whitelist features give users full control.
|
||||
- Installer cleanup scans safely and requires user confirmation.
|
||||
Every deletion goes through `lib/core/file_ops.sh`. The `validate_path_for_deletion()` function rejects empty paths, paths with `/../` in them, and anything containing control characters like newlines or null bytes.
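A rough sketch of the checks described here; the real implementation in `lib/core/file_ops.sh` handles more cases, so treat this as an approximation:

```bash
# Approximation of the validation described above, not the actual implementation.
validate_path_for_deletion() {
    local path="$1"

    [[ -n "$path" ]] || return 1               # never operate on an empty path
    [[ "$path" == /* ]] || return 1            # absolute paths only
    [[ "$path" == *"/../"* ]] && return 1      # no traversal components
    [[ "$path" =~ [[:cntrl:]] ]] && return 1   # no newlines or other control characters

    return 0
}
```

A caller would then guard every deletion with `validate_path_for_deletion "$target" || return 1` before anything is removed.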
|
||||
|
||||
---
|
||||
**Blocked paths**, even with sudo:
|
||||
|
||||
## Security Philosophy
|
||||
|
||||
**Core Principle: "Do No Harm"**
|
||||
|
||||
We built Mole on a **Zero Trust** architecture for filesystem operations. Every modification request is treated as dangerous until it passes strict validation.
|
||||
|
||||
**Guiding Priorities:**
|
||||
|
||||
1. **System Stability First** - We'd rather leave 1GB of junk than delete 1KB of your data.
|
||||
2. **Conservative by Default** - High-risk operations always require explicit confirmation.
|
||||
3. **Fail Safe** - When in doubt, we abort immediately.
|
||||
4. **Transparency** - Every operation is logged and allows a preview via dry-run mode.
|
||||
|
||||
---
|
||||
|
||||
## Threat Model
|
||||
|
||||
### Attack Vectors & Mitigations
|
||||
|
||||
| Threat | Risk Level | Mitigation | Status |
|
||||
|--------|------------|------------|--------|
|
||||
| Accidental System File Deletion | Critical | Multi-layer path validation, system directory blocklist | Mitigated |
|
||||
| Path Traversal Attack | High | Absolute path enforcement, relative path rejection | Mitigated |
|
||||
| Symlink Exploitation | High | Symlink detection in privileged mode | Mitigated |
|
||||
| Command Injection | High | Control character filtering, strict validation | Mitigated |
|
||||
| Empty Variable Deletion | High | Empty path validation, defensive checks | Mitigated |
|
||||
| Race Conditions | Medium | Atomic operations, process isolation | Mitigated |
|
||||
| Network Mount Hangs | Medium | Timeout protection, volume type detection | Mitigated |
|
||||
| Privilege Escalation | Medium | Restricted sudo scope, user home validation | Mitigated |
|
||||
| False Positive Deletion | Medium | 3-char minimum, fuzzy matching disabled | Mitigated |
|
||||
| VPN Configuration Loss | Medium | Comprehensive VPN/proxy whitelist | Mitigated |
|
||||
|
||||
---
|
||||
|
||||
## Defense Architecture
|
||||
|
||||
### Multi-Layered Validation System
|
||||
|
||||
All automated operations pass through hardened middleware (`lib/core/file_ops.sh`) with 4 layers of validation:
|
||||
|
||||
#### Layer 1: Input Sanitization
|
||||
|
||||
| Control | Protection Against |
|
||||
|---------|---------------------|
|
||||
| Absolute Path Enforcement | Path traversal attacks (`../etc`) |
|
||||
| Control Character Filtering | Command injection (`\n`, `\r`, `\0`) |
|
||||
| Empty Variable Protection | Accidental `rm -rf /` |
|
||||
| Secure Temp Workspaces | Data leakage, race conditions |
|
||||
|
||||
**Code:** `lib/core/file_ops.sh:validate_path_for_deletion()`
|
||||
|
||||
#### Layer 2: System Path Protection ("Iron Dome")
|
||||
|
||||
Even with `sudo`, these paths are **unconditionally blocked**:
|
||||
|
||||
```bash
|
||||
/ # Root filesystem
|
||||
/System # macOS system files
|
||||
/bin, /sbin, /usr # Core binaries
|
||||
/etc, /var # System configuration
|
||||
/Library/Extensions # Kernel extensions
|
||||
/private # System-private directories
|
||||
```text
|
||||
/ # root
|
||||
/System # macOS system
|
||||
/bin, /sbin, /usr # binaries
|
||||
/etc, /var # config
|
||||
/Library/Extensions # kexts
|
||||
/private # system private
|
||||
```
|
||||
|
||||
**Exceptions:**
|
||||
Some system caches are OK to delete:
|
||||
|
||||
- `/System/Library/Caches/com.apple.coresymbolicationd/data` (safe, rebuildable cache)
|
||||
- `/System/Library/Caches/com.apple.coresymbolicationd/data`
|
||||
- `/private/tmp`, `/private/var/tmp`, `/private/var/log`, `/private/var/folders`
|
||||
- `/private/var/db/diagnostics`, `/private/var/db/DiagnosticPipeline`, `/private/var/db/powerlog`, `/private/var/db/reportmemoryexception`
|
||||
|
||||
**Code:** `lib/core/file_ops.sh:60-78`
|
||||
See `lib/core/file_ops.sh:60-78`.
|
||||
|
||||
#### Layer 3: Symlink Detection
|
||||
When running with sudo, `safe_sudo_recursive_delete()` also checks for symlinks. Refuses to follow symlinks pointing to system files.
|
||||
|
||||
For privileged operations, pre-flight checks prevent symlink-based attacks:
|
||||
## Cleanup Rules
|
||||
|
||||
- Detects symlinks from cache folders pointing to system files.
|
||||
- Refuses recursive deletion of symbolic links in sudo mode.
|
||||
- Validates real path vs. symlink target.
|
||||
**Orphan detection** at `lib/clean/apps.sh:orphan_detection()`:
|
||||
|
||||
**Code:** `lib/core/file_ops.sh:safe_sudo_recursive_delete()`
|
||||
App data is only considered orphaned if the app itself is gone from all three locations: `/Applications`, `~/Applications`, `/System/Applications`. On top of that, the data must be untouched for at least 60 days. Adobe, Microsoft, and Google stuff is whitelisted regardless.
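Expressed as a sketch (the helper name, arguments, and vendor patterns are illustrative, not the actual `orphan_detection()` code):

```bash
# Illustrative only - not the actual lib/clean/apps.sh:orphan_detection() code.
is_orphaned_app_data() {
    local app_name="$1" data_dir="$2"

    # Vendor whitelist always wins.
    case "$app_name" in
        Adobe*|Microsoft*|Google*) return 1 ;;
    esac

    # The app must be missing from every install location.
    local dir
    for dir in "/Applications" "$HOME/Applications" "/System/Applications"; do
        [[ -e "$dir/$app_name.app" ]] && return 1
    done

    # And the data must have been untouched for at least 60 days.
    local age_days=$(( ($(date +%s) - $(stat -f %m "$data_dir")) / 86400 ))
    [[ $age_days -ge 60 ]]
}
```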
|
||||
|
||||
#### Layer 4: Permission Management
|
||||
**Uninstall matching** at `lib/clean/apps.sh:uninstall_app()`:
|
||||
|
||||
When running with `sudo`:
|
||||
App names need at least 3 characters. Otherwise "Go" would match "Google" and that's bad. Fuzzy matching is off. Receipt scans only look under `/Applications` and `/Library/Application Support`, not in shared places like `/Library/Frameworks`.
|
||||
|
||||
- Auto-corrects ownership back to user (`chown -R`).
|
||||
- Restricts operations to the user's home directory.
|
||||
- Enforces multiple validation checkpoints.
|
||||
**Dev tools:**
|
||||
|
||||
### Interactive Analyzer (Go)
|
||||
Cache dirs like `~/.cargo/registry/cache` or `~/.gradle/caches` get cleaned. But `~/.cargo/bin`, `~/.mix/archives`, `~/.rustup` toolchains, `~/.stack/programs` stay untouched.
|
||||
|
||||
The analyzer (`mo analyze`) uses a distinct security model:
|
||||
**LaunchAgent removal:**
|
||||
|
||||
- Runs with standard user permissions only.
|
||||
- Respects macOS System Integrity Protection (SIP).
|
||||
- **Two-Key Confirmation:** Deletion requires ⌫ (Delete) to enter confirmation mode, then Enter to confirm. Prevents accidental double-press of the same key.
|
||||
- **Trash Instead of Delete:** Files are moved to macOS Trash using Finder's native API, allowing easy recovery if needed.
|
||||
- OS-level enforcement (cannot delete `/System` due to Read-Only Volume).
|
||||
Only removed when uninstalling the app that owns them. All `com.apple.*` items are skipped. Services get stopped via `launchctl` first. Generic names like Music, Notes, Photos are excluded from the search.
|
||||
|
||||
**Code:** `cmd/analyze/*.go`
|
||||
See `lib/core/app_protection.sh:find_app_files()`.
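A sketch of that flow; the function name, plist handling, and `launchctl` invocation style are illustrative rather than the exact Mole implementation:

```bash
# Illustrative LaunchAgent removal flow, not the actual uninstall code.
remove_launch_agent() {
    local plist="$1"
    local name
    name=$(basename "$plist" .plist)

    # Never touch Apple's own launch items.
    [[ "$name" == com.apple.* ]] && return 0

    # Stop the service before deleting its plist.
    launchctl bootout "gui/$(id -u)" "$plist" 2>/dev/null || true
    safe_remove "$plist"
}
```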
|
||||
|
||||
---
|
||||
## Protected Categories
|
||||
|
||||
## Safety Mechanisms
|
||||
| Category | What's protected |
|
||||
| -------- | ---------------- |
|
||||
| System | Control Center, System Settings, TCC, `/Library/Updates`, Spotlight |
|
||||
| VPN/Proxy | Shadowsocks, V2Ray, Tailscale, Clash |
|
||||
| AI | Cursor, Claude, ChatGPT, Ollama, LM Studio |
|
||||
| Time Machine | Checks if backup is running. If status unclear, skips cleanup. |
|
||||
| Startup | `com.apple.*` LaunchAgents/Daemons always skipped |
|
||||
|
||||
### Conservative Cleaning Logic
|
||||
See `lib/core/app_protection.sh:is_critical_system_component()`.
|
||||
|
||||
#### The "60-Day Rule" for Orphaned Data
|
||||
## Analyzer
|
||||
|
||||
| Step | Verification | Criterion |
|
||||
|------|--------------|-----------|
|
||||
| 1. App Check | All installation locations | Must be missing from `/Applications`, `~/Applications`, `/System/Applications` |
|
||||
| 2. Dormancy | Modification timestamps | Untouched for ≥60 days |
|
||||
| 3. Vendor Whitelist | Cross-reference database | Adobe, Microsoft, and Google resources are protected |
|
||||
`mo analyze` runs differently:
|
||||
|
||||
**Code:** `lib/clean/apps.sh:orphan_detection()`
|
||||
- Standard user permissions, no sudo
|
||||
- Respects SIP
|
||||
- Two keys to delete: press ⌫ first, then Enter. Hard to delete by accident.
|
||||
- Files go to Trash via Finder API, not rm
|
||||
|
||||
#### Developer Tool Ecosystems (Consolidated)
|
||||
Code at `cmd/analyze/*.go`.
|
||||
|
||||
Support for 20+ languages (Rust, Go, Node, Python, JVM, Mobile, Elixir, Haskell, OCaml, etc.) with strict safety checks:
|
||||
## Timeouts
|
||||
|
||||
- **Global Optimization:** The core `safe_clean` function now intelligently checks parent directories before attempting wildcard cleanups, eliminating overhead for missing tools across the entire system.
|
||||
- **Safe Targets:** Only volatile caches are cleaned (e.g., `~/.cargo/registry/cache`, `~/.gradle/caches`).
|
||||
- **Protected Paths:** Critical directories like `~/.cargo/bin`, `~/.mix/archives`, `~/.rustup` toolchains, and `~/.stack/programs` are explicitly **excluded**.
|
||||
| Operation | Timeout | Why |
|
||||
| --------- | ------- | --- |
|
||||
| Network volume check | 5s | NFS/SMB/AFP can hang forever |
|
||||
| App bundle search | 10s | mdfind sometimes stalls |
|
||||
| SQLite vacuum | 20s | Skip if Mail/Safari/Messages is open |
|
||||
| dyld cache rebuild | 180s | Skip if done in last 24h |
|
||||
|
||||
#### Active Uninstallation Heuristics
|
||||
See `lib/core/base.sh:run_with_timeout()`.
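macOS ships no GNU `timeout`, so a background-watcher wrapper like the sketch below is one common shape for this. It is an approximation; the real `run_with_timeout()` may differ:

```bash
# Approximation only; the real lib/core/base.sh:run_with_timeout() may differ.
run_with_timeout() {
    local seconds="$1"; shift

    "$@" &                                            # run the command in the background
    local cmd_pid=$!

    ( sleep "$seconds" && kill "$cmd_pid" 2>/dev/null ) &
    local watcher_pid=$!

    local status=0
    wait "$cmd_pid" || status=$?                      # non-zero if the command failed or was killed
    kill "$watcher_pid" 2>/dev/null || true
    return "$status"
}
```

Usage then matches the call shown elsewhere in this document, for example `run_with_timeout 5 diskutil info "$mount_point" || skip_volume`.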
|
||||
|
||||
For user-selected app removal:
|
||||
## User Config
|
||||
|
||||
- **Sanitized Name Matching:** "Visual Studio Code" → `VisualStudioCode`, `.vscode`
|
||||
- **Safety Limit:** 3-char minimum (prevents "Go" matching "Google")
|
||||
- **Disabled:** Fuzzy matching and wildcard expansion for short names.
|
||||
- **User Confirmation:** Required before deletion.
|
||||
- **Receipt Scans:** BOM-derived files are restricted to app-specific prefixes (e.g., `/Applications`, `/Library/Application Support`). Shared directories like `/Library/Frameworks` are **excluded** to prevent collateral damage.
|
||||
|
||||
**Code:** `lib/clean/apps.sh:uninstall_app()`
|
||||
|
||||
#### System Protection Policies
|
||||
|
||||
| Protected Category | Scope | Reason |
|
||||
|--------------------|-------|--------|
|
||||
| System Integrity Protection | `/Library/Updates`, `/System/*` | Respects macOS Read-Only Volume |
|
||||
| Spotlight & System UI | `~/Library/Metadata/CoreSpotlight` | Prevents UI corruption |
|
||||
| System Components | Control Center, System Settings, TCC | Centralized detection via `is_critical_system_component()` |
|
||||
| Time Machine | Local snapshots, backups | Runtime activity detection (backup running, snapshots mounted), fails safe if status indeterminate |
|
||||
| VPN & Proxy | Shadowsocks, V2Ray, Tailscale, Clash | Protects network configs |
|
||||
| AI & LLM Tools | Cursor, Claude, ChatGPT, Ollama, LM Studio | Protects models, tokens, and sessions |
|
||||
| Startup Items | `com.apple.*` LaunchAgents/Daemons | System items unconditionally skipped |
|
||||
|
||||
**LaunchAgent/LaunchDaemon Cleanup During Uninstallation:**
|
||||
|
||||
When users uninstall applications via `mo uninstall`, Mole automatically removes associated LaunchAgent and LaunchDaemon plists:
|
||||
|
||||
- Scans `~/Library/LaunchAgents`, `~/Library/LaunchDaemons`, `/Library/LaunchAgents`, `/Library/LaunchDaemons`
|
||||
- Matches both exact bundle ID (`com.example.app.plist`) and app name patterns (`*AppName*.plist`)
|
||||
- Skips all `com.apple.*` system items via `should_protect_path()` validation
|
||||
- Unloads services via `launchctl` before deletion (via `stop_launch_services()`)
|
||||
- **Safer than orphan detection:** Only removes plists when the associated app is explicitly being uninstalled
|
||||
- Prevents accumulation of orphaned startup items that persist after app removal
|
||||
|
||||
**Code:** `lib/core/app_protection.sh:find_app_files()`, `lib/uninstall/batch.sh:stop_launch_services()`
|
||||
|
||||
### Crash Safety & Atomic Operations
|
||||
|
||||
| Operation | Safety Mechanism | Recovery Behavior |
|
||||
|-----------|------------------|-------------------|
|
||||
| Network Interface Reset | Atomic execution blocks | Wi-Fi/AirDrop restored to pre-operation state |
|
||||
| Swap Clearing | Daemon restart | `dynamic_pager` handles recovery safely |
|
||||
| Volume Scanning | Timeout + filesystem check | Auto-skip unresponsive NFS/SMB/AFP mounts |
|
||||
| Homebrew Cache | Pre-flight size check | Skip if <50MB (avoids long delays) |
|
||||
| Network Volume Check | `diskutil info` with timeout | Prevents hangs on slow/dead mounts |
|
||||
| SQLite Vacuum | App-running check + 20s timeout | Skips if Mail/Safari/Messages active |
|
||||
| dyld Cache Update | 24-hour freshness check + 180s timeout | Skips if recently updated |
|
||||
| App Bundle Search | 10s timeout on mdfind | Fallback to standard paths |
|
||||
|
||||
**Timeout Example:**
|
||||
Put paths in `~/.config/mole/whitelist`, one per line:
|
||||
|
||||
```bash
|
||||
run_with_timeout 5 diskutil info "$mount_point" || skip_volume
|
||||
# exact matches only
|
||||
/Users/me/important-cache
|
||||
~/Library/Application Support/MyApp
|
||||
```
|
||||
|
||||
**Code:** `lib/core/base.sh:run_with_timeout()`, `lib/optimize/*.sh`
|
||||
These paths are protected from all operations.
|
||||
|
||||
---
|
||||
Run `mo clean --dry-run` or `mo optimize --dry-run` to preview what would happen without actually doing it.
|
||||
|
||||
## User Controls
|
||||
## Testing
|
||||
|
||||
### Dry-Run Mode
|
||||
| Area | Coverage |
|
||||
| ---- | -------- |
|
||||
| File ops | 95% |
|
||||
| Cleaning | 87% |
|
||||
| Optimize | 82% |
|
||||
| System | 90% |
|
||||
| Security | 100% |
|
||||
|
||||
**Command:** `mo clean --dry-run` | `mo optimize --dry-run`
|
||||
|
||||
**Behavior:**
|
||||
|
||||
- Simulates the entire operation without modifying a single file.
|
||||
- Lists every file/directory that **would** be deleted.
|
||||
- Calculates total space that **would** be freed.
|
||||
- **Zero risk** - no actual deletion commands are executed.
|
||||
|
||||
### Custom Whitelists
|
||||
|
||||
**File:** `~/.config/mole/whitelist`
|
||||
|
||||
**Format:**
|
||||
180+ test cases total, about 88% coverage.
|
||||
|
||||
```bash
|
||||
# One path per line - exact matches only
|
||||
/Users/username/important-cache
|
||||
~/Library/Application Support/CriticalApp
|
||||
bats tests/ # run all
|
||||
bats tests/security.bats # security only
|
||||
```
|
||||
|
||||
- Paths are **unconditionally protected**.
|
||||
- Applies to all operations (clean, optimize, uninstall).
|
||||
- Supports absolute paths and `~` expansion.
|
||||
|
||||
**Code:** `lib/core/file_ops.sh:is_whitelisted()`
|
||||
|
||||
### Interactive Confirmations
|
||||
|
||||
We mandate confirmation for:
|
||||
|
||||
- Uninstalling system-scope applications.
|
||||
- Removing large data directories (>1GB).
|
||||
- Deleting items from shared vendor folders.
|
||||
|
||||
---
|
||||
|
||||
## Testing & Compliance
|
||||
|
||||
### Test Coverage
|
||||
|
||||
Mole uses **BATS (Bash Automated Testing System)** for automated testing.
|
||||
|
||||
| Test Category | Coverage | Key Tests |
|
||||
|---------------|----------|-----------|
|
||||
| Core File Operations | 95% | Path validation, symlink detection, permissions |
|
||||
| Cleaning Logic | 87% | Orphan detection, 60-day rule, vendor whitelist |
|
||||
| Optimization | 82% | Cache cleanup, timeouts |
|
||||
| System Maintenance | 90% | Time Machine, network volumes, crash recovery |
|
||||
| Security Controls | 100% | Path traversal, command injection, symlinks |
|
||||
|
||||
**Total:** 180+ tests | **Overall Coverage:** ~88%
|
||||
|
||||
**Test Execution:**
|
||||
|
||||
```bash
|
||||
bats tests/ # Run all tests
|
||||
bats tests/security.bats # Run specific suite
|
||||
```
|
||||
|
||||
### Standards Compliance
|
||||
|
||||
| Standard | Implementation |
|
||||
|----------|----------------|
|
||||
| OWASP Secure Coding | Input validation, least privilege, defense-in-depth |
|
||||
| CWE-22 (Path Traversal) | Enhanced detection: rejects `/../` components, safely handles `..` in directory names |
|
||||
| CWE-78 (Command Injection) | Control character filtering |
|
||||
| CWE-59 (Link Following) | Symlink detection before privileged operations |
|
||||
| Apple File System Guidelines | Respects SIP, Read-Only Volumes, TCC |
|
||||
|
||||
### Security Development Lifecycle
|
||||
|
||||
- **Static Analysis:** `shellcheck` runs on all shell scripts.
|
||||
- **Code Review:** All changes are manually reviewed by maintainers.
|
||||
- **Dependency Scanning:** Minimal external dependencies, all carefully vetted.
|
||||
|
||||
### Known Limitations
|
||||
|
||||
| Limitation | Impact | Mitigation |
|
||||
|------------|--------|------------|
|
||||
| Requires `sudo` for system caches | Initial friction | Clear documentation explaining why |
|
||||
| 60-day rule may delay cleanup | Some orphans remain longer | Manual `mo uninstall` is always available |
|
||||
| No undo functionality | Deleted files are unrecoverable | Dry-run mode and warnings are clear |
|
||||
| English-only name matching | May miss non-English apps | Fallback to Bundle ID matching |
|
||||
|
||||
**Intentionally Out of Scope (Safety):**
|
||||
|
||||
- Automatic deletion of user documents/media.
|
||||
- Encryption key stores or password managers.
|
||||
- System configuration files (`/etc/*`).
|
||||
- Browser history or cookies.
|
||||
- Git repository cleanup.
|
||||
|
||||
---
|
||||
CI runs shellcheck and go vet on every push.
|
||||
|
||||
## Dependencies
|
||||
|
||||
### System Binaries
|
||||
System binaries used, all SIP protected:
|
||||
|
||||
Mole relies on standard, SIP-protected macOS system binaries:
|
||||
| Binary | For |
|
||||
| ------ | --- |
|
||||
| `plutil` | plist validation |
|
||||
| `tmutil` | Time Machine |
|
||||
| `dscacheutil` | cache rebuild |
|
||||
| `diskutil` | volume info |
|
||||
|
||||
| Binary | Purpose | Fallback |
|
||||
|--------|---------|----------|
|
||||
| `plutil` | Validate `.plist` integrity | Skip invalid plists |
|
||||
| `tmutil` | Time Machine interaction | Skip TM cleanup |
|
||||
| `dscacheutil` | System cache rebuilding | Optional optimization |
|
||||
| `diskutil` | Volume information | Skip network volumes |
|
||||
Go libs in analyze-go:
|
||||
|
||||
### Go Dependencies (Interactive Tools)
|
||||
| Lib | Version | License |
|
||||
| --- | ------- | ------- |
|
||||
| `bubbletea` | v0.23+ | MIT |
|
||||
| `lipgloss` | v0.6+ | MIT |
|
||||
| `gopsutil` | v3.22+ | BSD-3 |
|
||||
| `xxhash` | v2.2+ | BSD-2 |
|
||||
|
||||
The compiled Go binary (`analyze-go`) includes:
|
||||
Versions are pinned. No CVEs. Binaries built via GitHub Actions.
|
||||
|
||||
| Library | Version | Purpose | License |
|
||||
|---------|---------|---------|---------|
|
||||
| `bubbletea` | v0.23+ | TUI framework | MIT |
|
||||
| `lipgloss` | v0.6+ | Terminal styling | MIT |
|
||||
| `gopsutil` | v3.22+ | System metrics | BSD-3 |
|
||||
| `xxhash` | v2.2+ | Fast hashing | BSD-2 |
|
||||
## Limitations
|
||||
|
||||
**Supply Chain Security:**
|
||||
| What | Impact | Workaround |
|
||||
| ---- | ------ | ---------- |
|
||||
| Needs sudo for system caches | Annoying first time | Docs explain why |
|
||||
| 60-day wait for orphans | Some junk stays longer | Use `mo uninstall` manually |
|
||||
| No undo | Gone is gone | Use dry-run first |
|
||||
| English names only | Might miss localized apps | Falls back to bundle ID |
|
||||
|
||||
- All dependencies are pinned to specific versions.
|
||||
- Regular security audits.
|
||||
- No transitive dependencies with known CVEs.
|
||||
- **Automated Releases**: Binaries are compiled and signed via GitHub Actions.
|
||||
- **Source Only**: The repository contains no pre-compiled binaries.
|
||||
**Won't touch:**
|
||||
|
||||
---
|
||||
|
||||
**Our Commitment:** This document certifies that Mole implements industry-standard defensive programming practices and strictly adheres to macOS security guidelines. We prioritize system stability and data integrity above all else.
|
||||
|
||||
*For security concerns or vulnerability reports, please open an issue or contact the maintainers directly.*
|
||||
- Your documents or media
|
||||
- Password managers or keychains
|
||||
- Files under `/etc`
|
||||
- Browser history/cookies
|
||||
- Git repos
|
||||
|
||||
142
bin/clean.sh
@@ -164,10 +164,6 @@ start_section() {
|
||||
echo ""
|
||||
echo -e "${PURPLE_BOLD}${ICON_ARROW} $1${NC}"
|
||||
|
||||
if [[ -t 1 ]]; then
|
||||
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Preparing..."
|
||||
fi
|
||||
|
||||
if [[ "$DRY_RUN" == "true" ]]; then
|
||||
ensure_user_file "$EXPORT_LIST_FILE"
|
||||
echo "" >> "$EXPORT_LIST_FILE"
|
||||
@@ -308,9 +304,6 @@ safe_clean() {
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Always stop spinner before outputting results
|
||||
stop_section_spinner
|
||||
|
||||
local description
|
||||
local -a targets
|
||||
|
||||
@@ -361,6 +354,7 @@ safe_clean() {
|
||||
local show_scan_feedback=false
|
||||
if [[ ${#targets[@]} -gt 20 && -t 1 ]]; then
|
||||
show_scan_feedback=true
|
||||
stop_section_spinner
|
||||
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning ${#targets[@]} items..."
|
||||
fi
|
||||
|
||||
@@ -371,6 +365,7 @@ safe_clean() {
|
||||
if should_protect_path "$path"; then
|
||||
skip=true
|
||||
((skipped_count++))
|
||||
log_operation "clean" "SKIPPED" "$path" "protected"
|
||||
fi
|
||||
|
||||
[[ "$skip" == "true" ]] && continue
|
||||
@@ -378,6 +373,7 @@ safe_clean() {
|
||||
if is_path_whitelisted "$path"; then
|
||||
skip=true
|
||||
((skipped_count++))
|
||||
log_operation "clean" "SKIPPED" "$path" "whitelist"
|
||||
fi
|
||||
[[ "$skip" == "true" ]] && continue
|
||||
[[ -e "$path" ]] && existing_paths+=("$path")
|
||||
@@ -387,7 +383,7 @@ safe_clean() {
|
||||
stop_section_spinner
|
||||
fi
|
||||
|
||||
debug_log "Cleaning: $description (${#existing_paths[@]} items)"
|
||||
debug_log "Cleaning: $description, ${#existing_paths[@]} items"
|
||||
|
||||
# Enhanced debug output with risk level and details
|
||||
if [[ "${MO_DEBUG:-}" == "1" && ${#existing_paths[@]} -gt 0 ]]; then
|
||||
@@ -437,6 +433,8 @@ safe_clean() {
|
||||
if [[ -t 1 ]]; then MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning items..."; fi
|
||||
fi
|
||||
|
||||
local cleaning_spinner_started=false
|
||||
|
||||
# For larger batches, precompute sizes in parallel for better UX/stat accuracy.
|
||||
if [[ ${#existing_paths[@]} -gt 3 ]]; then
|
||||
local temp_dir
|
||||
@@ -528,6 +526,11 @@ safe_clean() {
|
||||
fi
|
||||
|
||||
# Read results back in original order.
|
||||
# Start spinner for cleaning phase
|
||||
if [[ "$DRY_RUN" != "true" && ${#existing_paths[@]} -gt 0 && -t 1 ]]; then
|
||||
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Cleaning..."
|
||||
cleaning_spinner_started=true
|
||||
fi
|
||||
idx=0
|
||||
if [[ ${#existing_paths[@]} -gt 0 ]]; then
|
||||
for path in "${existing_paths[@]}"; do
|
||||
@@ -536,12 +539,8 @@ safe_clean() {
|
||||
read -r size count < "$result_file" 2> /dev/null || true
|
||||
local removed=0
|
||||
if [[ "$DRY_RUN" != "true" ]]; then
|
||||
if [[ -L "$path" ]]; then
|
||||
rm "$path" 2> /dev/null && removed=1
|
||||
else
|
||||
if safe_remove "$path" true; then
|
||||
removed=1
|
||||
fi
|
||||
if safe_remove "$path" true; then
|
||||
removed=1
|
||||
fi
|
||||
else
|
||||
removed=1
|
||||
@@ -564,6 +563,11 @@ safe_clean() {
|
||||
fi
|
||||
|
||||
else
|
||||
# Start spinner for cleaning phase (small batch)
|
||||
if [[ "$DRY_RUN" != "true" && ${#existing_paths[@]} -gt 0 && -t 1 ]]; then
|
||||
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Cleaning..."
|
||||
cleaning_spinner_started=true
|
||||
fi
|
||||
local idx=0
|
||||
if [[ ${#existing_paths[@]} -gt 0 ]]; then
|
||||
for path in "${existing_paths[@]}"; do
|
||||
@@ -573,12 +577,8 @@ safe_clean() {
|
||||
|
||||
local removed=0
|
||||
if [[ "$DRY_RUN" != "true" ]]; then
|
||||
if [[ -L "$path" ]]; then
|
||||
rm "$path" 2> /dev/null && removed=1
|
||||
else
|
||||
if safe_remove "$path" true; then
|
||||
removed=1
|
||||
fi
|
||||
if safe_remove "$path" true; then
|
||||
removed=1
|
||||
fi
|
||||
else
|
||||
removed=1
|
||||
@@ -600,7 +600,7 @@ safe_clean() {
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "$show_spinner" == "true" ]]; then
|
||||
if [[ "$show_spinner" == "true" || "$cleaning_spinner_started" == "true" ]]; then
|
||||
stop_section_spinner
|
||||
fi
|
||||
|
||||
@@ -610,10 +610,13 @@ safe_clean() {
|
||||
debug_log "Permission denied while cleaning: $description"
|
||||
fi
|
||||
if [[ $removal_failed_count -gt 0 && "$DRY_RUN" != "true" ]]; then
|
||||
debug_log "Skipped $removal_failed_count items (permission denied or in use) for: $description"
|
||||
debug_log "Skipped $removal_failed_count items, permission denied or in use, for: $description"
|
||||
fi
|
||||
|
||||
if [[ $removed_any -eq 1 ]]; then
|
||||
# Stop spinner before output
|
||||
stop_section_spinner
|
||||
|
||||
local size_human=$(bytes_to_human "$((total_size_kb * 1024))")
|
||||
|
||||
local label="$description"
|
||||
@@ -622,7 +625,7 @@ safe_clean() {
|
||||
fi
|
||||
|
||||
if [[ "$DRY_RUN" == "true" ]]; then
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $label ${YELLOW}($size_human dry)${NC}"
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $label${NC}, ${YELLOW}$size_human dry${NC}"
|
||||
|
||||
local paths_temp=$(create_temp_file)
|
||||
|
||||
@@ -673,7 +676,7 @@ safe_clean() {
|
||||
' | while IFS='|' read -r display_path total_size child_count; do
|
||||
local size_human=$(bytes_to_human "$((total_size * 1024))")
|
||||
if [[ $child_count -gt 1 ]]; then
|
||||
echo "$display_path # $size_human ($child_count items)" >> "$EXPORT_LIST_FILE"
|
||||
echo "$display_path # $size_human, $child_count items" >> "$EXPORT_LIST_FILE"
|
||||
else
|
||||
echo "$display_path # $size_human" >> "$EXPORT_LIST_FILE"
|
||||
fi
|
||||
@@ -682,7 +685,7 @@ safe_clean() {
|
||||
rm -f "$paths_temp"
|
||||
fi
|
||||
else
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} $label ${GREEN}($size_human)${NC}"
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} $label${NC}, ${GREEN}$size_human${NC}"
|
||||
fi
|
||||
((files_cleaned += total_count))
|
||||
((total_size_cleaned += total_size_kb))
|
||||
@@ -694,6 +697,10 @@ safe_clean() {
|
||||
}
|
||||
|
||||
start_cleanup() {
|
||||
# Set current command for operation logging
|
||||
export MOLE_CURRENT_COMMAND="clean"
|
||||
log_operation_session_start "clean"
|
||||
|
||||
if [[ -t 1 ]]; then
|
||||
printf '\033[2J\033[H'
|
||||
fi
|
||||
@@ -702,11 +709,11 @@ start_cleanup() {
|
||||
echo ""
|
||||
|
||||
if [[ "$DRY_RUN" != "true" && -t 0 ]]; then
|
||||
echo -e "${GRAY}${ICON_SOLID} Use --dry-run to preview, --whitelist to manage protected paths${NC}"
|
||||
echo -e "${GRAY}${ICON_WARNING} Use --dry-run to preview, --whitelist to manage protected paths${NC}"
|
||||
fi
|
||||
|
||||
if [[ "$DRY_RUN" == "true" ]]; then
|
||||
echo -e "${YELLOW}Dry Run Mode${NC} - Preview only, no deletions"
|
||||
echo -e "${YELLOW}Dry Run Mode${NC}, Preview only, no deletions"
|
||||
echo ""
|
||||
SYSTEM_CLEAN=false
|
||||
|
||||
@@ -727,42 +734,53 @@ EOF
|
||||
fi
|
||||
|
||||
if [[ -t 0 ]]; then
|
||||
echo -ne "${PURPLE}${ICON_ARROW}${NC} System caches need sudo — ${GREEN}Enter${NC} continue, ${GRAY}Space${NC} skip: "
|
||||
|
||||
local choice
|
||||
choice=$(read_key)
|
||||
|
||||
# ESC/Q aborts, Space skips, Enter enables system cleanup.
|
||||
if [[ "$choice" == "QUIT" ]]; then
|
||||
echo -e " ${GRAY}Canceled${NC}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ "$choice" == "SPACE" ]]; then
|
||||
echo -e " ${GRAY}Skipped${NC}"
|
||||
if sudo -n true 2> /dev/null; then
|
||||
SYSTEM_CLEAN=true
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Admin access already available"
|
||||
echo ""
|
||||
SYSTEM_CLEAN=false
|
||||
elif [[ "$choice" == "ENTER" ]]; then
|
||||
printf "\r\033[K" # Clear the prompt line
|
||||
if ensure_sudo_session "System cleanup requires admin access"; then
|
||||
SYSTEM_CLEAN=true
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Admin access granted"
|
||||
else
|
||||
echo -ne "${PURPLE}${ICON_ARROW}${NC} System caches need sudo. ${GREEN}Enter${NC} continue, ${GRAY}Space${NC} skip: "
|
||||
|
||||
local choice
|
||||
choice=$(read_key)
|
||||
|
||||
# ESC/Q aborts, Space skips, Enter enables system cleanup.
|
||||
if [[ "$choice" == "QUIT" ]]; then
|
||||
echo -e " ${GRAY}Canceled${NC}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ "$choice" == "SPACE" ]]; then
|
||||
echo -e " ${GRAY}Skipped${NC}"
|
||||
echo ""
|
||||
SYSTEM_CLEAN=false
|
||||
elif [[ "$choice" == "ENTER" ]]; then
|
||||
printf "\r\033[K" # Clear the prompt line
|
||||
if ensure_sudo_session "System cleanup requires admin access"; then
|
||||
SYSTEM_CLEAN=true
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Admin access granted"
|
||||
echo ""
|
||||
else
|
||||
SYSTEM_CLEAN=false
|
||||
echo ""
|
||||
echo -e "${YELLOW}Authentication failed${NC}, continuing with user-level cleanup"
|
||||
fi
|
||||
else
|
||||
SYSTEM_CLEAN=false
|
||||
echo -e " ${GRAY}Skipped${NC}"
|
||||
echo ""
|
||||
echo -e "${YELLOW}Authentication failed${NC}, continuing with user-level cleanup"
|
||||
fi
|
||||
else
|
||||
SYSTEM_CLEAN=false
|
||||
echo -e " ${GRAY}Skipped${NC}"
|
||||
echo ""
|
||||
fi
|
||||
else
|
||||
SYSTEM_CLEAN=false
|
||||
echo ""
|
||||
echo "Running in non-interactive mode"
|
||||
echo " ${ICON_LIST} System-level cleanup skipped (requires interaction)"
|
||||
if sudo -n true 2> /dev/null; then
|
||||
SYSTEM_CLEAN=true
|
||||
echo " ${ICON_LIST} System-level cleanup enabled, sudo session active"
|
||||
else
|
||||
SYSTEM_CLEAN=false
|
||||
echo " ${ICON_LIST} System-level cleanup skipped, requires sudo"
|
||||
fi
|
||||
echo " ${ICON_LIST} User-level cleanup will proceed automatically"
|
||||
echo ""
|
||||
fi
|
||||
@@ -774,7 +792,7 @@ perform_cleanup() {
|
||||
if [[ "${MOLE_TEST_MODE:-0}" == "1" ]]; then
|
||||
test_mode_enabled=true
|
||||
if [[ "$DRY_RUN" == "true" ]]; then
|
||||
echo -e "${YELLOW}Dry Run Mode${NC} - Preview only, no deletions"
|
||||
echo -e "${YELLOW}Dry Run Mode${NC}, Preview only, no deletions"
|
||||
echo ""
|
||||
fi
|
||||
echo -e "${GREEN}${ICON_LIST}${NC} User app cache"
|
||||
@@ -867,7 +885,7 @@ perform_cleanup() {
|
||||
fda_status=$?
|
||||
if [[ $fda_status -eq 1 ]]; then
|
||||
echo ""
|
||||
echo -e "${YELLOW}${ICON_WARNING}${NC} ${GRAY}Tip: Grant Full Disk Access to your terminal in System Settings for best results${NC}"
|
||||
echo -e "${GRAY}${ICON_WARNING}${NC} ${GRAY}Tip: Grant Full Disk Access to your terminal in System Settings for best results${NC}"
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -892,7 +910,7 @@ perform_cleanup() {
|
||||
if [[ ${#WHITELIST_WARNINGS[@]} -gt 0 ]]; then
|
||||
echo ""
|
||||
for warning in "${WHITELIST_WARNINGS[@]}"; do
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Whitelist: $warning"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Whitelist: $warning"
|
||||
done
|
||||
fi
|
||||
|
||||
@@ -955,6 +973,7 @@ perform_cleanup() {
|
||||
# ===== 12. Orphaned app data cleanup (60+ days inactive, skip protected vendors) =====
|
||||
start_section "Uninstalled app data"
|
||||
clean_orphaned_app_data
|
||||
clean_orphaned_system_services
|
||||
end_section
|
||||
|
||||
# ===== 13. Apple Silicon optimizations =====
|
||||
@@ -970,6 +989,11 @@ perform_cleanup() {
|
||||
clean_time_machine_failed_backups
|
||||
end_section
|
||||
|
||||
# ===== 16. Large files to review (report only) =====
|
||||
start_section "Large files to review"
|
||||
check_large_file_candidates
|
||||
end_section
|
||||
|
||||
# ===== Final summary =====
|
||||
echo ""
|
||||
|
||||
@@ -1032,7 +1056,7 @@ perform_cleanup() {
|
||||
else
|
||||
summary_status="info"
|
||||
if [[ "$DRY_RUN" == "true" ]]; then
|
||||
summary_details+=("No significant reclaimable space detected (system already clean).")
|
||||
summary_details+=("No significant reclaimable space detected, system already clean.")
|
||||
else
|
||||
summary_details+=("System was already clean; no additional space freed.")
|
||||
fi
|
||||
@@ -1043,6 +1067,9 @@ perform_cleanup() {
|
||||
set -e
|
||||
fi
|
||||
|
||||
# Log session end with summary
|
||||
log_operation_session_end "clean" "$files_cleaned" "$total_size_cleaned"
|
||||
|
||||
print_summary_block "$summary_heading" "${summary_details[@]}"
|
||||
printf '\n'
|
||||
}
|
||||
@@ -1055,6 +1082,7 @@ main() {
|
||||
;;
|
||||
"--dry-run" | "-n")
|
||||
DRY_RUN=true
|
||||
export MOLE_DRY_RUN=1
|
||||
;;
|
||||
"--whitelist")
|
||||
source "$SCRIPT_DIR/../lib/manage/whitelist.sh"
|
||||
|
||||
@@ -84,7 +84,7 @@ if [[ $# -eq 0 ]]; then
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Removed stale completion entries from $config_file"
|
||||
echo ""
|
||||
fi
|
||||
log_error "mole not found in PATH - install Mole before enabling completion"
|
||||
log_error "mole not found in PATH, install Mole before enabling completion"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
@@ -387,7 +387,7 @@ select_installers() {
|
||||
scroll_indicator=" ${GRAY}[${current_pos}/${total_items}]${NC}"
|
||||
fi
|
||||
|
||||
printf "${PURPLE_BOLD}Select Installers to Remove${NC}%s ${GRAY}- ${selected_human} ($selected_count selected)${NC}\n" "$scroll_indicator"
|
||||
printf "${PURPLE_BOLD}Select Installers to Remove${NC}%s ${GRAY}, ${selected_human}, ${selected_count} selected${NC}\n" "$scroll_indicator"
|
||||
printf "%s\n" "$clear_line"
|
||||
|
||||
# Calculate visible range
|
||||
@@ -546,13 +546,13 @@ delete_selected_installers() {
|
||||
local file_size="${INSTALLER_SIZES[$idx]}"
|
||||
local size_human
|
||||
size_human=$(bytes_to_human "$file_size")
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} $(basename "$file_path") ${GRAY}(${size_human})${NC}"
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} $(basename "$file_path") ${GRAY}, ${size_human}${NC}"
|
||||
fi
|
||||
done
|
||||
|
||||
# Confirm deletion
|
||||
echo ""
|
||||
echo -ne "${PURPLE}${ICON_ARROW}${NC} Delete ${#selected_indices[@]} installer(s) (${confirm_human}) ${GREEN}Enter${NC} confirm, ${GRAY}ESC${NC} cancel: "
|
||||
echo -ne "${PURPLE}${ICON_ARROW}${NC} Delete ${#selected_indices[@]} installers, ${confirm_human} ${GREEN}Enter${NC} confirm, ${GRAY}ESC${NC} cancel: "
|
||||
|
||||
IFS= read -r -s -n1 confirm || confirm=""
|
||||
case "$confirm" in
|
||||
@@ -655,7 +655,7 @@ show_summary() {
|
||||
local freed_mb
|
||||
freed_mb=$(echo "$total_size_freed_kb" | awk '{printf "%.2f", $1/1024}')
|
||||
|
||||
summary_details+=("Removed ${GREEN}$total_deleted${NC} installer(s), freed ${GREEN}${freed_mb}MB${NC}")
|
||||
summary_details+=("Removed ${GREEN}$total_deleted${NC} installers, freed ${GREEN}${freed_mb}MB${NC}")
|
||||
summary_details+=("Your Mac is cleaner now!")
|
||||
else
|
||||
summary_details+=("No installers were removed")
|
||||
|
||||
@@ -78,7 +78,7 @@ show_optimization_summary() {
|
||||
local total_applied=$((safe_count + confirm_count))
|
||||
|
||||
if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
|
||||
summary_title="Dry Run Complete - No Changes Made"
|
||||
summary_title="Dry Run Complete, No Changes Made"
|
||||
summary_details+=("Would apply ${YELLOW}${total_applied:-0}${NC} optimizations")
|
||||
summary_details+=("Run without ${YELLOW}--dry-run${NC} to apply these changes")
|
||||
else
|
||||
@@ -115,9 +115,9 @@ show_optimization_summary() {
|
||||
fi
|
||||
|
||||
if [[ -n "$key_stat" ]]; then
|
||||
summary_details+=("Applied ${GREEN}${total_applied:-0}${NC} optimizations — ${key_stat}")
|
||||
summary_details+=("Applied ${GREEN}${total_applied:-0}${NC} optimizations, ${key_stat}")
|
||||
else
|
||||
summary_details+=("Applied ${GREEN}${total_applied:-0}${NC} optimizations — all services tuned")
|
||||
summary_details+=("Applied ${GREEN}${total_applied:-0}${NC} optimizations, all services tuned")
|
||||
fi
|
||||
|
||||
local summary_line3=""
|
||||
@@ -126,11 +126,11 @@ show_optimization_summary() {
|
||||
if [[ -n "${AUTO_FIX_DETAILS:-}" ]]; then
|
||||
local detail_join
|
||||
detail_join=$(echo "${AUTO_FIX_DETAILS}" | paste -sd ", " -)
|
||||
[[ -n "$detail_join" ]] && summary_line3+=" — ${detail_join}"
|
||||
[[ -n "$detail_join" ]] && summary_line3+=": ${detail_join}"
|
||||
fi
|
||||
summary_details+=("$summary_line3")
|
||||
fi
|
||||
summary_details+=("System fully optimized — faster, more secure and responsive")
|
||||
summary_details+=("System fully optimized")
|
||||
fi
|
||||
|
||||
print_summary_block "$summary_title" "${summary_details[@]}"
|
||||
@@ -204,7 +204,7 @@ cleanup_path() {
|
||||
return
|
||||
fi
|
||||
if should_protect_path "$expanded_path"; then
|
||||
echo -e "${YELLOW}${ICON_WARNING}${NC} Protected $label"
|
||||
echo -e "${GRAY}${ICON_WARNING}${NC} Protected $label"
|
||||
return
|
||||
fi
|
||||
|
||||
@@ -226,12 +226,12 @@ cleanup_path() {
|
||||
|
||||
if [[ "$removed" == "true" ]]; then
|
||||
if [[ -n "$size_display" ]]; then
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} $label ${GREEN}(${size_display})${NC}"
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} $label${NC}, ${GREEN}${size_display}${NC}"
|
||||
else
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} $label"
|
||||
fi
|
||||
else
|
||||
echo -e "${YELLOW}${ICON_WARNING}${NC} Skipped $label ${GRAY}(grant Full Disk Access to your terminal and retry)${NC}"
|
||||
echo -e "${GRAY}${ICON_WARNING}${NC} Skipped $label${GRAY}, grant Full Disk Access to your terminal and retry${NC}"
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -252,7 +252,7 @@ collect_security_fix_actions() {
|
||||
fi
|
||||
if [[ "${GATEKEEPER_DISABLED:-}" == "true" ]]; then
|
||||
if ! is_whitelisted "gatekeeper"; then
|
||||
SECURITY_FIXES+=("gatekeeper|Enable Gatekeeper (App download protection)")
|
||||
SECURITY_FIXES+=("gatekeeper|Enable Gatekeeper, app download protection")
|
||||
fi
|
||||
fi
|
||||
if touchid_supported && ! touchid_configured; then
|
||||
@@ -304,7 +304,7 @@ apply_firewall_fix() {
|
||||
FIREWALL_DISABLED=false
|
||||
return 0
|
||||
fi
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Failed to enable firewall (check permissions)"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Failed to enable firewall, check permissions"
|
||||
return 1
|
||||
}
|
||||
|
||||
@@ -314,7 +314,7 @@ apply_gatekeeper_fix() {
|
||||
GATEKEEPER_DISABLED=false
|
||||
return 0
|
||||
fi
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Failed to enable Gatekeeper"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Failed to enable Gatekeeper"
|
||||
return 1
|
||||
}
|
||||
|
||||
@@ -327,7 +327,7 @@ apply_touchid_fix() {
|
||||
|
||||
perform_security_fixes() {
|
||||
if ! ensure_sudo_session "Security changes require admin access"; then
|
||||
echo -e "${YELLOW}${ICON_WARNING}${NC} Skipped security fixes (sudo denied)"
|
||||
echo -e "${GRAY}${ICON_WARNING}${NC} Skipped security fixes, sudo denied"
|
||||
return 1
|
||||
fi
|
||||
|
||||
@@ -357,6 +357,8 @@ cleanup_all() {
|
||||
stop_inline_spinner 2> /dev/null || true
|
||||
stop_sudo_session
|
||||
cleanup_temp_files
|
||||
# Log session end
|
||||
log_operation_session_end "optimize" "${OPTIMIZE_SAFE_COUNT:-0}" "0"
|
||||
}
|
||||
|
||||
handle_interrupt() {
|
||||
@@ -365,6 +367,9 @@ handle_interrupt() {
|
||||
}
|
||||
|
||||
main() {
|
||||
# Set current command for operation logging
|
||||
export MOLE_CURRENT_COMMAND="optimize"
|
||||
|
||||
local health_json
|
||||
for arg in "$@"; do
|
||||
case "$arg" in
|
||||
@@ -381,6 +386,8 @@ main() {
|
||||
esac
|
||||
done
|
||||
|
||||
log_operation_session_start "optimize"
|
||||
|
||||
trap cleanup_all EXIT
|
||||
trap handle_interrupt INT TERM
|
||||
|
||||
@@ -391,7 +398,7 @@ main() {
|
||||
|
||||
# Dry-run indicator.
|
||||
if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
|
||||
echo -e "${YELLOW}${ICON_DRY_RUN} DRY RUN MODE${NC} - No files will be modified\n"
|
||||
echo -e "${YELLOW}${ICON_DRY_RUN} DRY RUN MODE${NC}, No files will be modified\n"
|
||||
fi
|
||||
|
||||
if ! command -v jq > /dev/null 2>&1; then
|
||||
|
||||
11
bin/purge.sh
@@ -42,6 +42,10 @@ note_activity() {
|
||||
|
||||
# Main purge function
|
||||
start_purge() {
|
||||
# Set current command for operation logging
|
||||
export MOLE_CURRENT_COMMAND="purge"
|
||||
log_operation_session_start "purge"
|
||||
|
||||
# Clear screen for better UX
|
||||
if [[ -t 1 ]]; then
|
||||
printf '\033[2J\033[H'
|
||||
@@ -214,13 +218,16 @@ perform_purge() {
|
||||
summary_details+=("Free space now: $(get_free_space)")
|
||||
fi
|
||||
|
||||
# Log session end
|
||||
log_operation_session_end "purge" "${total_items_cleaned:-0}" "${total_size_cleaned:-0}"
|
||||
|
||||
print_summary_block "$summary_heading" "${summary_details[@]}"
|
||||
printf '\n'
|
||||
}
|
||||
|
||||
# Show help message
|
||||
show_help() {
|
||||
echo -e "${PURPLE_BOLD}Mole Purge${NC} - Clean old project build artifacts"
|
||||
echo -e "${PURPLE_BOLD}Mole Purge${NC}, Clean old project build artifacts"
|
||||
echo ""
|
||||
echo -e "${YELLOW}Usage:${NC} mo purge [options]"
|
||||
echo ""
|
||||
@@ -231,7 +238,7 @@ show_help() {
|
||||
echo ""
|
||||
echo -e "${YELLOW}Default Paths:${NC}"
|
||||
for path in "${DEFAULT_PURGE_SEARCH_PATHS[@]}"; do
|
||||
echo " - $path"
|
||||
echo " * $path"
|
||||
done
|
||||
}
|
||||
|
||||
|
||||
@@ -141,7 +141,7 @@ enable_touchid() {
|
||||
sudo mv "$temp_file" "$PAM_SUDO_FILE"
|
||||
log_success "Touch ID migrated to sudo_local"
|
||||
else
|
||||
log_success "Touch ID enabled (via sudo_local) - try: sudo ls"
|
||||
log_success "Touch ID enabled, via sudo_local, try: sudo ls"
|
||||
fi
|
||||
return 0
|
||||
else
|
||||
@@ -188,7 +188,7 @@ enable_touchid() {
|
||||
|
||||
# Apply the changes
|
||||
if sudo mv "$temp_file" "$PAM_SUDO_FILE" 2> /dev/null; then
|
||||
log_success "Touch ID enabled - try: sudo ls"
|
||||
log_success "Touch ID enabled, try: sudo ls"
|
||||
return 0
|
||||
else
|
||||
log_error "Failed to enable Touch ID"
|
||||
@@ -219,7 +219,7 @@ disable_touchid() {
|
||||
grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file"
|
||||
sudo mv "$temp_file" "$PAM_SUDO_FILE"
|
||||
fi
|
||||
echo -e "${GREEN}${ICON_SUCCESS} Touch ID disabled (removed from sudo_local)${NC}"
|
||||
echo -e "${GREEN}${ICON_SUCCESS} Touch ID disabled, removed from sudo_local${NC}"
|
||||
echo ""
|
||||
return 0
|
||||
else
|
||||
|
||||
@@ -137,6 +137,12 @@ scan_applications() {
|
||||
done < <(command find "$app_dir" -name "*.app" -maxdepth 3 -print0 2> /dev/null)
|
||||
done
|
||||
|
||||
if [[ ${#app_data_tuples[@]} -eq 0 ]]; then
|
||||
rm -f "$temp_file"
|
||||
printf "\r\033[K" >&2
|
||||
echo "No applications found to uninstall." >&2
|
||||
return 1
|
||||
fi
|
||||
# Pass 2: metadata + size in parallel (mdls is slow).
|
||||
local app_count=0
|
||||
local total_apps=${#app_data_tuples[@]}
|
||||
@@ -368,6 +374,8 @@ cleanup() {
|
||||
wait "$sudo_keepalive_pid" 2> /dev/null || true
|
||||
sudo_keepalive_pid=""
|
||||
fi
|
||||
# Log session end
|
||||
log_operation_session_end "uninstall" "${files_cleaned:-0}" "${total_size_cleaned:-0}"
|
||||
show_cursor
|
||||
exit "${1:-0}"
|
||||
}
|
||||
@@ -375,6 +383,10 @@ cleanup() {
|
||||
trap cleanup EXIT INT TERM
|
||||
|
||||
main() {
|
||||
# Set current command for operation logging
|
||||
export MOLE_CURRENT_COMMAND="uninstall"
|
||||
log_operation_session_start "uninstall"
|
||||
|
||||
local force_rescan=false
|
||||
# Global flags
|
||||
for arg in "$@"; do
|
||||
@@ -490,7 +502,7 @@ main() {
|
||||
rm -f "$apps_file"
|
||||
continue
|
||||
fi
|
||||
echo -e "${BLUE}${ICON_CONFIRM}${NC} Selected ${selection_count} app(s):"
|
||||
echo -e "${BLUE}${ICON_CONFIRM}${NC} Selected ${selection_count} apps:"
|
||||
local -a summary_rows=()
|
||||
local max_name_display_width=0
|
||||
local max_size_width=0
|
||||
|
||||
@@ -565,7 +565,7 @@ main() {
|
||||
continue
|
||||
fi
|
||||
# Show selected apps with clean alignment
|
||||
echo -e "${BLUE}${ICON_CONFIRM}${NC} Selected ${selection_count} app(s):"
|
||||
echo -e "${BLUE}${ICON_CONFIRM}${NC} Selected ${selection_count} apps:"
|
||||
local -a summary_rows=()
|
||||
local max_name_width=0
|
||||
local max_size_width=0
|
||||
|
||||
@@ -3,18 +3,19 @@ package main
import "time"

const (
maxEntries = 30
maxLargeFiles = 30
barWidth = 24
minLargeFileSize = 100 << 20
defaultViewport = 12
overviewCacheTTL = 7 * 24 * time.Hour
overviewCacheFile = "overview_sizes.json"
duTimeout = 30 * time.Second
mdlsTimeout = 5 * time.Second
maxConcurrentOverview = 8
batchUpdateSize = 100
cacheModTimeGrace = 30 * time.Minute
maxEntries = 30
maxLargeFiles = 20
barWidth = 24
spotlightMinFileSize = 100 << 20
largeFileWarmupMinSize = 1 << 20
defaultViewport = 12
overviewCacheTTL = 7 * 24 * time.Hour
overviewCacheFile = "overview_sizes.json"
duTimeout = 30 * time.Second
mdlsTimeout = 5 * time.Second
maxConcurrentOverview = 8
batchUpdateSize = 100
cacheModTimeGrace = 30 * time.Minute

// Worker pool limits.
minWorkers = 16
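These constants describe how the overview size cache behaves: entries age out after overviewCacheTTL, and cacheModTimeGrace gives a small window so a directory touched shortly after the cache was written does not immediately invalidate it. The sketch below only illustrates one plausible freshness rule built from those constants; the `overviewEntry` type and `isCacheFresh` helper are assumptions, not the repo's actual cache code.

```go
package main

import (
	"os"
	"time"
)

// overviewEntry is a hypothetical cached scan result; the on-disk layout of
// overview_sizes.json may differ.
type overviewEntry struct {
	Path      string
	TotalSize int64
	CachedAt  time.Time
}

// isCacheFresh sketches one plausible freshness rule: reuse a cached size
// while it is younger than the TTL and the directory has not been modified
// after the cache was written, allowing a small grace window.
func isCacheFresh(c overviewEntry, dir string) bool {
	if time.Since(c.CachedAt) > 7*24*time.Hour { // overviewCacheTTL
		return false
	}
	info, err := os.Stat(dir)
	if err != nil {
		return false
	}
	return !info.ModTime().After(c.CachedAt.Add(30 * time.Minute)) // cacheModTimeGrace
}
```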
|
||||
|
||||
@@ -3,6 +3,7 @@ package main
import (
"strings"
"testing"
"time"
)

func TestRuneWidth(t *testing.T) {
@@ -307,3 +308,42 @@ func TestCalculateNameWidth(t *testing.T) {
}
}
}

func TestFormatUnusedTime(t *testing.T) {
	now := time.Now().UTC()
	tests := []struct {
		name    string
		daysAgo int
		want    string
	}{
		{"zero time", -1, ""}, // Special case: will use time.Time{}
		{"recent file", 30, ""}, // < 90 days returns empty
		{"just under threshold", 89, ""}, // Boundary: 89 days still empty
		{"at 90 days", 90, ">3mo"}, // Boundary: exactly 90 days
		{"4 months", 120, ">4mo"},
		{"6 months", 180, ">6mo"},
		{"11 months", 330, ">11mo"},
		{"just under 1 year", 364, ">12mo"},
		{"exactly 1 year", 365, ">1yr"},
		{"18 months", 548, ">1yr"}, // Between 1 and 2 years
		{"just under 2 years", 729, ">1yr"},
		{"exactly 2 years", 730, ">2yr"},
		{"3 years", 1095, ">3yr"},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var lastAccess time.Time
			if tt.daysAgo >= 0 {
				// Use a fixed UTC baseline to avoid DST-related flakiness.
				lastAccess = now.Add(-time.Duration(tt.daysAgo) * 24 * time.Hour)
			}
			// If daysAgo < 0, lastAccess remains zero value

			got := formatUnusedTime(lastAccess)
			if got != tt.want {
				t.Errorf("formatUnusedTime(%d days ago) = %q, want %q", tt.daysAgo, got, tt.want)
			}
		})
	}
}
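The table above pins down formatUnusedTime's thresholds (empty under 90 days, ">Nmo" up to a year, ">Nyr" beyond) without showing the implementation, which is not part of this diff. A minimal version consistent with those expectations, with an illustrative name, could look like this:

```go
package main

import (
	"fmt"
	"time"
)

// formatUnusedTimeSketch mirrors the thresholds the test table pins down:
// empty for anything touched within 90 days, ">Nmo" up to one year, ">Nyr"
// beyond. The real formatUnusedTime may differ in detail.
func formatUnusedTimeSketch(lastAccess time.Time) string {
	if lastAccess.IsZero() {
		return ""
	}
	days := int(time.Since(lastAccess).Hours() / 24)
	switch {
	case days < 90:
		return ""
	case days < 365:
		return fmt.Sprintf(">%dmo", days/30)
	default:
		return fmt.Sprintf(">%dyr", days/365)
	}
}
```

With integer division, 90 days maps to ">3mo", 364 to ">12mo", and 730 to ">2yr", matching every row in the table.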
|
||||
|
||||
@@ -332,9 +332,9 @@ func (m *model) scheduleOverviewScans() tea.Cmd {
|
||||
if len(pendingIndices) > 0 {
|
||||
firstEntry := m.entries[pendingIndices[0]]
|
||||
if len(pendingIndices) == 1 {
|
||||
m.status = fmt.Sprintf("Scanning %s... (%d left)", firstEntry.Name, remaining)
|
||||
m.status = fmt.Sprintf("Scanning %s..., %d left", firstEntry.Name, remaining)
|
||||
} else {
|
||||
m.status = fmt.Sprintf("Scanning %d directories... (%d left)", len(pendingIndices), remaining)
|
||||
m.status = fmt.Sprintf("Scanning %d directories..., %d left", len(pendingIndices), remaining)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -584,7 +584,7 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
}
|
||||
|
||||
switch msg.String() {
|
||||
case "q", "ctrl+c":
|
||||
case "q", "ctrl+c", "Q":
|
||||
return m, tea.Quit
|
||||
case "esc":
|
||||
if m.showLargeFiles {
|
||||
@@ -592,7 +592,7 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
return m, nil
|
||||
}
|
||||
return m, tea.Quit
|
||||
case "up", "k":
|
||||
case "up", "k", "K":
|
||||
if m.showLargeFiles {
|
||||
if m.largeSelected > 0 {
|
||||
m.largeSelected--
|
||||
@@ -606,7 +606,7 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
m.offset = m.selected
|
||||
}
|
||||
}
|
||||
case "down", "j":
|
||||
case "down", "j", "J":
|
||||
if m.showLargeFiles {
|
||||
if m.largeSelected < len(m.largeFiles)-1 {
|
||||
m.largeSelected++
|
||||
@@ -622,12 +622,12 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
m.offset = m.selected - viewport + 1
|
||||
}
|
||||
}
|
||||
case "enter", "right", "l":
|
||||
case "enter", "right", "l", "L":
|
||||
if m.showLargeFiles {
|
||||
return m, nil
|
||||
}
|
||||
return m.enterSelectedDir()
|
||||
case "b", "left", "h":
|
||||
case "b", "left", "h", "B", "H":
|
||||
if m.showLargeFiles {
|
||||
m.showLargeFiles = false
|
||||
return m, nil
|
||||
@@ -679,7 +679,7 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
m.status = fmt.Sprintf("Scanned %s", humanizeBytes(m.totalSize))
|
||||
m.scanning = false
|
||||
return m, nil
|
||||
case "r":
|
||||
case "r", "R":
|
||||
m.multiSelected = make(map[string]bool)
|
||||
m.largeMultiSelected = make(map[string]bool)
|
||||
|
||||
@@ -728,7 +728,7 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
}
|
||||
m.status = fmt.Sprintf("Scanned %s", humanizeBytes(m.totalSize))
|
||||
}
|
||||
case "o":
|
||||
case "o", "O":
|
||||
// Open selected entries (multi-select aware).
|
||||
const maxBatchOpen = 20
|
||||
if m.showLargeFiles {
|
||||
@@ -736,7 +736,7 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
if len(m.largeMultiSelected) > 0 {
|
||||
count := len(m.largeMultiSelected)
|
||||
if count > maxBatchOpen {
|
||||
m.status = fmt.Sprintf("Too many items to open (max %d, selected %d)", maxBatchOpen, count)
|
||||
m.status = fmt.Sprintf("Too many items to open, max %d, selected %d", maxBatchOpen, count)
|
||||
return m, nil
|
||||
}
|
||||
for path := range m.largeMultiSelected {
|
||||
@@ -761,7 +761,7 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
if len(m.multiSelected) > 0 {
|
||||
count := len(m.multiSelected)
|
||||
if count > maxBatchOpen {
|
||||
m.status = fmt.Sprintf("Too many items to open (max %d, selected %d)", maxBatchOpen, count)
|
||||
m.status = fmt.Sprintf("Too many items to open, max %d, selected %d", maxBatchOpen, count)
|
||||
return m, nil
|
||||
}
|
||||
for path := range m.multiSelected {
|
||||
@@ -790,7 +790,7 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
if len(m.largeMultiSelected) > 0 {
|
||||
count := len(m.largeMultiSelected)
|
||||
if count > maxBatchReveal {
|
||||
m.status = fmt.Sprintf("Too many items to reveal (max %d, selected %d)", maxBatchReveal, count)
|
||||
m.status = fmt.Sprintf("Too many items to reveal, max %d, selected %d", maxBatchReveal, count)
|
||||
return m, nil
|
||||
}
|
||||
for path := range m.largeMultiSelected {
|
||||
@@ -815,7 +815,7 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
if len(m.multiSelected) > 0 {
|
||||
count := len(m.multiSelected)
|
||||
if count > maxBatchReveal {
|
||||
m.status = fmt.Sprintf("Too many items to reveal (max %d, selected %d)", maxBatchReveal, count)
|
||||
m.status = fmt.Sprintf("Too many items to reveal, max %d, selected %d", maxBatchReveal, count)
|
||||
return m, nil
|
||||
}
|
||||
for path := range m.multiSelected {
|
||||
@@ -860,7 +860,7 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
}
|
||||
}
|
||||
}
|
||||
m.status = fmt.Sprintf("%d selected (%s)", count, humanizeBytes(totalSize))
|
||||
m.status = fmt.Sprintf("%d selected, %s", count, humanizeBytes(totalSize))
|
||||
} else {
|
||||
m.status = fmt.Sprintf("Scanned %s", humanizeBytes(m.totalSize))
|
||||
}
|
||||
@@ -886,7 +886,7 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
|
||||
}
|
||||
}
|
||||
}
|
||||
m.status = fmt.Sprintf("%d selected (%s)", count, humanizeBytes(totalSize))
|
||||
m.status = fmt.Sprintf("%d selected, %s", count, humanizeBytes(totalSize))
|
||||
} else {
|
||||
m.status = fmt.Sprintf("Scanned %s", humanizeBytes(m.totalSize))
|
||||
}
|
||||
@@ -971,7 +971,9 @@ func (m model) enterSelectedDir() (tea.Model, tea.Cmd) {
|
||||
}
|
||||
selected := m.entries[m.selected]
|
||||
if selected.IsDir {
|
||||
m.history = append(m.history, snapshotFromModel(m))
|
||||
if len(m.history) == 0 || m.history[len(m.history)-1].Path != m.path {
|
||||
m.history = append(m.history, snapshotFromModel(m))
|
||||
}
|
||||
m.path = selected.Path
|
||||
m.selected = 0
|
||||
m.offset = 0
|
||||
@@ -1009,7 +1011,7 @@ func (m model) enterSelectedDir() (tea.Model, tea.Cmd) {
|
||||
}
|
||||
return m, tea.Batch(m.scanCmd(m.path), tickCmd())
|
||||
}
|
||||
m.status = fmt.Sprintf("File: %s (%s)", selected.Name, humanizeBytes(selected.Size))
|
||||
m.status = fmt.Sprintf("File: %s, %s", selected.Name, humanizeBytes(selected.Size))
|
||||
return m, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -37,6 +37,7 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in

largeFilesHeap := &largeFileHeap{}
heap.Init(largeFilesHeap)
largeFileMinSize := int64(largeFileWarmupMinSize)

// Worker pool sized for I/O-bound scanning.
numWorkers := max(runtime.NumCPU()*cpuMultiplier, minWorkers)
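numWorkers feeds a buffered-channel semaphore that bounds how many directory scans run concurrently; the `defer func() { <-sem }()` idiom in the later hunks of this file is the release side of that pattern. A stripped-down sketch of the same idea (names are illustrative, not the repo's):

```go
package main

import "sync"

// scanDirs bounds concurrency with a buffered channel used as a semaphore:
// each goroutine takes a slot before descending into a directory and releases
// it on exit.
func scanDirs(dirs []string, numWorkers int, scanOne func(string)) {
	sem := make(chan struct{}, numWorkers)
	var wg sync.WaitGroup
	for _, dir := range dirs {
		wg.Add(1)
		sem <- struct{}{} // acquire a worker slot
		go func(d string) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot
			scanOne(d)
		}(dir)
	}
	wg.Wait()
}
```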
|
||||
@@ -84,9 +85,13 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
for file := range largeFileChan {
if largeFilesHeap.Len() < maxLargeFiles {
heap.Push(largeFilesHeap, file)
if largeFilesHeap.Len() == maxLargeFiles {
atomic.StoreInt64(&largeFileMinSize, (*largeFilesHeap)[0].Size)
}
} else if file.Size > (*largeFilesHeap)[0].Size {
heap.Pop(largeFilesHeap)
heap.Push(largeFilesHeap, file)
atomic.StoreInt64(&largeFileMinSize, (*largeFilesHeap)[0].Size)
}
}
}()
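This goroutine keeps only the N largest files: a min-heap of size maxLargeFiles whose root is the current cut-off, published through the atomic largeFileMinSize so scanners can skip sending files that cannot make the list. The bounded top-N step, factored into a helper purely for illustration (the repo keeps it inline, and the type definitions below only mirror what the sketch needs):

```go
package main

import (
	"container/heap"
	"sync/atomic"
)

// fileEntry and largeFileHeap mirror the repo's types just enough for this
// sketch: a min-heap of files ordered by Size, so h[0] is always the smallest
// retained entry.
type fileEntry struct {
	Name string
	Path string
	Size int64
}

type largeFileHeap []fileEntry

func (h largeFileHeap) Len() int           { return len(h) }
func (h largeFileHeap) Less(i, j int) bool { return h[i].Size < h[j].Size }
func (h largeFileHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *largeFileHeap) Push(x any)        { *h = append(*h, x.(fileEntry)) }
func (h *largeFileHeap) Pop() any {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

// pushLargeFile keeps at most maxLargeFiles entries, always retaining the
// largest ones, and publishes the current cut-off (the heap root) through the
// atomic minSize so producers can pre-filter cheaply.
func pushLargeFile(h *largeFileHeap, f fileEntry, maxLargeFiles int, minSize *int64) {
	if h.Len() < maxLargeFiles {
		heap.Push(h, f)
		if h.Len() == maxLargeFiles {
			atomic.StoreInt64(minSize, (*h)[0].Size)
		}
		return
	}
	if f.Size > (*h)[0].Size {
		heap.Pop(h)
		heap.Push(h, f)
		atomic.StoreInt64(minSize, (*h)[0].Size)
	}
}
```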
|
||||
@@ -148,7 +153,7 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
|
||||
} else if cached, err := loadCacheFromDisk(path); err == nil {
|
||||
size = cached.TotalSize
|
||||
} else {
|
||||
size = calculateDirSizeConcurrent(path, largeFileChan, duSem, duQueueSem, filesScanned, dirsScanned, bytesScanned, currentPath)
|
||||
size = calculateDirSizeConcurrent(path, largeFileChan, &largeFileMinSize, duSem, duQueueSem, filesScanned, dirsScanned, bytesScanned, currentPath)
|
||||
}
|
||||
atomic.AddInt64(&total, size)
|
||||
atomic.AddInt64(dirsScanned, 1)
|
||||
@@ -200,7 +205,7 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
|
||||
defer wg.Done()
|
||||
defer func() { <-sem }()
|
||||
|
||||
size := calculateDirSizeConcurrent(path, largeFileChan, duSem, duQueueSem, filesScanned, dirsScanned, bytesScanned, currentPath)
|
||||
size := calculateDirSizeConcurrent(path, largeFileChan, &largeFileMinSize, duSem, duQueueSem, filesScanned, dirsScanned, bytesScanned, currentPath)
|
||||
atomic.AddInt64(&total, size)
|
||||
atomic.AddInt64(dirsScanned, 1)
|
||||
|
||||
@@ -233,8 +238,11 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
|
||||
LastAccess: getLastAccessTimeFromInfo(info),
|
||||
}
|
||||
// Track large files only.
|
||||
if !shouldSkipFileForLargeTracking(fullPath) && size >= minLargeFileSize {
|
||||
largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}
|
||||
if !shouldSkipFileForLargeTracking(fullPath) {
|
||||
minSize := atomic.LoadInt64(&largeFileMinSize)
|
||||
if size >= minSize {
|
||||
largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -256,8 +264,8 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
|
||||
largeFiles[i] = heap.Pop(largeFilesHeap).(fileEntry)
|
||||
}
|
||||
|
||||
// Use Spotlight for large files when available.
|
||||
if spotlightFiles := findLargeFilesWithSpotlight(root, minLargeFileSize); len(spotlightFiles) > 0 {
|
||||
// Use Spotlight for large files when it expands the list.
|
||||
if spotlightFiles := findLargeFilesWithSpotlight(root, spotlightMinFileSize); len(spotlightFiles) > len(largeFiles) {
|
||||
largeFiles = spotlightFiles
|
||||
}
|
||||
|
||||
@@ -430,7 +438,7 @@ func isInFoldedDir(path string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, duSem, duQueueSem chan struct{}, filesScanned, dirsScanned, bytesScanned *int64, currentPath *atomic.Value) int64 {
|
||||
func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, largeFileMinSize *int64, duSem, duQueueSem chan struct{}, filesScanned, dirsScanned, bytesScanned *int64, currentPath *atomic.Value) int64 {
|
||||
children, err := os.ReadDir(root)
|
||||
if err != nil {
|
||||
return 0
|
||||
@@ -488,7 +496,7 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, duS
|
||||
defer wg.Done()
|
||||
defer func() { <-sem }()
|
||||
|
||||
size := calculateDirSizeConcurrent(path, largeFileChan, duSem, duQueueSem, filesScanned, dirsScanned, bytesScanned, currentPath)
|
||||
size := calculateDirSizeConcurrent(path, largeFileChan, largeFileMinSize, duSem, duQueueSem, filesScanned, dirsScanned, bytesScanned, currentPath)
|
||||
atomic.AddInt64(&total, size)
|
||||
atomic.AddInt64(dirsScanned, 1)
|
||||
}(fullPath)
|
||||
@@ -505,8 +513,11 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, duS
|
||||
atomic.AddInt64(filesScanned, 1)
|
||||
atomic.AddInt64(bytesScanned, size)
|
||||
|
||||
if !shouldSkipFileForLargeTracking(fullPath) && size >= minLargeFileSize {
|
||||
largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}
|
||||
if !shouldSkipFileForLargeTracking(fullPath) && largeFileMinSize != nil {
|
||||
minSize := atomic.LoadInt64(largeFileMinSize)
|
||||
if size >= minSize {
|
||||
largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}
|
||||
}
|
||||
}
|
||||
|
||||
// Update current path occasionally to prevent UI jitter.
|
||||
@@ -583,7 +594,7 @@ func getDirectorySizeFromDuWithExclude(path string, excludePath string) (int64,
return 0, fmt.Errorf("du timeout after %v", duTimeout)
}
if stderr.Len() > 0 {
return 0, fmt.Errorf("du failed: %v (%s)", err, stderr.String())
return 0, fmt.Errorf("du failed: %v, %s", err, stderr.String())
}
return 0, fmt.Errorf("du failed: %v", err)
}
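getDirectorySizeFromDuWithExclude shells out to du under a hard deadline (duTimeout) and reports a timeout explicitly instead of a generic failure. The core of that pattern, reduced to a sketch: whether the real code uses exec.CommandContext exactly like this is an assumption, and the exclude handling is omitted.

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"os/exec"
	"strconv"
	"strings"
	"time"
)

// runDUWithTimeout runs `du -sk path` under a deadline and distinguishes
// timeouts from other failures; the helper name and exact structure are
// illustrative only.
func runDUWithTimeout(path string, timeout time.Duration) (int64, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	var stdout, stderr bytes.Buffer
	cmd := exec.CommandContext(ctx, "du", "-sk", path)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr

	if err := cmd.Run(); err != nil {
		if ctx.Err() == context.DeadlineExceeded {
			return 0, fmt.Errorf("du timeout after %v", timeout)
		}
		if stderr.Len() > 0 {
			return 0, fmt.Errorf("du failed: %v, %s", err, stderr.String())
		}
		return 0, fmt.Errorf("du failed: %v", err)
	}

	fields := strings.Fields(stdout.String())
	if len(fields) == 0 {
		return 0, fmt.Errorf("du produced no output")
	}
	kb, err := strconv.ParseInt(fields[0], 10, 64)
	if err != nil {
		return 0, err
	}
	return kb * 1024, nil
}
```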
|
||||
|
||||
@@ -86,7 +86,7 @@ func (m model) View() string {
|
||||
if m.scanning && percent >= 100 {
|
||||
percent = 99
|
||||
}
|
||||
progressPrefix = fmt.Sprintf(" %s(%.0f%%)%s", colorCyan, percent, colorReset)
|
||||
progressPrefix = fmt.Sprintf(" %s%.0f%%%s", colorCyan, percent, colorReset)
|
||||
}
|
||||
|
||||
fmt.Fprintf(&b, "%s%s%s%s Scanning%s: %s%s files%s, %s%s dirs%s, %s%s%s\n",
|
||||
@@ -112,7 +112,7 @@ func (m model) View() string {
|
||||
|
||||
if m.showLargeFiles {
|
||||
if len(m.largeFiles) == 0 {
|
||||
fmt.Fprintln(&b, " No large files found (>=100MB)")
|
||||
fmt.Fprintln(&b, " No large files found")
|
||||
} else {
|
||||
viewport := calculateViewport(m.height, true)
|
||||
start := max(m.largeOffset, 0)
|
||||
@@ -342,7 +342,7 @@ func (m model) View() string {
|
||||
} else if m.showLargeFiles {
|
||||
selectCount := len(m.largeMultiSelected)
|
||||
if selectCount > 0 {
|
||||
fmt.Fprintf(&b, "%s↑↓← | Space Select | R Refresh | O Open | F File | ⌫ Del(%d) | ← Back | Q Quit%s\n", colorGray, selectCount, colorReset)
|
||||
fmt.Fprintf(&b, "%s↑↓← | Space Select | R Refresh | O Open | F File | ⌫ Del %d | ← Back | Q Quit%s\n", colorGray, selectCount, colorReset)
|
||||
} else {
|
||||
fmt.Fprintf(&b, "%s↑↓← | Space Select | R Refresh | O Open | F File | ⌫ Del | ← Back | Q Quit%s\n", colorGray, colorReset)
|
||||
}
|
||||
@@ -351,13 +351,13 @@ func (m model) View() string {
|
||||
selectCount := len(m.multiSelected)
|
||||
if selectCount > 0 {
|
||||
if largeFileCount > 0 {
|
||||
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del(%d) | T Top(%d) | Q Quit%s\n", colorGray, selectCount, largeFileCount, colorReset)
|
||||
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del %d | T Top %d | Q Quit%s\n", colorGray, selectCount, largeFileCount, colorReset)
|
||||
} else {
|
||||
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del(%d) | Q Quit%s\n", colorGray, selectCount, colorReset)
|
||||
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del %d | Q Quit%s\n", colorGray, selectCount, colorReset)
|
||||
}
|
||||
} else {
|
||||
if largeFileCount > 0 {
|
||||
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del | T Top(%d) | Q Quit%s\n", colorGray, largeFileCount, colorReset)
|
||||
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del | T Top %d | Q Quit%s\n", colorGray, largeFileCount, colorReset)
|
||||
} else {
|
||||
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del | Q Quit%s\n", colorGray, colorReset)
|
||||
}
|
||||
@@ -390,12 +390,12 @@ func (m model) View() string {
|
||||
}
|
||||
|
||||
if deleteCount > 1 {
|
||||
fmt.Fprintf(&b, "%sDelete:%s %d items (%s) %sPress Enter to confirm | ESC cancel%s\n",
|
||||
fmt.Fprintf(&b, "%sDelete:%s %d items, %s %sPress Enter to confirm | ESC cancel%s\n",
|
||||
colorRed, colorReset,
|
||||
deleteCount, humanizeBytes(totalDeleteSize),
|
||||
colorGray, colorReset)
|
||||
} else {
|
||||
fmt.Fprintf(&b, "%sDelete:%s %s (%s) %sPress Enter to confirm | ESC cancel%s\n",
|
||||
fmt.Fprintf(&b, "%sDelete:%s %s, %s %sPress Enter to confirm | ESC cancel%s\n",
|
||||
colorRed, colorReset,
|
||||
m.deleteTarget.Name, humanizeBytes(m.deleteTarget.Size),
|
||||
colorGray, colorReset)
|
||||
|
||||
@@ -276,7 +276,8 @@ func (c *Collector) Collect() (MetricsSnapshot, error) {
|
||||
collect(func() (err error) { proxyStats = collectProxy(); return nil })
|
||||
collect(func() (err error) { batteryStats, _ = collectBatteries(); return nil })
|
||||
collect(func() (err error) { thermalStats = collectThermal(); return nil })
|
||||
collect(func() (err error) { sensorStats, _ = collectSensors(); return nil })
|
||||
// Sensors disabled - CPU temp already shown in CPU card
|
||||
// collect(func() (err error) { sensorStats, _ = collectSensors(); return nil })
|
||||
collect(func() (err error) { gpuStats, err = c.collectGPU(now); return })
|
||||
collect(func() (err error) {
|
||||
// Bluetooth is slow; cache for 30s.
|
||||
|
||||
@@ -10,8 +10,6 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/shirou/gopsutil/v4/sensors"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -283,29 +281,3 @@ func collectThermal() ThermalStatus {
|
||||
|
||||
return thermal
|
||||
}
|
||||
|
||||
func collectSensors() ([]SensorReading, error) {
|
||||
temps, err := sensors.SensorsTemperatures()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var out []SensorReading
|
||||
for _, t := range temps {
|
||||
if t.Temperature <= 0 || t.Temperature > 150 {
|
||||
continue
|
||||
}
|
||||
out = append(out, SensorReading{
|
||||
Label: prettifyLabel(t.SensorKey),
|
||||
Value: t.Temperature,
|
||||
Unit: "°C",
|
||||
})
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func prettifyLabel(key string) string {
|
||||
key = strings.TrimSpace(key)
|
||||
key = strings.TrimPrefix(key, "TC")
|
||||
key = strings.ReplaceAll(key, "_", " ")
|
||||
return key
|
||||
}
|
||||
|
||||
@@ -69,7 +69,6 @@ func calculateHealthScore(cpu CPUStatus, mem MemoryStatus, disks []DiskStatus, d
|
||||
issues = append(issues, "High Memory")
|
||||
}
|
||||
|
||||
// Memory pressure penalty.
|
||||
// Memory pressure penalty.
|
||||
switch mem.Pressure {
|
||||
case "warn":
|
||||
@@ -159,7 +158,8 @@ func formatUptime(secs uint64) string {
hours := (secs % 86400) / 3600
mins := (secs % 3600) / 60
if days > 0 {
return fmt.Sprintf("%dd %dh %dm", days, hours, mins)
// Only show days and hours when uptime is over 1 day (skip minutes for brevity)
return fmt.Sprintf("%dd %dh", days, hours)
}
if hours > 0 {
return fmt.Sprintf("%dh %dm", hours, mins)

@@ -52,8 +52,8 @@ func TestFormatUptime(t *testing.T) {
if got := formatUptime(3600 + 120); got != "1h 2m" {
t.Fatalf("expected \"1h 2m\", got %s", got)
}
if got := formatUptime(86400*2 + 3600*3 + 60*5); got != "2d 3h 5m" {
t.Fatalf("expected \"2d 3h 5m\", got %s", got)
if got := formatUptime(86400*2 + 3600*3 + 60*5); got != "2d 3h" {
t.Fatalf("expected \"2d 3h\", got %s", got)
}
}
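After this change the whole function collapses to a three-way format. A self-contained version matching the updated test is sketched below; the final minutes-only branch is an assumption, since it lies outside the hunk shown above.

```go
package main

import "fmt"

// formatUptimeSketch reflects the behavior after this change: once uptime
// passes a day, minutes are dropped. The last branch is assumed, not shown
// in the diff.
func formatUptimeSketch(secs uint64) string {
	days := secs / 86400
	hours := (secs % 86400) / 3600
	mins := (secs % 3600) / 60
	if days > 0 {
		return fmt.Sprintf("%dd %dh", days, hours)
	}
	if hours > 0 {
		return fmt.Sprintf("%dh %dm", hours, mins)
	}
	return fmt.Sprintf("%dm", mins)
}
```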
|
||||
|
||||
|
||||
181
cmd/status/metrics_test.go
Normal file
@@ -0,0 +1,181 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewRingBuffer(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
capacity int
|
||||
}{
|
||||
{"small buffer", 5},
|
||||
{"standard buffer", 120},
|
||||
{"single element", 1},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
rb := NewRingBuffer(tt.capacity)
|
||||
if rb == nil {
|
||||
t.Fatal("NewRingBuffer returned nil")
|
||||
}
|
||||
if rb.cap != tt.capacity {
|
||||
t.Errorf("NewRingBuffer(%d).cap = %d, want %d", tt.capacity, rb.cap, tt.capacity)
|
||||
}
|
||||
if rb.size != 0 {
|
||||
t.Errorf("NewRingBuffer(%d).size = %d, want 0", tt.capacity, rb.size)
|
||||
}
|
||||
if rb.index != 0 {
|
||||
t.Errorf("NewRingBuffer(%d).index = %d, want 0", tt.capacity, rb.index)
|
||||
}
|
||||
if len(rb.data) != tt.capacity {
|
||||
t.Errorf("len(NewRingBuffer(%d).data) = %d, want %d", tt.capacity, len(rb.data), tt.capacity)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRingBuffer_EmptyBuffer(t *testing.T) {
|
||||
rb := NewRingBuffer(5)
|
||||
got := rb.Slice()
|
||||
|
||||
if got != nil {
|
||||
t.Errorf("Slice() on empty buffer = %v, want nil", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRingBuffer_AddWithinCapacity(t *testing.T) {
|
||||
rb := NewRingBuffer(5)
|
||||
|
||||
// Add 3 elements (less than capacity)
|
||||
rb.Add(1.0)
|
||||
rb.Add(2.0)
|
||||
rb.Add(3.0)
|
||||
|
||||
if rb.size != 3 {
|
||||
t.Errorf("size after 3 adds = %d, want 3", rb.size)
|
||||
}
|
||||
|
||||
got := rb.Slice()
|
||||
want := []float64{1.0, 2.0, 3.0}
|
||||
|
||||
if !slices.Equal(got, want) {
|
||||
t.Errorf("Slice() = %v, want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRingBuffer_ExactCapacity(t *testing.T) {
|
||||
rb := NewRingBuffer(5)
|
||||
|
||||
// Fill exactly to capacity
|
||||
for i := 1; i <= 5; i++ {
|
||||
rb.Add(float64(i))
|
||||
}
|
||||
|
||||
if rb.size != 5 {
|
||||
t.Errorf("size after filling to capacity = %d, want 5", rb.size)
|
||||
}
|
||||
|
||||
got := rb.Slice()
|
||||
want := []float64{1.0, 2.0, 3.0, 4.0, 5.0}
|
||||
|
||||
if !slices.Equal(got, want) {
|
||||
t.Errorf("Slice() = %v, want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRingBuffer_WrapAround(t *testing.T) {
|
||||
rb := NewRingBuffer(5)
|
||||
|
||||
// Add 7 elements to trigger wrap-around (2 past capacity)
|
||||
// Internal state after: data=[6, 7, 3, 4, 5], index=2, size=5
|
||||
// Oldest element is at index 2 (value 3)
|
||||
for i := 1; i <= 7; i++ {
|
||||
rb.Add(float64(i))
|
||||
}
|
||||
|
||||
if rb.size != 5 {
|
||||
t.Errorf("size after wrap-around = %d, want 5", rb.size)
|
||||
}
|
||||
|
||||
// Verify index points to oldest element position
|
||||
if rb.index != 2 {
|
||||
t.Errorf("index after adding 7 elements to cap-5 buffer = %d, want 2", rb.index)
|
||||
}
|
||||
|
||||
got := rb.Slice()
|
||||
// Should return chronological order: oldest (3) to newest (7)
|
||||
want := []float64{3.0, 4.0, 5.0, 6.0, 7.0}
|
||||
|
||||
if !slices.Equal(got, want) {
|
||||
t.Errorf("Slice() = %v, want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRingBuffer_MultipleWrapArounds(t *testing.T) {
|
||||
rb := NewRingBuffer(3)
|
||||
|
||||
// Add 10 elements (wraps multiple times)
|
||||
for i := 1; i <= 10; i++ {
|
||||
rb.Add(float64(i))
|
||||
}
|
||||
|
||||
got := rb.Slice()
|
||||
// Should have the last 3 values: 8, 9, 10
|
||||
want := []float64{8.0, 9.0, 10.0}
|
||||
|
||||
if !slices.Equal(got, want) {
|
||||
t.Errorf("Slice() after 10 adds to cap-3 buffer = %v, want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRingBuffer_SingleElementBuffer(t *testing.T) {
|
||||
rb := NewRingBuffer(1)
|
||||
|
||||
rb.Add(5.0)
|
||||
if got := rb.Slice(); !slices.Equal(got, []float64{5.0}) {
|
||||
t.Errorf("Slice() = %v, want [5.0]", got)
|
||||
}
|
||||
|
||||
// Overwrite the single element
|
||||
rb.Add(10.0)
|
||||
if got := rb.Slice(); !slices.Equal(got, []float64{10.0}) {
|
||||
t.Errorf("Slice() after overwrite = %v, want [10.0]", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRingBuffer_SliceReturnsNewSlice(t *testing.T) {
	rb := NewRingBuffer(3)
	rb.Add(1.0)
	rb.Add(2.0)

	slice1 := rb.Slice()
	slice2 := rb.Slice()

	// Modify slice1 and verify slice2 is unaffected
	// This ensures Slice() returns a copy, not a reference to internal data
	slice1[0] = 999.0

	if slice2[0] == 999.0 {
		t.Error("Slice() should return a new copy, not a reference to internal data")
	}
}

func TestRingBuffer_NegativeAndZeroValues(t *testing.T) {
	rb := NewRingBuffer(4)

	// Test that negative and zero values are handled correctly
	rb.Add(-5.0)
	rb.Add(0.0)
	rb.Add(0.0)
	rb.Add(3.5)

	got := rb.Slice()
	want := []float64{-5.0, 0.0, 0.0, 3.5}

	if !slices.Equal(got, want) {
		t.Errorf("Slice() with negative/zero values = %v, want %v", got, want)
	}
}
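The new test file exercises a fixed-capacity ring buffer (NewRingBuffer, Add, Slice) whose implementation is not part of this diff. An implementation consistent with every assertion above — capacity-bounded, overwriting the oldest sample once full, Slice returning a chronological copy and nil when empty — could look like the following sketch; the real type in cmd/status may differ in detail.

```go
package main

// RingBuffer holds the last `cap` samples, overwriting the oldest value once
// the buffer is full. This is a sketch consistent with the tests above.
type RingBuffer struct {
	data  []float64
	cap   int
	size  int
	index int // next write position; once full, also the oldest element
}

func NewRingBuffer(capacity int) *RingBuffer {
	return &RingBuffer{data: make([]float64, capacity), cap: capacity}
}

// Add appends a sample, overwriting the oldest one when the buffer is full.
func (rb *RingBuffer) Add(v float64) {
	rb.data[rb.index] = v
	rb.index = (rb.index + 1) % rb.cap
	if rb.size < rb.cap {
		rb.size++
	}
}

// Slice returns the buffered samples oldest-to-newest as a fresh copy,
// or nil when nothing has been added yet.
func (rb *RingBuffer) Slice() []float64 {
	if rb.size == 0 {
		return nil
	}
	start := 0
	if rb.size == rb.cap {
		start = rb.index
	}
	out := make([]float64, 0, rb.size)
	for i := 0; i < rb.size; i++ {
		out = append(out, rb.data[(start+i)%rb.cap])
	}
	return out
}
```

Tracing the wrap-around test: after seven Adds to a capacity-5 buffer, index is 2 and Slice starts reading at that position, yielding 3 through 7 in order, exactly as the test expects.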
|
||||
@@ -131,7 +131,7 @@ type cardData struct {
|
||||
}
|
||||
|
||||
func renderHeader(m MetricsSnapshot, errMsg string, animFrame int, termWidth int, catHidden bool) string {
|
||||
title := titleStyle.Render("Mole Status")
|
||||
title := titleStyle.Render("Status")
|
||||
|
||||
scoreStyle := getScoreStyle(m.HealthScore)
|
||||
scoreText := subtleStyle.Render("Health ") + scoreStyle.Render(fmt.Sprintf("● %d", m.HealthScore))
|
||||
@@ -145,7 +145,7 @@ func renderHeader(m MetricsSnapshot, errMsg string, animFrame int, termWidth int
|
||||
cpuInfo := m.Hardware.CPUModel
|
||||
// Append GPU core count when available.
|
||||
if len(m.GPU) > 0 && m.GPU[0].CoreCount > 0 {
|
||||
cpuInfo += fmt.Sprintf(" (%dGPU)", m.GPU[0].CoreCount)
|
||||
cpuInfo += fmt.Sprintf(", %dGPU", m.GPU[0].CoreCount)
|
||||
}
|
||||
infoParts = append(infoParts, cpuInfo)
|
||||
}
|
||||
@@ -165,6 +165,9 @@ func renderHeader(m MetricsSnapshot, errMsg string, animFrame int, termWidth int
|
||||
if m.Hardware.OSVersion != "" {
|
||||
infoParts = append(infoParts, m.Hardware.OSVersion)
|
||||
}
|
||||
if m.Uptime != "" {
|
||||
infoParts = append(infoParts, subtleStyle.Render("up "+m.Uptime))
|
||||
}
|
||||
|
||||
headerLine := title + " " + scoreText + " " + strings.Join(infoParts, " · ")
|
||||
|
||||
@@ -201,15 +204,6 @@ func getScoreStyle(score int) lipgloss.Style {
|
||||
}
|
||||
}
|
||||
|
||||
func hasSensorData(sensors []SensorReading) bool {
|
||||
for _, s := range sensors {
|
||||
if s.Note == "" && s.Value > 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func renderCPUCard(cpu CPUStatus, thermal ThermalStatus) cardData {
|
||||
var lines []string
|
||||
|
||||
@@ -224,7 +218,7 @@ func renderCPUCard(cpu CPUStatus, thermal ThermalStatus) cardData {
|
||||
lines = append(lines, fmt.Sprintf("Total %s %s", usageBar, headerText))
|
||||
|
||||
if cpu.PerCoreEstimated {
|
||||
lines = append(lines, subtleStyle.Render("Per-core data unavailable (using averaged load)"))
|
||||
lines = append(lines, subtleStyle.Render("Per-core data unavailable, using averaged load"))
|
||||
} else if len(cpu.PerCore) > 0 {
|
||||
type coreUsage struct {
|
||||
idx int
|
||||
@@ -245,10 +239,10 @@ func renderCPUCard(cpu CPUStatus, thermal ThermalStatus) cardData {
|
||||
|
||||
// Load line at the end
|
||||
if cpu.PCoreCount > 0 && cpu.ECoreCount > 0 {
|
||||
lines = append(lines, fmt.Sprintf("Load %.2f / %.2f / %.2f (%dP+%dE)",
|
||||
lines = append(lines, fmt.Sprintf("Load %.2f / %.2f / %.2f, %dP+%dE",
|
||||
cpu.Load1, cpu.Load5, cpu.Load15, cpu.PCoreCount, cpu.ECoreCount))
|
||||
} else {
|
||||
lines = append(lines, fmt.Sprintf("Load %.2f / %.2f / %.2f (%d cores)",
|
||||
lines = append(lines, fmt.Sprintf("Load %.2f / %.2f / %.2f, %d cores",
|
||||
cpu.Load1, cpu.Load5, cpu.Load15, cpu.LogicalCPU))
|
||||
}
|
||||
|
||||
@@ -276,7 +270,7 @@ func renderMemoryCard(mem MemoryStatus) cardData {
|
||||
if mem.SwapTotal > 0 {
|
||||
swapPercent = (float64(mem.SwapUsed) / float64(mem.SwapTotal)) * 100.0
|
||||
}
|
||||
swapText := fmt.Sprintf("(%s/%s)", humanBytesCompact(mem.SwapUsed), humanBytesCompact(mem.SwapTotal))
|
||||
swapText := fmt.Sprintf("%s/%s", humanBytesCompact(mem.SwapUsed), humanBytesCompact(mem.SwapTotal))
|
||||
lines = append(lines, fmt.Sprintf("Swap %s %5.1f%% %s", progressBar(swapPercent), swapPercent, swapText))
|
||||
|
||||
lines = append(lines, fmt.Sprintf("Total %s / %s", humanBytes(mem.Used), humanBytes(mem.Total)))
|
||||
@@ -367,7 +361,7 @@ func formatDiskLine(label string, d DiskStatus) string {
|
||||
bar := progressBar(d.UsedPercent)
|
||||
used := humanBytesShort(d.Used)
|
||||
total := humanBytesShort(d.Total)
|
||||
return fmt.Sprintf("%-6s %s %5.1f%% (%s/%s)", label, bar, d.UsedPercent, used, total)
|
||||
return fmt.Sprintf("%-6s %s %5.1f%%, %s/%s", label, bar, d.UsedPercent, used, total)
|
||||
}
|
||||
|
||||
func ioBar(rate float64) string {
|
||||
@@ -411,9 +405,10 @@ func buildCards(m MetricsSnapshot, width int) []cardData {
|
||||
renderProcessCard(m.TopProcesses),
|
||||
renderNetworkCard(m.Network, m.NetworkHistory, m.Proxy, width),
|
||||
}
|
||||
if hasSensorData(m.Sensors) {
|
||||
cards = append(cards, renderSensorsCard(m.Sensors))
|
||||
}
|
||||
// Sensors card disabled - redundant with CPU temp
|
||||
// if hasSensorData(m.Sensors) {
|
||||
// cards = append(cards, renderSensorsCard(m.Sensors))
|
||||
// }
|
||||
return cards
|
||||
}
|
||||
|
||||
@@ -600,20 +595,6 @@ func renderBatteryCard(batts []BatteryStatus, thermal ThermalStatus) cardData {
|
||||
return cardData{icon: iconBattery, title: "Power", lines: lines}
|
||||
}
|
||||
|
||||
func renderSensorsCard(sensors []SensorReading) cardData {
|
||||
var lines []string
|
||||
for _, s := range sensors {
|
||||
if s.Note != "" {
|
||||
continue
|
||||
}
|
||||
lines = append(lines, fmt.Sprintf("%-12s %s", shorten(s.Label, 12), colorizeTemp(s.Value)+s.Unit))
|
||||
}
|
||||
if len(lines) == 0 {
|
||||
lines = append(lines, subtleStyle.Render("No sensors"))
|
||||
}
|
||||
return cardData{icon: iconSensors, title: "Sensors", lines: lines}
|
||||
}
|
||||
|
||||
func renderCard(data cardData, width int, height int) string {
|
||||
titleText := data.icon + " " + data.title
|
||||
lineLen := max(width-lipgloss.Width(titleText)-2, 4)
|
||||
|
||||
12
install.sh
@@ -165,7 +165,7 @@ resolve_source_dir() {
|
||||
url="https://github.com/tw93/mole/archive/refs/tags/${branch}.tar.gz"
|
||||
fi
|
||||
|
||||
start_line_spinner "Fetching Mole source (${branch})..."
|
||||
start_line_spinner "Fetching Mole source, ${branch}..."
|
||||
if command -v curl > /dev/null 2>&1; then
|
||||
if curl -fsSL --connect-timeout 10 --max-time 60 -o "$tmp/mole.tar.gz" "$url" 2> /dev/null; then
|
||||
if tar -xzf "$tmp/mole.tar.gz" -C "$tmp" 2> /dev/null; then
|
||||
@@ -509,7 +509,7 @@ download_binary() {
|
||||
log_success "Downloaded ${binary_name} binary"
|
||||
else
|
||||
if [[ -t 1 ]]; then stop_line_spinner; fi
|
||||
log_warning "Could not download ${binary_name} binary (v${version}), trying local build"
|
||||
log_warning "Could not download ${binary_name} binary, v${version}, trying local build"
|
||||
if build_binary_from_source "$binary_name" "$target_path"; then
|
||||
return 0
|
||||
fi
|
||||
@@ -659,9 +659,9 @@ print_usage_summary() {
|
||||
local message="Mole ${action} successfully"
|
||||
|
||||
if [[ "$action" == "updated" && -n "$previous_version" && -n "$new_version" && "$previous_version" != "$new_version" ]]; then
|
||||
message+=" (${previous_version} -> ${new_version})"
|
||||
message+=", ${previous_version} -> ${new_version}"
|
||||
elif [[ -n "$new_version" ]]; then
|
||||
message+=" (version ${new_version})"
|
||||
message+=", version ${new_version}"
|
||||
fi
|
||||
|
||||
log_confirm "$message"
|
||||
@@ -763,7 +763,7 @@ perform_update() {
|
||||
fi
|
||||
|
||||
if [[ "$installed_version" == "$target_version" ]]; then
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Already on latest version ($installed_version)"
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Already on latest version, $installed_version"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
@@ -794,7 +794,7 @@ perform_update() {
|
||||
updated_version="$target_version"
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Updated to latest version ($updated_version)"
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Updated to latest version, $updated_version"
|
||||
}
|
||||
|
||||
parse_args "$@"
|
||||
|
||||
@@ -48,7 +48,7 @@ check_touchid_sudo() {
|
||||
fi
|
||||
|
||||
if [[ "$is_supported" == "true" ]]; then
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Touch ID ${YELLOW}Not configured for sudo${NC}"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Touch ID ${YELLOW}Not configured for sudo${NC}"
|
||||
export TOUCHID_NOT_CONFIGURED=true
|
||||
fi
|
||||
fi
|
||||
@@ -57,13 +57,12 @@ check_touchid_sudo() {
|
||||
check_rosetta() {
|
||||
# Check whitelist
|
||||
if command -v is_whitelisted > /dev/null && is_whitelisted "check_rosetta"; then return; fi
|
||||
# Check Rosetta 2 (for Apple Silicon Macs)
|
||||
# Check Rosetta 2 (for Apple Silicon Macs) - informational only, not auto-fixed
|
||||
if [[ "$(uname -m)" == "arm64" ]]; then
|
||||
if [[ -f "/Library/Apple/usr/share/rosetta/rosetta" ]]; then
|
||||
echo -e " ${GREEN}✓${NC} Rosetta 2 Intel app translation ready"
|
||||
else
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Rosetta 2 ${YELLOW}Intel app support missing${NC}"
|
||||
export ROSETTA_NOT_INSTALLED=true
|
||||
echo -e " ${GRAY}${ICON_EMPTY}${NC} Rosetta 2 ${GRAY}Not installed${NC}"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
@@ -79,7 +78,7 @@ check_git_config() {
|
||||
if [[ -n "$git_name" && -n "$git_email" ]]; then
|
||||
echo -e " ${GREEN}✓${NC} Git Global identity configured"
|
||||
else
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Git ${YELLOW}User identity not set${NC}"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Git ${YELLOW}User identity not set${NC}"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
@@ -119,7 +118,7 @@ check_firewall() {
|
||||
if [[ "$firewall_output" == *"State = 1"* ]] || [[ "$firewall_output" == *"State = 2"* ]]; then
|
||||
echo -e " ${GREEN}✓${NC} Firewall Network protection enabled"
|
||||
else
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Firewall ${YELLOW}Network protection disabled${NC}"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Firewall ${YELLOW}Network protection disabled${NC}"
|
||||
export FIREWALL_DISABLED=true
|
||||
fi
|
||||
}
|
||||
@@ -134,7 +133,7 @@ check_gatekeeper() {
|
||||
echo -e " ${GREEN}✓${NC} Gatekeeper App download protection active"
|
||||
unset GATEKEEPER_DISABLED
|
||||
else
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Gatekeeper ${YELLOW}App security disabled${NC}"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Gatekeeper ${YELLOW}App security disabled${NC}"
|
||||
export GATEKEEPER_DISABLED=true
|
||||
fi
|
||||
fi
|
||||
@@ -149,7 +148,7 @@ check_sip() {
|
||||
if echo "$sip_status" | grep -q "enabled"; then
|
||||
echo -e " ${GREEN}✓${NC} SIP System integrity protected"
|
||||
else
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} SIP ${YELLOW}System protection disabled${NC}"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} SIP ${YELLOW}System protection disabled${NC}"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
@@ -276,7 +275,7 @@ check_macos_update() {
|
||||
export MACOS_UPDATE_AVAILABLE="$updates_available"
|
||||
|
||||
if [[ "$updates_available" == "true" ]]; then
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} macOS ${YELLOW}Update available${NC}"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} macOS ${YELLOW}Update available${NC}"
|
||||
else
|
||||
echo -e " ${GREEN}✓${NC} macOS System up to date"
|
||||
fi
|
||||
@@ -344,7 +343,7 @@ check_mole_update() {
# Compare versions
if [[ "$(printf '%s\n' "$current_version" "$latest_version" | sort -V | head -1)" == "$current_version" ]]; then
export MOLE_UPDATE_AVAILABLE="true"
echo -e " ${YELLOW}${ICON_WARNING}${NC} Mole ${YELLOW}${latest_version} available${NC} (running ${current_version})"
echo -e " ${GRAY}${ICON_WARNING}${NC} Mole ${YELLOW}${latest_version} available${NC}, running ${current_version}"
else
echo -e " ${GREEN}✓${NC} Mole Latest version ${current_version}"
fi
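The `sort -V | head -1` trick asks whether current_version sorts at or below latest_version. Expressed in Go purely as a hedged illustration of the same dotted-numeric comparison (this is not code from the repo, and pre-release suffixes are out of scope):

```go
package main

import (
	"strconv"
	"strings"
)

// versionLessOrEqual reproduces what the `sort -V | head -1` check asks for
// simple "X.Y.Z" strings: does a sort at or below b?
func versionLessOrEqual(a, b string) bool {
	as, bs := strings.Split(a, "."), strings.Split(b, ".")
	for i := 0; i < len(as) || i < len(bs); i++ {
		var ai, bi int
		if i < len(as) {
			ai, _ = strconv.Atoi(as[i])
		}
		if i < len(bs) {
			bi, _ = strconv.Atoi(bs[i])
		}
		if ai != bi {
			return ai < bi
		}
	}
	return true
}
```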
|
||||
@@ -406,9 +405,9 @@ check_disk_space() {
|
||||
export DISK_FREE_GB=$free_num
|
||||
|
||||
if [[ $free_num -lt 20 ]]; then
|
||||
echo -e " ${RED}✗${NC} Disk Space ${RED}${free_gb}GB free${NC} (Critical)"
|
||||
echo -e " ${RED}✗${NC} Disk Space ${RED}${free_gb}GB free${NC}, Critical"
|
||||
elif [[ $free_num -lt 50 ]]; then
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Disk Space ${YELLOW}${free_gb}GB free${NC} (Low)"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Disk Space ${YELLOW}${free_gb}GB free${NC}, Low"
|
||||
else
|
||||
echo -e " ${GREEN}✓${NC} Disk Space ${free_gb}GB free"
|
||||
fi
|
||||
@@ -452,9 +451,9 @@ check_memory_usage() {
|
||||
((used_percent < 0)) && used_percent=0
|
||||
|
||||
if [[ $used_percent -gt 90 ]]; then
|
||||
echo -e " ${RED}✗${NC} Memory ${RED}${used_percent}% used${NC} (Critical)"
|
||||
echo -e " ${RED}✗${NC} Memory ${RED}${used_percent}% used${NC}, Critical"
|
||||
elif [[ $used_percent -gt 80 ]]; then
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Memory ${YELLOW}${used_percent}% used${NC} (High)"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Memory ${YELLOW}${used_percent}% used${NC}, High"
|
||||
else
|
||||
echo -e " ${GREEN}✓${NC} Memory ${used_percent}% used"
|
||||
fi
|
||||
@@ -484,7 +483,7 @@ check_login_items() {
|
||||
fi
|
||||
|
||||
if [[ $login_items_count -gt 15 ]]; then
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Login Items ${YELLOW}${login_items_count} apps${NC}"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Login Items ${YELLOW}${login_items_count} apps${NC}"
|
||||
elif [[ $login_items_count -gt 0 ]]; then
|
||||
echo -e " ${GREEN}✓${NC} Login Items ${login_items_count} apps"
|
||||
else
|
||||
@@ -548,9 +547,9 @@ check_cache_size() {
|
||||
local cache_size_int=$(echo "$cache_size_gb" | cut -d'.' -f1)
|
||||
|
||||
if [[ $cache_size_int -gt 10 ]]; then
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Cache Size ${YELLOW}${cache_size_gb}GB${NC} cleanable"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Cache Size ${YELLOW}${cache_size_gb}GB${NC} cleanable"
|
||||
elif [[ $cache_size_int -gt 5 ]]; then
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Cache Size ${YELLOW}${cache_size_gb}GB${NC} cleanable"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Cache Size ${YELLOW}${cache_size_gb}GB${NC} cleanable"
|
||||
else
|
||||
echo -e " ${GREEN}✓${NC} Cache Size ${cache_size_gb}GB"
|
||||
fi
|
||||
@@ -568,7 +567,7 @@ check_swap_usage() {
|
||||
if [[ "$swap_used" == *"G"* ]]; then
|
||||
local swap_gb=${swap_num%.*}
|
||||
if [[ $swap_gb -gt 2 ]]; then
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Swap Usage ${YELLOW}${swap_used}${NC} (High)"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Swap Usage ${YELLOW}${swap_used}${NC}, High"
|
||||
else
|
||||
echo -e " ${GREEN}✓${NC} Swap Usage ${swap_used}"
|
||||
fi
|
||||
|
||||
@@ -18,7 +18,7 @@ clean_xcode_tools() {
|
||||
safe_clean ~/Library/Developer/Xcode/DerivedData/* "Xcode derived data"
|
||||
safe_clean ~/Library/Developer/Xcode/Archives/* "Xcode archives"
|
||||
else
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Xcode is running, skipping DerivedData and Archives cleanup"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Xcode is running, skipping DerivedData and Archives cleanup"
|
||||
fi
|
||||
}
|
||||
# Code editors.
|
||||
@@ -107,7 +107,7 @@ clean_media_players() {
|
||||
fi
|
||||
fi
|
||||
if [[ "$has_offline_music" == "true" ]]; then
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Spotify cache protected · offline music detected"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Spotify cache protected · offline music detected"
|
||||
note_activity
|
||||
else
|
||||
safe_clean ~/Library/Caches/com.spotify.client/* "Spotify cache"
|
||||
|
||||
@@ -46,9 +46,9 @@ clean_ds_store_tree() {
|
||||
local size_human
|
||||
size_human=$(bytes_to_human "$total_bytes")
|
||||
if [[ "$DRY_RUN" == "true" ]]; then
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $label ${YELLOW}($file_count files, $size_human dry)${NC}"
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $label${NC}, ${YELLOW}$file_count files, $size_human dry${NC}"
|
||||
else
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} $label ${GREEN}($file_count files, $size_human)${NC}"
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} $label${NC}, ${GREEN}$file_count files, $size_human${NC}"
|
||||
fi
|
||||
local size_kb=$(((total_bytes + 1023) / 1024))
|
||||
((files_cleaned += file_count))
|
||||
@@ -70,7 +70,7 @@ scan_installed_apps() {
|
||||
current_time=$(get_epoch_seconds)
|
||||
local age=$((current_time - cache_mtime))
|
||||
if [[ $age -lt $cache_age_seconds ]]; then
|
||||
debug_log "Using cached app list (age: ${age}s)"
|
||||
debug_log "Using cached app list, age: ${age}s"
|
||||
if [[ -r "$cache_file" ]] && [[ -s "$cache_file" ]]; then
|
||||
if cat "$cache_file" > "$installed_bundles" 2> /dev/null; then
|
||||
return 0
|
||||
@@ -82,7 +82,7 @@ scan_installed_apps() {
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
debug_log "Scanning installed applications (cache expired or missing)"
|
||||
debug_log "Scanning installed applications, cache expired or missing"
|
||||
local -a app_dirs=(
|
||||
"/Applications"
|
||||
"/System/Applications"
|
||||
@@ -246,7 +246,7 @@ is_bundle_orphaned() {
|
||||
clean_orphaned_app_data() {
|
||||
if ! ls "$HOME/Library/Caches" > /dev/null 2>&1; then
|
||||
stop_section_spinner
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Skipped: No permission to access Library folders"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Skipped: No permission to access Library folders"
|
||||
return 0
|
||||
fi
|
||||
start_section_spinner "Scanning installed apps..."
|
||||
@@ -310,8 +310,210 @@ clean_orphaned_app_data() {
|
||||
stop_section_spinner
|
||||
if [[ $orphaned_count -gt 0 ]]; then
|
||||
local orphaned_mb=$(echo "$total_orphaned_kb" | awk '{printf "%.1f", $1/1024}')
|
||||
echo " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $orphaned_count items (~${orphaned_mb}MB)"
|
||||
echo " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $orphaned_count items, about ${orphaned_mb}MB"
|
||||
note_activity
|
||||
fi
|
||||
rm -f "$installed_bundles"
|
||||
}
|
||||
|
||||
# Clean orphaned system-level services (LaunchDaemons, LaunchAgents, PrivilegedHelperTools)
# These are left behind when apps are uninstalled but their system services remain
clean_orphaned_system_services() {
# Requires sudo
if ! sudo -n true 2> /dev/null; then
return 0
fi

start_section_spinner "Scanning orphaned system services..."

local orphaned_count=0
local total_orphaned_kb=0
local -a orphaned_files=()

# Known bundle ID patterns for common apps that leave system services behind
# Format: "file_pattern:app_check_command"
local -a known_orphan_patterns=(
# Sogou Input Method
"com.sogou.*:/Library/Input Methods/SogouInput.app"
# ClashX
"com.west2online.ClashX.*:/Applications/ClashX.app"
# ClashMac
"com.clashmac.*:/Applications/ClashMac.app"
# Nektony App Cleaner
"com.nektony.AC*:/Applications/App Cleaner & Uninstaller.app"
# i4tools (爱思助手)
"cn.i4tools.*:/Applications/i4Tools.app"
)

local mdfind_cache_file=""
_system_service_app_exists() {
local bundle_id="$1"
local app_path="$2"

[[ -n "$app_path" && -d "$app_path" ]] && return 0

if [[ -n "$app_path" ]]; then
local app_name
app_name=$(basename "$app_path")
case "$app_path" in
/Applications/*)
[[ -d "$HOME/Applications/$app_name" ]] && return 0
[[ -d "/Applications/Setapp/$app_name" ]] && return 0
;;
/Library/Input\ Methods/*)
[[ -d "$HOME/Library/Input Methods/$app_name" ]] && return 0
;;
esac
fi

if [[ -n "$bundle_id" ]] && [[ "$bundle_id" =~ ^[a-zA-Z0-9._-]+$ ]] && [[ ${#bundle_id} -ge 5 ]]; then
if [[ -z "$mdfind_cache_file" ]]; then
mdfind_cache_file=$(mktemp "${TMPDIR:-/tmp}/mole_mdfind_cache.XXXXXX")
register_temp_file "$mdfind_cache_file"
fi

if grep -Fxq "FOUND:$bundle_id" "$mdfind_cache_file" 2> /dev/null; then
return 0
fi
if ! grep -Fxq "NOTFOUND:$bundle_id" "$mdfind_cache_file" 2> /dev/null; then
local app_found
app_found=$(run_with_timeout 2 mdfind "kMDItemCFBundleIdentifier == '$bundle_id'" 2> /dev/null | head -1 || echo "")
if [[ -n "$app_found" ]]; then
echo "FOUND:$bundle_id" >> "$mdfind_cache_file"
return 0
fi
echo "NOTFOUND:$bundle_id" >> "$mdfind_cache_file"
fi
fi

return 1
}
|
||||
|
||||
# Scan system LaunchDaemons
|
||||
if [[ -d /Library/LaunchDaemons ]]; then
|
||||
while IFS= read -r -d '' plist; do
|
||||
local filename
|
||||
filename=$(basename "$plist")
|
||||
|
||||
# Skip Apple system files
|
||||
[[ "$filename" == com.apple.* ]] && continue
|
||||
|
||||
# Extract bundle ID from filename (remove .plist extension)
|
||||
local bundle_id="${filename%.plist}"
|
||||
|
||||
# Check against known orphan patterns
|
||||
for pattern_entry in "${known_orphan_patterns[@]}"; do
|
||||
local file_pattern="${pattern_entry%%:*}"
|
||||
local app_path="${pattern_entry#*:}"
|
||||
|
||||
# shellcheck disable=SC2053
|
||||
if [[ "$bundle_id" == $file_pattern ]] && [[ ! -d "$app_path" ]]; then
|
||||
if _system_service_app_exists "$bundle_id" "$app_path"; then
|
||||
continue
|
||||
fi
|
||||
orphaned_files+=("$plist")
|
||||
local size_kb
|
||||
size_kb=$(sudo du -sk "$plist" 2> /dev/null | awk '{print $1}' || echo "0")
|
||||
((total_orphaned_kb += size_kb))
|
||||
((orphaned_count++))
|
||||
break
|
||||
fi
|
||||
done
|
||||
done < <(sudo find /Library/LaunchDaemons -maxdepth 1 -name "*.plist" -print0 2> /dev/null)
|
||||
fi
|
||||
|
||||
# Scan system LaunchAgents
|
||||
if [[ -d /Library/LaunchAgents ]]; then
|
||||
while IFS= read -r -d '' plist; do
|
||||
local filename
|
||||
filename=$(basename "$plist")
|
||||
|
||||
# Skip Apple system files
|
||||
[[ "$filename" == com.apple.* ]] && continue
|
||||
|
||||
local bundle_id="${filename%.plist}"
|
||||
|
||||
for pattern_entry in "${known_orphan_patterns[@]}"; do
|
||||
local file_pattern="${pattern_entry%%:*}"
|
||||
local app_path="${pattern_entry#*:}"
|
||||
|
||||
# shellcheck disable=SC2053
|
||||
if [[ "$bundle_id" == $file_pattern ]] && [[ ! -d "$app_path" ]]; then
|
||||
if _system_service_app_exists "$bundle_id" "$app_path"; then
|
||||
continue
|
||||
fi
|
||||
orphaned_files+=("$plist")
|
||||
local size_kb
|
||||
size_kb=$(sudo du -sk "$plist" 2> /dev/null | awk '{print $1}' || echo "0")
|
||||
((total_orphaned_kb += size_kb))
|
||||
((orphaned_count++))
|
||||
break
|
||||
fi
|
||||
done
|
||||
done < <(sudo find /Library/LaunchAgents -maxdepth 1 -name "*.plist" -print0 2> /dev/null)
|
||||
fi
|
||||
|
||||
# Scan PrivilegedHelperTools
|
||||
if [[ -d /Library/PrivilegedHelperTools ]]; then
|
||||
while IFS= read -r -d '' helper; do
|
||||
local filename
|
||||
filename=$(basename "$helper")
|
||||
local bundle_id="$filename"
|
||||
|
||||
# Skip Apple system files
|
||||
[[ "$filename" == com.apple.* ]] && continue
|
||||
|
||||
for pattern_entry in "${known_orphan_patterns[@]}"; do
|
||||
local file_pattern="${pattern_entry%%:*}"
|
||||
local app_path="${pattern_entry#*:}"
|
||||
|
||||
# shellcheck disable=SC2053
|
||||
if [[ "$filename" == $file_pattern ]] && [[ ! -d "$app_path" ]]; then
|
||||
if _system_service_app_exists "$bundle_id" "$app_path"; then
|
||||
continue
|
||||
fi
|
||||
orphaned_files+=("$helper")
|
||||
local size_kb
|
||||
size_kb=$(sudo du -sk "$helper" 2> /dev/null | awk '{print $1}' || echo "0")
|
||||
((total_orphaned_kb += size_kb))
|
||||
((orphaned_count++))
|
||||
break
|
||||
fi
|
||||
done
|
||||
done < <(sudo find /Library/PrivilegedHelperTools -maxdepth 1 -type f -print0 2> /dev/null)
|
||||
fi

stop_section_spinner

# Report and clean
if [[ $orphaned_count -gt 0 ]]; then
echo -e " ${GRAY}${ICON_WARNING}${NC} Found $orphaned_count orphaned system services"

for orphan_file in "${orphaned_files[@]}"; do
local filename
filename=$(basename "$orphan_file")

if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
debug_log "[DRY RUN] Would remove orphaned service: $orphan_file"
else
# Unload if it's a LaunchDaemon/LaunchAgent
if [[ "$orphan_file" == *.plist ]]; then
sudo launchctl unload "$orphan_file" 2> /dev/null || true
fi
if safe_sudo_remove "$orphan_file"; then
debug_log "Removed orphaned service: $orphan_file"
fi
fi
done

local orphaned_kb_display
if [[ $total_orphaned_kb -gt 1024 ]]; then
orphaned_kb_display=$(echo "$total_orphaned_kb" | awk '{printf "%.1fMB", $1/1024}')
else
orphaned_kb_display="${total_orphaned_kb}KB"
fi
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $orphaned_count orphaned services, about $orphaned_kb_display"
note_activity
fi

}
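To preview what this pass would touch before anything is deleted, the dry-run flag used above can be paired with a manual look at loaded services. A minimal sketch, assuming the CLI entry point is mo and the cleanup command is mo clean (only MOLE_DRY_RUN itself comes from the hunk above):

sudo -v                                        # cache credentials so the sudo -n check above succeeds
MOLE_DRY_RUN=1 mo clean                        # dry run: removals go through debug_log instead of safe_sudo_remove
sudo launchctl list | grep -v 'com\.apple\.'   # manually inspect loaded third-party services
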
@@ -86,13 +86,13 @@ clean_homebrew() {
freed_space=$(printf '%s\n' "$brew_output" | grep -o "[0-9.]*[KMGT]B freed" 2> /dev/null | tail -1 || true)
if [[ $removed_count -gt 0 ]] || [[ -n "$freed_space" ]]; then
if [[ -n "$freed_space" ]]; then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew cleanup ${GREEN}($freed_space)${NC}"
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew cleanup${NC}, ${GREEN}$freed_space${NC}"
else
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew cleanup (${removed_count} items)"
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew cleanup, ${removed_count} items"
fi
fi
elif [[ $brew_exit -eq 124 ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Homebrew cleanup timed out · run ${GRAY}brew cleanup${NC} manually"
echo -e " ${GRAY}${ICON_WARNING}${NC} Homebrew cleanup timed out · run ${GRAY}brew cleanup${NC} manually"
fi
# Process autoremove output - only show if packages were removed
# Only surface autoremove output when packages were removed.
@@ -102,10 +102,10 @@ clean_homebrew() {
local removed_packages
removed_packages=$(printf '%s\n' "$autoremove_output" | grep -c "^Uninstalling" 2> /dev/null || true)
if [[ $removed_packages -gt 0 ]]; then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Removed orphaned dependencies (${removed_packages} packages)"
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Removed orphaned dependencies, ${removed_packages} packages"
fi
elif [[ $autoremove_exit -eq 124 ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Autoremove timed out · run ${GRAY}brew autoremove${NC} manually"
echo -e " ${GRAY}${ICON_WARNING}${NC} Autoremove timed out · run ${GRAY}brew autoremove${NC} manually"
fi
# Update cache timestamp on successful completion or when cleanup was intelligently skipped
# This prevents repeated cache size checks within the 7-day window

@@ -22,7 +22,7 @@ check_tcc_permissions() {
echo ""
echo -e "${BLUE}First-time setup${NC}"
echo -e "${GRAY}macOS will request permissions to access Library folders.${NC}"
echo -e "${GRAY}You may see ${GREEN}${#tcc_dirs[@]} permission dialogs${NC}${GRAY} - please approve them all.${NC}"
echo -e "${GRAY}You may see ${GREEN}${#tcc_dirs[@]} permission dialogs${NC}${GRAY}, please approve them all.${NC}"
echo ""
echo -ne "${PURPLE}${ICON_ARROW}${NC} Press ${GREEN}Enter${NC} to continue: "
read -r
@@ -75,12 +75,12 @@ clean_service_worker_cache() {
local cleaned_mb=$((cleaned_size / 1024))
if [[ "$DRY_RUN" != "true" ]]; then
if [[ $protected_count -gt 0 ]]; then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} $browser_name Service Worker (${cleaned_mb}MB, ${protected_count} protected)"
echo -e " ${GREEN}${ICON_SUCCESS}${NC} $browser_name Service Worker, ${cleaned_mb}MB, ${protected_count} protected"
else
echo -e " ${GREEN}${ICON_SUCCESS}${NC} $browser_name Service Worker (${cleaned_mb}MB)"
echo -e " ${GREEN}${ICON_SUCCESS}${NC} $browser_name Service Worker, ${cleaned_mb}MB"
fi
else
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $browser_name Service Worker (would clean ${cleaned_mb}MB, ${protected_count} protected)"
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $browser_name Service Worker, would clean ${cleaned_mb}MB, ${protected_count} protected"
fi
note_activity
if [[ "$spinner_was_running" == "true" ]]; then

137 lib/clean/dev.sh
@@ -45,7 +45,7 @@ clean_dev_npm() {
# Python/pip ecosystem caches.
clean_dev_python() {
if command -v pip3 > /dev/null 2>&1; then
clean_tool_cache "pip cache" bash -c 'pip3 cache purge >/dev/null 2>&1 || true'
clean_tool_cache "pip cache" bash -c 'pip3 cache purge > /dev/null 2>&1 || true'
note_activity
fi
safe_clean ~/.pyenv/cache/* "pyenv cache"
@@ -65,7 +65,7 @@ clean_dev_python() {
# Go build/module caches.
clean_dev_go() {
if command -v go > /dev/null 2>&1; then
clean_tool_cache "Go cache" bash -c 'go clean -modcache >/dev/null 2>&1 || true; go clean -cache >/dev/null 2>&1 || true'
clean_tool_cache "Go cache" bash -c 'go clean -modcache > /dev/null 2>&1 || true; go clean -cache > /dev/null 2>&1 || true'
note_activity
fi
}
@@ -77,12 +77,12 @@ clean_dev_rust() {
}

# Helper: Check for multiple versions in a directory.
# Args: $1=directory, $2=tool_name, $3+=additional_lines
# Args: $1=directory, $2=tool_name, $3=list_command, $4=remove_command
check_multiple_versions() {
local dir="$1"
local tool_name="$2"
shift 2
local -a additional_lines=("$@")
local list_cmd="${3:-}"
local remove_cmd="${4:-}"

if [[ ! -d "$dir" ]]; then
return 0
@@ -93,10 +93,11 @@ check_multiple_versions() {

if [[ "$count" -gt 1 ]]; then
note_activity
echo -e " Found ${GREEN}${count}${NC} ${tool_name}"
for line in "${additional_lines[@]}"; do
echo -e " $line"
done
local hint=""
if [[ -n "$list_cmd" ]]; then
hint=" · ${GRAY}${list_cmd}${NC}"
fi
echo -e " ${GRAY}${ICON_WARNING}${NC} ${tool_name}: ${count} found${hint}"
fi
}

@@ -107,8 +108,7 @@ check_rust_toolchains() {
check_multiple_versions \
"$HOME/.rustup/toolchains" \
"Rust toolchains" \
"You can list them with: ${GRAY}rustup toolchain list${NC}" \
"Remove unused with: ${GRAY}rustup toolchain uninstall <name>${NC}"
"rustup toolchain list"
}
# Docker caches (guarded by daemon check).
clean_dev_docker() {
@@ -170,8 +170,7 @@ check_android_ndk() {
check_multiple_versions \
"$HOME/Library/Android/sdk/ndk" \
"Android NDK versions" \
"Manage in: ${GRAY}Android Studio → SDK Manager${NC}" \
"Or manually at: ${GRAY}\$HOME/Library/Android/sdk/ndk${NC}"
"Android Studio → SDK Manager"
}

clean_dev_mobile() {
@@ -200,8 +199,8 @@ clean_dev_mobile() {
# Simulator runtime caches.
safe_clean ~/Library/Developer/CoreSimulator/Profiles/Runtimes/*/Contents/Resources/RuntimeRoot/System/Library/Caches/* "Simulator runtime cache"
safe_clean ~/Library/Caches/Google/AndroidStudio*/* "Android Studio cache"
safe_clean ~/Library/Caches/CocoaPods/* "CocoaPods cache"
safe_clean ~/.cache/flutter/* "Flutter cache"
# safe_clean ~/Library/Caches/CocoaPods/* "CocoaPods cache"
# safe_clean ~/.cache/flutter/* "Flutter cache"
safe_clean ~/.android/build-cache/* "Android build cache"
safe_clean ~/.android/cache/* "Android SDK cache"
safe_clean ~/Library/Developer/Xcode/UserData/IB\ Support/* "Xcode Interface Builder cache"
@@ -214,12 +213,117 @@ clean_dev_jvm() {
safe_clean ~/.sbt/* "SBT cache"
safe_clean ~/.ivy2/cache/* "Ivy cache"
}
# JetBrains Toolbox old IDE versions (keep current + recent backup).
clean_dev_jetbrains_toolbox() {
local toolbox_root="$HOME/Library/Application Support/JetBrains/Toolbox/apps"
[[ -d "$toolbox_root" ]] || return 0

local keep_previous="${MOLE_JETBRAINS_TOOLBOX_KEEP:-1}"
[[ "$keep_previous" =~ ^[0-9]+$ ]] || keep_previous=1

# Save and filter whitelist patterns for toolbox path
local whitelist_overridden="false"
local -a original_whitelist=()
if [[ ${#WHITELIST_PATTERNS[@]} -gt 0 ]]; then
original_whitelist=("${WHITELIST_PATTERNS[@]}")
local -a filtered_whitelist=()
local pattern
for pattern in "${WHITELIST_PATTERNS[@]}"; do
[[ "$toolbox_root" == "$pattern" || "$pattern" == "$toolbox_root"* ]] && continue
filtered_whitelist+=("$pattern")
done
WHITELIST_PATTERNS=("${filtered_whitelist[@]+${filtered_whitelist[@]}}")
whitelist_overridden="true"
fi

# Helper to restore whitelist on exit
_restore_whitelist() {
[[ "$whitelist_overridden" == "true" ]] && WHITELIST_PATTERNS=("${original_whitelist[@]}")
return 0
}

local -a product_dirs=()
while IFS= read -r -d '' product_dir; do
product_dirs+=("$product_dir")
done < <(command find "$toolbox_root" -mindepth 1 -maxdepth 1 -type d -print0 2> /dev/null)

if [[ ${#product_dirs[@]} -eq 0 ]]; then
_restore_whitelist
return 0
fi

local product_dir
for product_dir in "${product_dirs[@]}"; do
while IFS= read -r -d '' channel_dir; do
local current_link=""
local current_real=""
if [[ -L "$channel_dir/current" ]]; then
current_link=$(readlink "$channel_dir/current" 2> /dev/null || true)
if [[ -n "$current_link" ]]; then
if [[ "$current_link" == /* ]]; then
current_real="$current_link"
else
current_real="$channel_dir/$current_link"
fi
fi
elif [[ -d "$channel_dir/current" ]]; then
current_real="$channel_dir/current"
fi

local -a version_dirs=()
while IFS= read -r -d '' version_dir; do
local name
name=$(basename "$version_dir")

[[ "$name" == "current" ]] && continue
[[ "$name" == .* ]] && continue
[[ "$name" == "plugins" || "$name" == "plugins-lib" || "$name" == "plugins-libs" ]] && continue
[[ -n "$current_real" && "$version_dir" == "$current_real" ]] && continue
[[ ! "$name" =~ ^[0-9] ]] && continue

version_dirs+=("$version_dir")
done < <(command find "$channel_dir" -mindepth 1 -maxdepth 1 -type d -print0 2> /dev/null)

[[ ${#version_dirs[@]} -eq 0 ]] && continue

local -a sorted_dirs=()
while IFS= read -r line; do
local dir_path="${line#* }"
sorted_dirs+=("$dir_path")
done < <(
for version_dir in "${version_dirs[@]}"; do
local mtime
mtime=$(stat -f%m "$version_dir" 2> /dev/null || echo "0")
printf '%s %s\n' "$mtime" "$version_dir"
done | sort -rn
)

if [[ ${#sorted_dirs[@]} -le "$keep_previous" ]]; then
continue
fi

local idx=0
local dir_path
for dir_path in "${sorted_dirs[@]}"; do
if [[ $idx -lt $keep_previous ]]; then
((idx++))
continue
fi
safe_clean "$dir_path" "JetBrains Toolbox old IDE version"
note_activity
((idx++))
done
done < <(command find "$product_dir" -mindepth 1 -maxdepth 1 -type d -name "ch-*" -print0 2> /dev/null)
done

_restore_whitelist
}
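How many previous builds survive per ch-* channel is governed by MOLE_JETBRAINS_TOOLBOX_KEEP, which defaults to 1 and falls back to 1 for non-numeric values. A hedged usage sketch, assuming the function runs as part of the regular mo clean flow:

MOLE_JETBRAINS_TOOLBOX_KEEP=2 mo clean   # keep the two most recently modified non-current builds per channel
MOLE_JETBRAINS_TOOLBOX_KEEP=0 mo clean   # remove every versioned build except the one 'current' points to
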
# Other language tool caches.
clean_dev_other_langs() {
safe_clean ~/.bundle/cache/* "Ruby Bundler cache"
safe_clean ~/.composer/cache/* "PHP Composer cache"
safe_clean ~/.nuget/packages/* "NuGet packages cache"
safe_clean ~/.pub-cache/* "Dart Pub cache"
# safe_clean ~/.pub-cache/* "Dart Pub cache"
safe_clean ~/.cache/bazel/* "Bazel cache"
safe_clean ~/.cache/zig/* "Zig cache"
safe_clean ~/Library/Caches/deno/* "Deno cache"
@@ -343,6 +447,7 @@ clean_developer_tools() {
clean_project_caches
clean_dev_mobile
clean_dev_jvm
clean_dev_jetbrains_toolbox
clean_dev_other_langs
clean_dev_cicd
clean_dev_database

@@ -606,7 +606,7 @@ select_purge_categories() {
fi

printf "%s\n" "$clear_line"
printf "%s${PURPLE_BOLD}Select Categories to Clean${NC}%s ${GRAY}- ${selected_gb}GB ($selected_count selected)${NC}\n" "$clear_line" "$scroll_indicator"
printf "%s${PURPLE_BOLD}Select Categories to Clean${NC}%s ${GRAY}, ${selected_gb}GB, ${selected_count} selected${NC}\n" "$clear_line" "$scroll_indicator"
printf "%s\n" "$clear_line"

IFS=',' read -r -a recent_flags <<< "${PURGE_RECENT_CATEGORIES:-}"
@@ -1135,7 +1135,7 @@ clean_project_artifacts() {
fi
if [[ -t 1 ]]; then
stop_inline_spinner
echo -e "${GREEN}${ICON_SUCCESS}${NC} $project_path - $artifact_type ${GREEN}($size_human)${NC}"
echo -e "${GREEN}${ICON_SUCCESS}${NC} $project_path, $artifact_type${NC}, ${GREEN}$size_human${NC}"
fi
done
# Update count

@@ -19,40 +19,38 @@ clean_deep_system() {
|
||||
safe_sudo_find_delete "/private/var/log" "*.gz" "$MOLE_LOG_AGE_DAYS" "f" || true
|
||||
log_success "System logs"
|
||||
if [[ -d "/Library/Updates" && ! -L "/Library/Updates" ]]; then
|
||||
if ! is_sip_enabled; then
|
||||
local updates_cleaned=0
|
||||
while IFS= read -r -d '' item; do
|
||||
if [[ -z "$item" ]] || [[ ! "$item" =~ ^/Library/Updates/[^/]+$ ]]; then
|
||||
debug_log "Skipping malformed path: $item"
|
||||
continue
|
||||
fi
|
||||
local item_flags
|
||||
item_flags=$($STAT_BSD -f%Sf "$item" 2> /dev/null || echo "")
|
||||
if [[ "$item_flags" == *"restricted"* ]]; then
|
||||
continue
|
||||
fi
|
||||
if safe_sudo_remove "$item"; then
|
||||
((updates_cleaned++))
|
||||
fi
|
||||
done < <(find /Library/Updates -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true)
|
||||
[[ $updates_cleaned -gt 0 ]] && log_success "System library updates"
|
||||
fi
|
||||
local updates_cleaned=0
|
||||
while IFS= read -r -d '' item; do
|
||||
if [[ -z "$item" ]] || [[ ! "$item" =~ ^/Library/Updates/[^/]+$ ]]; then
|
||||
debug_log "Skipping malformed path: $item"
|
||||
continue
|
||||
fi
|
||||
local item_flags
|
||||
item_flags=$($STAT_BSD -f%Sf "$item" 2> /dev/null || echo "")
|
||||
if [[ "$item_flags" == *"restricted"* ]]; then
|
||||
continue
|
||||
fi
|
||||
if safe_sudo_remove "$item"; then
|
||||
((updates_cleaned++))
|
||||
fi
|
||||
done < <(find /Library/Updates -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true)
|
||||
[[ $updates_cleaned -gt 0 ]] && log_success "System library updates"
|
||||
fi
|
||||
if [[ -d "/macOS Install Data" ]]; then
|
||||
local mtime=$(get_file_mtime "/macOS Install Data")
|
||||
local age_days=$((($(get_epoch_seconds) - mtime) / 86400))
|
||||
debug_log "Found macOS Install Data (age: ${age_days} days)"
|
||||
debug_log "Found macOS Install Data, age ${age_days} days"
|
||||
if [[ $age_days -ge 30 ]]; then
|
||||
local size_kb=$(get_path_size_kb "/macOS Install Data")
|
||||
if [[ -n "$size_kb" && "$size_kb" -gt 0 ]]; then
|
||||
local size_human=$(bytes_to_human "$((size_kb * 1024))")
|
||||
debug_log "Cleaning macOS Install Data: $size_human (${age_days} days old)"
|
||||
debug_log "Cleaning macOS Install Data: $size_human, ${age_days} days old"
|
||||
if safe_sudo_remove "/macOS Install Data"; then
|
||||
log_success "macOS Install Data ($size_human)"
|
||||
log_success "macOS Install Data, $size_human"
|
||||
fi
|
||||
fi
|
||||
else
|
||||
debug_log "Keeping macOS Install Data (only ${age_days} days old, needs 30+)"
|
||||
debug_log "Keeping macOS Install Data, only ${age_days} days old, needs 30+"
|
||||
fi
|
||||
fi
|
||||
start_section_spinner "Scanning system caches..."
|
||||
@@ -72,27 +70,31 @@ clean_deep_system() {
|
||||
local current_time
|
||||
current_time=$(get_epoch_seconds)
|
||||
if [[ $((current_time - last_update_time)) -ge $update_interval ]]; then
|
||||
start_section_spinner "Scanning system caches... ($found_count found)"
|
||||
start_section_spinner "Scanning system caches... $found_count found"
|
||||
last_update_time=$current_time
|
||||
fi
|
||||
fi
|
||||
done < <(run_with_timeout 5 command find /private/var/folders -type d -name "*.code_sign_clone" -path "*/X/*" -print0 2> /dev/null || true)
|
||||
stop_section_spinner
|
||||
[[ $code_sign_cleaned -gt 0 ]] && log_success "Browser code signature caches ($code_sign_cleaned items)"
|
||||
safe_sudo_find_delete "/private/var/db/diagnostics/Special" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
|
||||
safe_sudo_find_delete "/private/var/db/diagnostics/Persist" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
|
||||
safe_sudo_find_delete "/private/var/db/DiagnosticPipeline" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
|
||||
log_success "System diagnostic logs"
|
||||
safe_sudo_find_delete "/private/var/db/powerlog" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
|
||||
log_success "Power logs"
|
||||
safe_sudo_find_delete "/private/var/db/reportmemoryexception/MemoryLimitViolations" "*" "30" "f" || true
|
||||
log_success "Memory exception reports"
|
||||
start_section_spinner "Cleaning diagnostic trace logs..."
|
||||
local diag_logs_cleaned=0
|
||||
safe_sudo_find_delete "/private/var/db/diagnostics/Persist" "*.tracev3" "30" "f" && diag_logs_cleaned=1 || true
|
||||
safe_sudo_find_delete "/private/var/db/diagnostics/Special" "*.tracev3" "30" "f" && diag_logs_cleaned=1 || true
|
||||
[[ $code_sign_cleaned -gt 0 ]] && log_success "Browser code signature caches, $code_sign_cleaned items"
|
||||
|
||||
start_section_spinner "Cleaning system diagnostic logs..."
|
||||
local diag_cleaned=0
|
||||
safe_sudo_find_delete "/private/var/db/diagnostics/Special" "*" "$MOLE_LOG_AGE_DAYS" "f" && diag_cleaned=1 || true
|
||||
safe_sudo_find_delete "/private/var/db/diagnostics/Persist" "*" "$MOLE_LOG_AGE_DAYS" "f" && diag_cleaned=1 || true
|
||||
safe_sudo_find_delete "/private/var/db/DiagnosticPipeline" "*" "$MOLE_LOG_AGE_DAYS" "f" && diag_cleaned=1 || true
|
||||
safe_sudo_find_delete "/private/var/db/powerlog" "*" "$MOLE_LOG_AGE_DAYS" "f" && diag_cleaned=1 || true
|
||||
safe_sudo_find_delete "/private/var/db/reportmemoryexception/MemoryLimitViolations" "*" "30" "f" && diag_cleaned=1 || true
|
||||
stop_section_spinner
|
||||
[[ $diag_logs_cleaned -eq 1 ]] && log_success "System diagnostic trace logs"
|
||||
|
||||
[[ $diag_cleaned -eq 1 ]] && log_success "System diagnostic logs"
|
||||
|
||||
start_section_spinner "Cleaning diagnostic trace logs..."
|
||||
local trace_cleaned=0
|
||||
safe_sudo_find_delete "/private/var/db/diagnostics/Persist" "*.tracev3" "30" "f" && trace_cleaned=1 || true
|
||||
safe_sudo_find_delete "/private/var/db/diagnostics/Special" "*.tracev3" "30" "f" && trace_cleaned=1 || true
|
||||
stop_section_spinner
|
||||
[[ $trace_cleaned -eq 1 ]] && log_success "System diagnostic trace logs"
|
||||
}
|
||||
# Incomplete Time Machine backups.
|
||||
clean_time_machine_failed_backups() {
|
||||
@@ -176,7 +178,7 @@ clean_time_machine_failed_backups() {
|
||||
local backup_name=$(basename "$inprogress_file")
|
||||
local size_human=$(bytes_to_human "$((size_kb * 1024))")
|
||||
if [[ "$DRY_RUN" == "true" ]]; then
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Incomplete backup: $backup_name ${YELLOW}($size_human dry)${NC}"
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Incomplete backup: $backup_name${NC}, ${YELLOW}$size_human dry${NC}"
|
||||
((tm_cleaned++))
|
||||
note_activity
|
||||
continue
|
||||
@@ -186,7 +188,7 @@ clean_time_machine_failed_backups() {
|
||||
continue
|
||||
fi
|
||||
if tmutil delete "$inprogress_file" 2> /dev/null; then
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete backup: $backup_name ${GREEN}($size_human)${NC}"
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete backup: $backup_name${NC}, ${GREEN}$size_human${NC}"
|
||||
((tm_cleaned++))
|
||||
((files_cleaned++))
|
||||
((total_size_cleaned += size_kb))
|
||||
@@ -222,7 +224,7 @@ clean_time_machine_failed_backups() {
|
||||
local backup_name=$(basename "$inprogress_file")
|
||||
local size_human=$(bytes_to_human "$((size_kb * 1024))")
|
||||
if [[ "$DRY_RUN" == "true" ]]; then
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Incomplete APFS backup in $bundle_name: $backup_name ${YELLOW}($size_human dry)${NC}"
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Incomplete APFS backup in $bundle_name: $backup_name${NC}, ${YELLOW}$size_human dry${NC}"
|
||||
((tm_cleaned++))
|
||||
note_activity
|
||||
continue
|
||||
@@ -231,7 +233,7 @@ clean_time_machine_failed_backups() {
|
||||
continue
|
||||
fi
|
||||
if tmutil delete "$inprogress_file" 2> /dev/null; then
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete APFS backup in $bundle_name: $backup_name ${GREEN}($size_human)${NC}"
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete APFS backup in $bundle_name: $backup_name${NC}, ${GREEN}$size_human${NC}"
|
||||
((tm_cleaned++))
|
||||
((files_cleaned++))
|
||||
((total_size_cleaned += size_kb))
|
||||
@@ -267,7 +269,7 @@ tm_is_running() {
|
||||
grep -qE '(^|[[:space:]])("Running"|Running)[[:space:]]*=[[:space:]]*1([[:space:]]*;|$)' <<< "$st"
|
||||
}
|
||||
|
||||
# Local APFS snapshots (keep the most recent).
|
||||
# Local APFS snapshots (report only).
|
||||
clean_local_snapshots() {
|
||||
if ! command -v tmutil > /dev/null 2>&1; then
|
||||
return 0
|
||||
@@ -277,93 +279,25 @@ clean_local_snapshots() {
|
||||
tm_is_running || rc_running=$?
|
||||
|
||||
if [[ $rc_running -eq 2 ]]; then
|
||||
echo -e " ${YELLOW}!${NC} Could not determine Time Machine status; skipping snapshot cleanup"
|
||||
echo -e " ${YELLOW}!${NC} Could not determine Time Machine status; skipping snapshot check"
|
||||
return 0
|
||||
fi
|
||||
|
||||
if [[ $rc_running -eq 0 ]]; then
|
||||
echo -e " ${YELLOW}!${NC} Time Machine is active; skipping snapshot cleanup"
|
||||
echo -e " ${YELLOW}!${NC} Time Machine is active; skipping snapshot check"
|
||||
return 0
|
||||
fi
|
||||
|
||||
start_section_spinner "Checking local snapshots..."
|
||||
local snapshot_list
|
||||
snapshot_list=$(tmutil listlocalsnapshots / 2> /dev/null)
|
||||
snapshot_list=$(run_with_timeout 3 tmutil listlocalsnapshots / 2> /dev/null || true)
|
||||
stop_section_spinner
|
||||
[[ -z "$snapshot_list" ]] && return 0
|
||||
local cleaned_count=0
|
||||
local total_cleaned_size=0 # Estimation not possible without thin
|
||||
local newest_ts=0
|
||||
local newest_name=""
|
||||
local -a snapshots=()
|
||||
while IFS= read -r line; do
|
||||
if [[ "$line" =~ com\.apple\.TimeMachine\.([0-9]{4})-([0-9]{2})-([0-9]{2})-([0-9]{6}) ]]; then
|
||||
local snap_name="${BASH_REMATCH[0]}"
|
||||
snapshots+=("$snap_name")
|
||||
local date_str="${BASH_REMATCH[1]}-${BASH_REMATCH[2]}-${BASH_REMATCH[3]} ${BASH_REMATCH[4]:0:2}:${BASH_REMATCH[4]:2:2}:${BASH_REMATCH[4]:4:2}"
|
||||
local snap_ts=$(date -j -f "%Y-%m-%d %H:%M:%S" "$date_str" "+%s" 2> /dev/null || echo "0")
|
||||
[[ "$snap_ts" == "0" ]] && continue
|
||||
if [[ "$snap_ts" -gt "$newest_ts" ]]; then
|
||||
newest_ts="$snap_ts"
|
||||
newest_name="$snap_name"
|
||||
fi
|
||||
fi
|
||||
done <<< "$snapshot_list"
|
||||
|
||||
[[ ${#snapshots[@]} -eq 0 ]] && return 0
|
||||
[[ -z "$newest_name" ]] && return 0
|
||||
|
||||
local deletable_count=$((${#snapshots[@]} - 1))
|
||||
[[ $deletable_count -le 0 ]] && return 0
|
||||
|
||||
if [[ "$DRY_RUN" != "true" ]]; then
|
||||
if [[ ! -t 0 ]]; then
|
||||
echo -e " ${YELLOW}!${NC} ${#snapshots[@]} local snapshot(s) found, skipping non-interactive mode"
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} ${GRAY}Tip: Snapshots may cause Disk Utility to show different 'Available' values${NC}"
|
||||
return 0
|
||||
fi
|
||||
echo -e " ${YELLOW}!${NC} Time Machine local snapshots found"
|
||||
echo -e " ${GRAY}macOS can recreate them if needed.${NC}"
|
||||
echo -e " ${GRAY}The most recent snapshot will be kept.${NC}"
|
||||
echo -ne " ${PURPLE}${ICON_ARROW}${NC} Remove all local snapshots except the most recent one? ${GREEN}Enter${NC} continue, ${GRAY}Space${NC} skip: "
|
||||
local choice
|
||||
if type read_key > /dev/null 2>&1; then
|
||||
choice=$(read_key)
|
||||
else
|
||||
IFS= read -r -s -n 1 choice || choice=""
|
||||
if [[ -z "$choice" || "$choice" == $'\n' || "$choice" == $'\r' ]]; then
|
||||
choice="ENTER"
|
||||
fi
|
||||
fi
|
||||
if [[ "$choice" == "ENTER" ]]; then
|
||||
printf "\r\033[K" # Clear the prompt line
|
||||
else
|
||||
echo -e " ${GRAY}Skipped${NC}"
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
|
||||
local snap_name
|
||||
for snap_name in "${snapshots[@]}"; do
|
||||
if [[ "$snap_name" =~ com\.apple\.TimeMachine\.([0-9]{4})-([0-9]{2})-([0-9]{2})-([0-9]{6}) ]]; then
|
||||
if [[ "${BASH_REMATCH[0]}" != "$newest_name" ]]; then
|
||||
if [[ "$DRY_RUN" == "true" ]]; then
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Local snapshot: $snap_name ${YELLOW}dry-run${NC}"
|
||||
((cleaned_count++))
|
||||
note_activity
|
||||
else
|
||||
if sudo tmutil deletelocalsnapshots "${BASH_REMATCH[1]}-${BASH_REMATCH[2]}-${BASH_REMATCH[3]}-${BASH_REMATCH[4]}" > /dev/null 2>&1; then
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Removed snapshot: $snap_name"
|
||||
((cleaned_count++))
|
||||
note_activity
|
||||
else
|
||||
echo -e " ${YELLOW}!${NC} Failed to remove: $snap_name"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
done
|
||||
if [[ $cleaned_count -gt 0 && "$DRY_RUN" != "true" ]]; then
|
||||
log_success "Cleaned $cleaned_count local snapshots, kept latest"
|
||||
local snapshot_count
|
||||
snapshot_count=$(echo "$snapshot_list" | { grep -Eo 'com\.apple\.TimeMachine\.[0-9]{4}-[0-9]{2}-[0-9]{2}-[0-9]{6}' || true; } | wc -l | awk '{print $1}')
|
||||
if [[ "$snapshot_count" =~ ^[0-9]+$ && "$snapshot_count" -gt 0 ]]; then
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Time Machine local snapshots: ${GREEN}${snapshot_count}${NC}${GRAY}, Review: tmutil listlocalsnapshots /${NC}"
|
||||
note_activity
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -7,11 +7,24 @@ clean_user_essentials() {
stop_section_spinner

safe_clean ~/Library/Logs/* "User app logs"
if is_path_whitelisted "$HOME/.Trash"; then
note_activity
echo -e " ${GREEN}${ICON_EMPTY}${NC} Trash · whitelist protected"
else
safe_clean ~/.Trash/* "Trash"

if ! is_path_whitelisted "$HOME/.Trash"; then
local trash_count
trash_count=$(osascript -e 'tell application "Finder" to count items in trash' 2> /dev/null || echo "0")
[[ "$trash_count" =~ ^[0-9]+$ ]] || trash_count="0"

if [[ "$DRY_RUN" == "true" ]]; then
[[ $trash_count -gt 0 ]] && echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Trash · would empty, $trash_count items" || echo -e " ${GRAY}${ICON_EMPTY}${NC} Trash · already empty"
elif [[ $trash_count -gt 0 ]]; then
if osascript -e 'tell application "Finder" to empty trash' > /dev/null 2>&1; then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Trash · emptied, $trash_count items"
note_activity
else
safe_clean ~/.Trash/* "Trash"
fi
else
echo -e " ${GRAY}${ICON_EMPTY}${NC} Trash · already empty"
fi
fi
}

@@ -24,7 +37,7 @@ clean_chrome_old_versions() {
|
||||
|
||||
# Match the exact Chrome process name to avoid false positives
|
||||
if pgrep -x "Google Chrome" > /dev/null 2>&1; then
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Google Chrome running · old versions cleanup skipped"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Google Chrome running · old versions cleanup skipped"
|
||||
return 0
|
||||
fi
|
||||
|
||||
@@ -84,9 +97,9 @@ clean_chrome_old_versions() {
|
||||
local size_human
|
||||
size_human=$(bytes_to_human "$((total_size * 1024))")
|
||||
if [[ "$DRY_RUN" == "true" ]]; then
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Chrome old versions ${YELLOW}(${cleaned_count} dirs, $size_human dry)${NC}"
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Chrome old versions${NC}, ${YELLOW}${cleaned_count} dirs, $size_human dry${NC}"
|
||||
else
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Chrome old versions ${GREEN}(${cleaned_count} dirs, $size_human)${NC}"
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Chrome old versions${NC}, ${GREEN}${cleaned_count} dirs, $size_human${NC}"
|
||||
fi
|
||||
((files_cleaned += cleaned_count))
|
||||
((total_size_cleaned += total_size))
|
||||
@@ -97,14 +110,20 @@ clean_chrome_old_versions() {

# Remove old Microsoft Edge versions while keeping Current.
clean_edge_old_versions() {
local -a app_paths=(
"/Applications/Microsoft Edge.app"
"$HOME/Applications/Microsoft Edge.app"
)
# Allow override for testing
local -a app_paths
if [[ -n "${MOLE_EDGE_APP_PATHS:-}" ]]; then
IFS=':' read -ra app_paths <<< "$MOLE_EDGE_APP_PATHS"
else
app_paths=(
"/Applications/Microsoft Edge.app"
"$HOME/Applications/Microsoft Edge.app"
)
fi

# Match the exact Edge process name to avoid false positives (e.g., Microsoft Teams)
if pgrep -x "Microsoft Edge" > /dev/null 2>&1; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Microsoft Edge running · old versions cleanup skipped"
echo -e " ${GRAY}${ICON_WARNING}${NC} Microsoft Edge running · old versions cleanup skipped"
return 0
fi
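The new MOLE_EDGE_APP_PATHS override takes a colon-separated list of .app locations and exists mainly so tests can point the function at a fixture instead of the real install. A small sketch; the fixture path is hypothetical and only the variable format is taken from the hunk:

MOLE_EDGE_APP_PATHS="$HOME/tmp/fixtures/Microsoft Edge.app:/Applications/Microsoft Edge.app" mo clean
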
@@ -164,9 +183,9 @@ clean_edge_old_versions() {
|
||||
local size_human
|
||||
size_human=$(bytes_to_human "$((total_size * 1024))")
|
||||
if [[ "$DRY_RUN" == "true" ]]; then
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Edge old versions ${YELLOW}(${cleaned_count} dirs, $size_human dry)${NC}"
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Edge old versions${NC}, ${YELLOW}${cleaned_count} dirs, $size_human dry${NC}"
|
||||
else
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Edge old versions ${GREEN}(${cleaned_count} dirs, $size_human)${NC}"
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Edge old versions${NC}, ${GREEN}${cleaned_count} dirs, $size_human${NC}"
|
||||
fi
|
||||
((files_cleaned += cleaned_count))
|
||||
((total_size_cleaned += total_size))
|
||||
@@ -181,7 +200,7 @@ clean_edge_updater_old_versions() {
|
||||
[[ -d "$updater_dir" ]] || return 0
|
||||
|
||||
if pgrep -x "Microsoft Edge" > /dev/null 2>&1; then
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Microsoft Edge running · updater cleanup skipped"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Microsoft Edge running · updater cleanup skipped"
|
||||
return 0
|
||||
fi
|
||||
|
||||
@@ -226,9 +245,9 @@ clean_edge_updater_old_versions() {
|
||||
local size_human
|
||||
size_human=$(bytes_to_human "$((total_size * 1024))")
|
||||
if [[ "$DRY_RUN" == "true" ]]; then
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Edge updater old versions ${YELLOW}(${cleaned_count} dirs, $size_human dry)${NC}"
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Edge updater old versions${NC}, ${YELLOW}${cleaned_count} dirs, $size_human dry${NC}"
|
||||
else
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Edge updater old versions ${GREEN}(${cleaned_count} dirs, $size_human)${NC}"
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Edge updater old versions${NC}, ${GREEN}${cleaned_count} dirs, $size_human${NC}"
|
||||
fi
|
||||
((files_cleaned += cleaned_count))
|
||||
((total_size_cleaned += total_size))
|
||||
@@ -266,12 +285,12 @@ scan_external_volumes() {
|
||||
local network_count=${#network_volumes[@]}
|
||||
if [[ $volume_count -eq 0 ]]; then
|
||||
if [[ $network_count -gt 0 ]]; then
|
||||
echo -e " ${GRAY}${ICON_LIST}${NC} External volumes (${network_count} network volume(s) skipped)"
|
||||
echo -e " ${GRAY}${ICON_LIST}${NC} External volumes, ${network_count} network volumes skipped"
|
||||
note_activity
|
||||
fi
|
||||
return 0
|
||||
fi
|
||||
start_section_spinner "Scanning $volume_count external volume(s)..."
|
||||
start_section_spinner "Scanning $volume_count external volumes..."
|
||||
for volume in "${candidate_volumes[@]}"; do
|
||||
[[ -d "$volume" && -r "$volume" ]] || continue
|
||||
local volume_trash="$volume/.Trashes"
|
||||
@@ -281,24 +300,20 @@ scan_external_volumes() {
|
||||
done < <(command find "$volume_trash" -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true)
|
||||
fi
|
||||
if [[ "$PROTECT_FINDER_METADATA" != "true" ]]; then
|
||||
clean_ds_store_tree "$volume" "$(basename "$volume") volume (.DS_Store)"
|
||||
clean_ds_store_tree "$volume" "$(basename "$volume") volume, .DS_Store"
|
||||
fi
|
||||
done
|
||||
stop_section_spinner
|
||||
}
|
||||
# Finder metadata (.DS_Store).
|
||||
clean_finder_metadata() {
|
||||
stop_section_spinner
|
||||
if [[ "$PROTECT_FINDER_METADATA" == "true" ]]; then
|
||||
note_activity
|
||||
echo -e " ${GREEN}${ICON_EMPTY}${NC} Finder metadata · whitelist protected"
|
||||
return
|
||||
fi
|
||||
clean_ds_store_tree "$HOME" "Home directory (.DS_Store)"
|
||||
clean_ds_store_tree "$HOME" "Home directory, .DS_Store"
|
||||
}
|
||||
# macOS system caches and user-level leftovers.
|
||||
clean_macos_system_caches() {
|
||||
stop_section_spinner
|
||||
# safe_clean already checks protected paths.
|
||||
safe_clean ~/Library/Saved\ Application\ State/* "Saved application states" || true
|
||||
safe_clean ~/Library/Caches/com.apple.photoanalysisd "Photo analysis cache" || true
|
||||
@@ -318,7 +333,6 @@ clean_macos_system_caches() {
|
||||
safe_clean ~/Library/Application\ Support/AddressBook/Sources/*/Photos.cache "Address Book photo cache" || true
|
||||
}
|
||||
clean_recent_items() {
|
||||
stop_section_spinner
|
||||
local shared_dir="$HOME/Library/Application Support/com.apple.sharedfilelist"
|
||||
local -a recent_lists=(
|
||||
"$shared_dir/com.apple.LSSharedFileList.RecentApplications.sfl2"
|
||||
@@ -338,7 +352,6 @@ clean_recent_items() {
|
||||
safe_clean ~/Library/Preferences/com.apple.recentitems.plist "Recent items preferences" || true
|
||||
}
|
||||
clean_mail_downloads() {
|
||||
stop_section_spinner
|
||||
local mail_age_days=${MOLE_MAIL_AGE_DAYS:-}
|
||||
if ! [[ "$mail_age_days" =~ ^[0-9]+$ ]]; then
|
||||
mail_age_days=30
|
||||
@@ -376,7 +389,7 @@ clean_mail_downloads() {
|
||||
done
|
||||
if [[ $count -gt 0 ]]; then
|
||||
local cleaned_mb=$(echo "$cleaned_kb" | awk '{printf "%.1f", $1/1024}' || echo "0.0")
|
||||
echo " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $count mail attachments (~${cleaned_mb}MB)"
|
||||
echo " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $count mail attachments, about ${cleaned_mb}MB"
|
||||
note_activity
|
||||
fi
|
||||
}
|
||||
@@ -405,9 +418,9 @@ clean_sandboxed_app_caches() {
|
||||
if [[ "$found_any" == "true" ]]; then
|
||||
local size_human=$(bytes_to_human "$((total_size * 1024))")
|
||||
if [[ "$DRY_RUN" == "true" ]]; then
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Sandboxed app caches ${YELLOW}($size_human dry)${NC}"
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Sandboxed app caches${NC}, ${YELLOW}$size_human dry${NC}"
|
||||
else
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Sandboxed app caches ${GREEN}($size_human)${NC}"
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Sandboxed app caches${NC}, ${GREEN}$size_human${NC}"
|
||||
fi
|
||||
((files_cleaned += cleaned_count))
|
||||
((total_size_cleaned += total_size))
|
||||
@@ -449,13 +462,13 @@ process_container_cache() {
|
||||
}
|
||||
# Browser caches (Safari/Chrome/Edge/Firefox).
|
||||
clean_browsers() {
|
||||
stop_section_spinner
|
||||
safe_clean ~/Library/Caches/com.apple.Safari/* "Safari cache"
|
||||
# Chrome/Chromium.
|
||||
safe_clean ~/Library/Caches/Google/Chrome/* "Chrome cache"
|
||||
safe_clean ~/Library/Application\ Support/Google/Chrome/*/Application\ Cache/* "Chrome app cache"
|
||||
safe_clean ~/Library/Application\ Support/Google/Chrome/*/GPUCache/* "Chrome GPU cache"
|
||||
safe_clean ~/Library/Caches/Chromium/* "Chromium cache"
|
||||
safe_clean ~/.cache/puppeteer/* "Puppeteer browser cache"
|
||||
safe_clean ~/Library/Caches/com.microsoft.edgemac/* "Edge cache"
|
||||
safe_clean ~/Library/Caches/company.thebrowser.Browser/* "Arc cache"
|
||||
safe_clean ~/Library/Caches/company.thebrowser.dia/* "Dia cache"
|
||||
@@ -465,7 +478,7 @@ clean_browsers() {
|
||||
firefox_running=true
|
||||
fi
|
||||
if [[ "$firefox_running" == "true" ]]; then
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Firefox is running · cache cleanup skipped"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Firefox is running · cache cleanup skipped"
|
||||
else
|
||||
safe_clean ~/Library/Caches/Firefox/* "Firefox cache"
|
||||
fi
|
||||
@@ -475,7 +488,7 @@ clean_browsers() {
|
||||
safe_clean ~/Library/Caches/com.kagi.kagimacOS/* "Orion cache"
|
||||
safe_clean ~/Library/Caches/zen/* "Zen cache"
|
||||
if [[ "$firefox_running" == "true" ]]; then
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Firefox is running · profile cache cleanup skipped"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Firefox is running · profile cache cleanup skipped"
|
||||
else
|
||||
safe_clean ~/Library/Application\ Support/Firefox/Profiles/*/cache2/* "Firefox profile cache"
|
||||
fi
|
||||
@@ -485,7 +498,6 @@ clean_browsers() {
|
||||
}
|
||||
# Cloud storage caches.
|
||||
clean_cloud_storage() {
|
||||
stop_section_spinner
|
||||
safe_clean ~/Library/Caches/com.dropbox.* "Dropbox cache"
|
||||
safe_clean ~/Library/Caches/com.getdropbox.dropbox "Dropbox cache"
|
||||
safe_clean ~/Library/Caches/com.google.GoogleDrive "Google Drive cache"
|
||||
@@ -496,7 +508,6 @@ clean_cloud_storage() {
|
||||
}
|
||||
# Office app caches.
|
||||
clean_office_applications() {
|
||||
stop_section_spinner
|
||||
safe_clean ~/Library/Caches/com.microsoft.Word "Microsoft Word cache"
|
||||
safe_clean ~/Library/Caches/com.microsoft.Excel "Microsoft Excel cache"
|
||||
safe_clean ~/Library/Caches/com.microsoft.Powerpoint "Microsoft PowerPoint cache"
|
||||
@@ -516,10 +527,9 @@ clean_virtualization_tools() {
|
||||
}
|
||||
# Application Support logs/caches.
|
||||
clean_application_support_logs() {
|
||||
stop_section_spinner
|
||||
if [[ ! -d "$HOME/Library/Application Support" ]] || ! ls "$HOME/Library/Application Support" > /dev/null 2>&1; then
|
||||
note_activity
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Skipped: No permission to access Application Support"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} Skipped: No permission to access Application Support"
|
||||
return 0
|
||||
fi
|
||||
start_section_spinner "Scanning Application Support..."
|
||||
@@ -593,9 +603,9 @@ clean_application_support_logs() {
|
||||
if [[ "$found_any" == "true" ]]; then
|
||||
local size_human=$(bytes_to_human "$((total_size * 1024))")
|
||||
if [[ "$DRY_RUN" == "true" ]]; then
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Application Support logs/caches ${YELLOW}($size_human dry)${NC}"
|
||||
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Application Support logs/caches${NC}, ${YELLOW}$size_human dry${NC}"
|
||||
else
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Application Support logs/caches ${GREEN}($size_human)${NC}"
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Application Support logs/caches${NC}, ${GREEN}$size_human${NC}"
|
||||
fi
|
||||
((files_cleaned += cleaned_count))
|
||||
((total_size_cleaned += total_size))
|
||||
@@ -613,13 +623,107 @@ check_ios_device_backups() {
|
||||
local backup_human=$(command du -sh "$backup_dir" 2> /dev/null | awk '{print $1}')
|
||||
if [[ -n "$backup_human" ]]; then
|
||||
note_activity
|
||||
echo -e " Found ${GREEN}${backup_human}${NC} iOS backups"
|
||||
echo -e " You can delete them manually: ${backup_dir}"
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} iOS backups: ${GREEN}${backup_human}${NC}${GRAY}, Path: $backup_dir${NC}"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
# Large file candidates (report only, no deletion).
|
||||
check_large_file_candidates() {
|
||||
local threshold_kb=$((1024 * 1024)) # 1GB
|
||||
local found_any=false
|
||||
|
||||
local mail_dir="$HOME/Library/Mail"
|
||||
if [[ -d "$mail_dir" ]]; then
|
||||
local mail_kb
|
||||
mail_kb=$(get_path_size_kb "$mail_dir")
|
||||
if [[ "$mail_kb" -ge "$threshold_kb" ]]; then
|
||||
local mail_human
|
||||
mail_human=$(bytes_to_human "$((mail_kb * 1024))")
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Mail data: ${GREEN}${mail_human}${NC}${GRAY}, Path: $mail_dir${NC}"
|
||||
found_any=true
|
||||
fi
|
||||
fi
|
||||
|
||||
local mail_downloads="$HOME/Library/Mail Downloads"
|
||||
if [[ -d "$mail_downloads" ]]; then
|
||||
local downloads_kb
|
||||
downloads_kb=$(get_path_size_kb "$mail_downloads")
|
||||
if [[ "$downloads_kb" -ge "$threshold_kb" ]]; then
|
||||
local downloads_human
|
||||
downloads_human=$(bytes_to_human "$((downloads_kb * 1024))")
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Mail downloads: ${GREEN}${downloads_human}${NC}${GRAY}, Path: $mail_downloads${NC}"
|
||||
found_any=true
|
||||
fi
|
||||
fi
|
||||
|
||||
local installer_path
|
||||
for installer_path in /Applications/Install\ macOS*.app; do
|
||||
if [[ -e "$installer_path" ]]; then
|
||||
local installer_kb
|
||||
installer_kb=$(get_path_size_kb "$installer_path")
|
||||
if [[ "$installer_kb" -gt 0 ]]; then
|
||||
local installer_human
|
||||
installer_human=$(bytes_to_human "$((installer_kb * 1024))")
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} macOS installer: ${GREEN}${installer_human}${NC}${GRAY}, Path: $installer_path${NC}"
|
||||
found_any=true
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
local updates_dir="$HOME/Library/Updates"
|
||||
if [[ -d "$updates_dir" ]]; then
|
||||
local updates_kb
|
||||
updates_kb=$(get_path_size_kb "$updates_dir")
|
||||
if [[ "$updates_kb" -ge "$threshold_kb" ]]; then
|
||||
local updates_human
|
||||
updates_human=$(bytes_to_human "$((updates_kb * 1024))")
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} macOS updates cache: ${GREEN}${updates_human}${NC}${GRAY}, Path: $updates_dir${NC}"
|
||||
found_any=true
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "${SYSTEM_CLEAN:-false}" != "true" ]] && command -v tmutil > /dev/null 2>&1; then
|
||||
local snapshot_list snapshot_count
|
||||
snapshot_list=$(run_with_timeout 3 tmutil listlocalsnapshots / 2> /dev/null || true)
|
||||
if [[ -n "$snapshot_list" ]]; then
|
||||
snapshot_count=$(echo "$snapshot_list" | { grep -Eo 'com\.apple\.TimeMachine\.[0-9]{4}-[0-9]{2}-[0-9]{2}-[0-9]{6}' || true; } | wc -l | awk '{print $1}')
|
||||
if [[ "$snapshot_count" =~ ^[0-9]+$ && "$snapshot_count" -gt 0 ]]; then
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Time Machine local snapshots: ${GREEN}${snapshot_count}${NC}${GRAY}, Review: tmutil listlocalsnapshots /${NC}"
|
||||
found_any=true
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if command -v docker > /dev/null 2>&1; then
|
||||
local docker_output
|
||||
docker_output=$(run_with_timeout 3 docker system df --format '{{.Type}}\t{{.Size}}\t{{.Reclaimable}}' 2> /dev/null || true)
|
||||
if [[ -n "$docker_output" ]]; then
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Docker storage:"
|
||||
while IFS=$'\t' read -r dtype dsize dreclaim; do
|
||||
[[ -z "$dtype" ]] && continue
|
||||
echo -e " ${GRAY}• $dtype: $dsize, Reclaimable: $dreclaim${NC}"
|
||||
done <<< "$docker_output"
|
||||
found_any=true
|
||||
else
|
||||
docker_output=$(run_with_timeout 3 docker system df 2> /dev/null || true)
|
||||
if [[ -n "$docker_output" ]]; then
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Docker storage:"
|
||||
echo -e " ${GRAY}• Run: docker system df${NC}"
|
||||
found_any=true
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "$found_any" == "false" ]]; then
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} No large items detected in common locations"
|
||||
fi
|
||||
|
||||
note_activity
|
||||
return 0
|
||||
}
|
||||
# Apple Silicon specific caches (IS_M_SERIES).
|
||||
clean_apple_silicon_caches() {
|
||||
if [[ "${IS_M_SERIES:-false}" != "true" ]]; then
|
||||
|
||||
@@ -493,6 +493,9 @@ should_protect_data() {
# Check if a path is protected from deletion
# Centralized logic to protect system settings, control center, and critical apps
#
# In uninstall mode (MOLE_UNINSTALL_MODE=1), only system-critical components are protected.
# Data-protected apps (VPNs, dev tools, etc.) can be uninstalled when user explicitly chooses to.
#
# Args: $1 - path to check
# Returns: 0 if protected, 1 if safe to delete
should_protect_path() {
@@ -577,17 +580,31 @@ should_protect_path() {

# 6. Match full path against protected patterns
# This catches things like /Users/tw93/Library/Caches/Claude when pattern is *Claude*
for pattern in "${SYSTEM_CRITICAL_BUNDLES[@]}" "${DATA_PROTECTED_BUNDLES[@]}"; do
if bundle_matches_pattern "$path" "$pattern"; then
return 0
fi
done
# In uninstall mode, only check system-critical bundles (user explicitly chose to uninstall)
if [[ "${MOLE_UNINSTALL_MODE:-0}" == "1" ]]; then
# Uninstall mode: only protect system-critical components
for pattern in "${SYSTEM_CRITICAL_BUNDLES[@]}"; do
if bundle_matches_pattern "$path" "$pattern"; then
return 0
fi
done
else
# Normal mode (cleanup): protect both system-critical and data-protected bundles
for pattern in "${SYSTEM_CRITICAL_BUNDLES[@]}" "${DATA_PROTECTED_BUNDLES[@]}"; do
if bundle_matches_pattern "$path" "$pattern"; then
return 0
fi
done
fi

# 7. Check if the filename itself matches any protected patterns
local filename
filename=$(basename "$path")
if should_protect_data "$filename"; then
return 0
# Skip in uninstall mode - user explicitly chose to remove this app
if [[ "${MOLE_UNINSTALL_MODE:-0}" != "1" ]]; then
local filename
filename=$(basename "$path")
if should_protect_data "$filename"; then
return 0
fi
fi

return 1
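The practical effect of the mode switch is easiest to see by calling the predicate directly. An illustrative sketch, assuming a cache path that matches a DATA_PROTECTED_BUNDLES pattern but none of the system-critical ones (SomeVPN is a made-up name):

should_protect_path "$HOME/Library/Caches/SomeVPN"                         # cleanup mode: protected, returns 0
MOLE_UNINSTALL_MODE=1 should_protect_path "$HOME/Library/Caches/SomeVPN"   # uninstall mode: not protected, returns 1
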
@@ -643,6 +660,14 @@ is_path_whitelisted() {
|
||||
find_app_files() {
|
||||
local bundle_id="$1"
|
||||
local app_name="$2"
|
||||
|
||||
# Early validation: require at least one valid identifier
|
||||
# Skip scanning if both bundle_id and app_name are invalid
|
||||
if [[ -z "$bundle_id" || "$bundle_id" == "unknown" ]] &&
|
||||
[[ -z "$app_name" || ${#app_name} -lt 2 ]]; then
|
||||
return 0 # Silent return to avoid invalid scanning
|
||||
fi
|
||||
|
||||
local -a files_to_clean=()
|
||||
|
||||
# Normalize app name for matching
|
||||
@@ -665,7 +690,6 @@ find_app_files() {
|
||||
"$HOME/Library/HTTPStorages/$bundle_id"
|
||||
"$HOME/Library/Cookies/$bundle_id.binarycookies"
|
||||
"$HOME/Library/LaunchAgents/$bundle_id.plist"
|
||||
"$HOME/Library/LaunchDaemons/$bundle_id.plist"
|
||||
"$HOME/Library/Application Scripts/$bundle_id"
|
||||
"$HOME/Library/Services/$app_name.workflow"
|
||||
"$HOME/Library/QuickLook/$app_name.qlgenerator"
|
||||
@@ -740,17 +764,26 @@ find_app_files() {
|
||||
fi
|
||||
fi
|
||||
|
||||
# Launch Agents and Daemons by name (special handling)
|
||||
if [[ ${#app_name} -gt 3 ]]; then
|
||||
if [[ -d ~/Library/LaunchAgents ]]; then
|
||||
# Launch Agents by name (special handling)
|
||||
# Note: LaunchDaemons are system-level and handled in find_app_system_files()
|
||||
# Minimum 5-char threshold prevents false positives (e.g., "Time" matching system agents)
|
||||
# Short-name apps (e.g., Zoom, Arc) are still cleaned via bundle_id matching above
|
||||
# Security: Common words are excluded to prevent matching unrelated plist files
|
||||
if [[ ${#app_name} -ge 5 ]] && [[ -d ~/Library/LaunchAgents ]]; then
|
||||
# Skip common words that could match many unrelated LaunchAgents
|
||||
# These are either generic terms or names that overlap with system/common utilities
|
||||
local common_words="Music|Notes|Photos|Finder|Safari|Preview|Calendar|Contacts|Messages|Reminders|Clock|Weather|Stocks|Books|News|Podcasts|Voice|Files|Store|System|Helper|Agent|Daemon|Service|Update|Sync|Backup|Cloud|Manager|Monitor|Server|Client|Worker|Runner|Launcher|Driver|Plugin|Extension|Widget|Utility"
|
||||
if [[ "$app_name" =~ ^($common_words)$ ]]; then
|
||||
debug_log "Skipping LaunchAgent name search for common word: $app_name"
|
||||
else
|
||||
while IFS= read -r -d '' plist; do
|
||||
local plist_name=$(basename "$plist")
|
||||
# Skip Apple's LaunchAgents
|
||||
if [[ "$plist_name" =~ ^com\.apple\. ]]; then
|
||||
continue
|
||||
fi
|
||||
files_to_clean+=("$plist")
|
||||
done < <(command find ~/Library/LaunchAgents -maxdepth 1 \( -name "*$app_name*.plist" \) -print0 2> /dev/null)
|
||||
fi
|
||||
if [[ -d ~/Library/LaunchDaemons ]]; then
|
||||
while IFS= read -r -d '' plist; do
|
||||
files_to_clean+=("$plist")
|
||||
done < <(command find ~/Library/LaunchDaemons -maxdepth 1 \( -name "*$app_name*.plist" \) -print0 2> /dev/null)
|
||||
done < <(command find ~/Library/LaunchAgents -maxdepth 1 -name "*$app_name*.plist" -print0 2> /dev/null)
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -764,7 +797,7 @@ find_app_files() {
|
||||
|
||||
# 2. Android Studio (Google)
|
||||
if [[ "$app_name" =~ Android.*Studio|android.*studio ]] || [[ "$bundle_id" =~ google.*android.*studio|jetbrains.*android ]]; then
|
||||
for d in ~/AndroidStudioProjects ~/Library/Android ~/.android ~/.gradle; do
|
||||
for d in ~/AndroidStudioProjects ~/Library/Android ~/.android; do
|
||||
[[ -d "$d" ]] && files_to_clean+=("$d")
|
||||
done
|
||||
[[ -d ~/Library/Application\ Support/Google ]] && while IFS= read -r -d '' d; do files_to_clean+=("$d"); done < <(command find ~/Library/Application\ Support/Google -maxdepth 1 -name "AndroidStudio*" -print0 2> /dev/null)
|
||||
@@ -904,6 +937,13 @@ find_app_receipt_files() {
|
||||
# Skip if no bundle ID
|
||||
[[ -z "$bundle_id" || "$bundle_id" == "unknown" ]] && return 0
|
||||
|
||||
# Validate bundle_id format to prevent wildcard injection
|
||||
# Only allow alphanumeric characters, dots, hyphens, and underscores
|
||||
if [[ ! "$bundle_id" =~ ^[a-zA-Z0-9._-]+$ ]]; then
|
||||
debug_log "Invalid bundle_id format: $bundle_id"
|
||||
return 0
|
||||
fi
|
||||
|
||||
local -a receipt_files=()
|
||||
local -a bom_files=()
|
||||
|
||||
@@ -935,6 +975,15 @@ find_app_receipt_files() {
|
||||
clean_path="/$clean_path"
|
||||
fi
|
||||
|
||||
# Path traversal protection: reject paths containing ..
|
||||
if [[ "$clean_path" =~ \.\. ]]; then
|
||||
debug_log "Rejected path traversal in BOM: $clean_path"
|
||||
continue
|
||||
fi
|
||||
|
||||
# Normalize path (remove duplicate slashes)
|
||||
clean_path=$(tr -s "/" <<< "$clean_path")
|
||||
|
||||
# ------------------------------------------------------------------------
|
||||
# Safety check: restrict removal to trusted paths
|
||||
# ------------------------------------------------------------------------
|
||||
|
||||
@@ -31,7 +31,7 @@ readonly ICON_CONFIRM="◎"
readonly ICON_ADMIN="⚙"
readonly ICON_SUCCESS="✓"
readonly ICON_ERROR="☻"
readonly ICON_WARNING="●"
readonly ICON_WARNING="◎"
readonly ICON_EMPTY="○"
readonly ICON_SOLID="●"
readonly ICON_LIST="•"
@@ -687,7 +687,7 @@ update_progress_if_needed() {
|
||||
if [[ $((current_time - last_time)) -ge $interval ]]; then
|
||||
# Update the spinner with progress
|
||||
stop_section_spinner
|
||||
start_section_spinner "Scanning items... ($completed/$total)"
|
||||
start_section_spinner "Scanning items... $completed/$total"
|
||||
|
||||
# Update the last_update_time variable
|
||||
eval "$last_update_var=$current_time"
|
||||
@@ -717,7 +717,7 @@ push_spinner_state() {
|
||||
fi
|
||||
|
||||
MOLE_SPINNER_STACK+=("$current_state")
|
||||
debug_log "Pushed spinner state: $current_state (stack depth: ${#MOLE_SPINNER_STACK[@]})"
|
||||
debug_log "Pushed spinner state: $current_state, stack depth: ${#MOLE_SPINNER_STACK[@]}"
|
||||
}
|
||||
|
||||
# Pop and restore spinner state from stack
|
||||
@@ -730,7 +730,7 @@ pop_spinner_state() {
|
||||
|
||||
# Stack depth safety check
|
||||
if [[ ${#MOLE_SPINNER_STACK[@]} -gt 10 ]]; then
|
||||
debug_log "Warning: Spinner stack depth excessive (${#MOLE_SPINNER_STACK[@]}), possible leak"
|
||||
debug_log "Warning: Spinner stack depth excessive, ${#MOLE_SPINNER_STACK[@]}, possible leak"
|
||||
fi
|
||||
|
||||
local last_idx=$((${#MOLE_SPINNER_STACK[@]} - 1))
|
||||
@@ -745,7 +745,7 @@ pop_spinner_state() {
|
||||
done
|
||||
MOLE_SPINNER_STACK=("${new_stack[@]}")
|
||||
|
||||
debug_log "Popped spinner state: $state (remaining depth: ${#MOLE_SPINNER_STACK[@]})"
|
||||
debug_log "Popped spinner state: $state, remaining depth: ${#MOLE_SPINNER_STACK[@]}"
|
||||
|
||||
# Restore state if needed
|
||||
if [[ "$state" == running:* ]]; then
|
||||
@@ -822,7 +822,7 @@ get_terminal_info() {
|
||||
local info="Terminal: ${TERM:-unknown}"
|
||||
|
||||
if is_ansi_supported; then
|
||||
info+=" (ANSI supported)"
|
||||
info+=", ANSI supported"
|
||||
|
||||
if command -v tput > /dev/null 2>&1; then
|
||||
local cols=$(tput cols 2> /dev/null || echo "?")
|
||||
@@ -831,7 +831,7 @@ get_terminal_info() {
|
||||
info+=" ${cols}x${lines}, ${colors} colors"
|
||||
fi
|
||||
else
|
||||
info+=" (ANSI not supported)"
|
||||
info+=", ANSI not supported"
|
||||
fi
|
||||
|
||||
echo "$info"
|
||||
@@ -852,11 +852,11 @@ validate_terminal_environment() {
|
||||
# Check if running in a known problematic terminal
|
||||
case "${TERM:-}" in
|
||||
dumb)
|
||||
log_warning "Running in 'dumb' terminal - limited functionality"
|
||||
log_warning "Running in 'dumb' terminal, limited functionality"
|
||||
((warnings++))
|
||||
;;
|
||||
unknown)
|
||||
log_warning "Terminal type unknown - may have display issues"
|
||||
log_warning "Terminal type unknown, may have display issues"
|
||||
((warnings++))
|
||||
;;
|
||||
esac
|
||||
@@ -865,7 +865,7 @@ validate_terminal_environment() {
|
||||
if command -v tput > /dev/null 2>&1; then
|
||||
local cols=$(tput cols 2> /dev/null || echo "80")
|
||||
if [[ "$cols" -lt 60 ]]; then
|
||||
log_warning "Terminal width ($cols cols) is narrow - output may wrap"
|
||||
log_warning "Terminal width, $cols cols, is narrow, output may wrap"
|
||||
((warnings++))
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -79,8 +79,9 @@ update_via_homebrew() {
|
||||
if echo "$upgrade_output" | grep -q "already installed"; then
|
||||
local installed_version
|
||||
installed_version=$(brew list --versions mole 2> /dev/null | awk '{print $2}')
|
||||
[[ -z "$installed_version" ]] && installed_version=$(mo --version 2> /dev/null | awk '/Mole version/ {print $3; exit}')
|
||||
echo ""
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Already on latest version (${installed_version:-$current_version})"
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Already on latest version, ${installed_version:-$current_version}"
|
||||
echo ""
|
||||
elif echo "$upgrade_output" | grep -q "Error:"; then
|
||||
log_error "Homebrew upgrade failed"
|
||||
@@ -90,8 +91,9 @@ update_via_homebrew() {
|
||||
echo "$upgrade_output" | grep -Ev "^(==>|Updating Homebrew|Warning:)" || true
|
||||
local new_version
|
||||
new_version=$(brew list --versions mole 2> /dev/null | awk '{print $2}')
|
||||
[[ -z "$new_version" ]] && new_version=$(mo --version 2> /dev/null | awk '/Mole version/ {print $3; exit}')
|
||||
echo ""
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Updated to latest version (${new_version:-$current_version})"
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Updated to latest version, ${new_version:-$current_version}"
|
||||
echo ""
|
||||
fi
|
||||
|
||||
@@ -125,24 +127,29 @@ remove_apps_from_dock() {
|
||||
local changed=false
|
||||
for target in "${targets[@]}"; do
|
||||
local app_path="$target"
|
||||
# Normalize path for comparison - use original path if app already deleted
|
||||
local full_path
|
||||
if full_path=$(cd "$(dirname "$app_path")" 2> /dev/null && pwd); then
|
||||
full_path="$full_path/$(basename "$app_path")"
|
||||
else
|
||||
# App already deleted - use the original path as-is
|
||||
# Remove ~/ prefix and expand to full path if needed
|
||||
if [[ "$app_path" == ~/* ]]; then
|
||||
full_path="$HOME/${app_path#~/}"
|
||||
elif [[ "$app_path" != /* ]]; then
|
||||
# Relative path - skip this entry
|
||||
continue
|
||||
else
|
||||
full_path="$app_path"
|
||||
fi
|
||||
local full_path=""
|
||||
|
||||
if [[ "$app_path" =~ [[:cntrl:]] ]]; then
|
||||
debug_log "Skipping dock removal for path with control chars: $app_path"
|
||||
continue
|
||||
fi
|
||||
|
||||
# URL-encode the path for matching against Dock URLs (spaces -> %20)
|
||||
if [[ -e "$app_path" ]]; then
|
||||
if full_path=$(cd "$(dirname "$app_path")" 2> /dev/null && pwd); then
|
||||
full_path="$full_path/$(basename "$app_path")"
|
||||
else
|
||||
continue
|
||||
fi
|
||||
else
|
||||
case "$app_path" in
|
||||
~/*) full_path="$HOME/${app_path#~/}" ;;
|
||||
/*) full_path="$app_path" ;;
|
||||
*) continue ;;
|
||||
esac
|
||||
fi
|
||||
|
||||
[[ -z "$full_path" ]] && continue
|
||||
|
||||
local encoded_path="${full_path// /%20}"
|
||||
|
||||
# Find the index of the app in persistent-apps
|
||||
|
||||
@@ -39,6 +39,33 @@ validate_path_for_deletion() {
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Check symlink target if path is a symbolic link
|
||||
if [[ -L "$path" ]]; then
|
||||
local link_target
|
||||
link_target=$(readlink "$path" 2> /dev/null) || {
|
||||
log_error "Cannot read symlink: $path"
|
||||
return 1
|
||||
}
|
||||
|
||||
# Resolve relative symlinks to absolute paths for validation
|
||||
local resolved_target="$link_target"
|
||||
if [[ "$link_target" != /* ]]; then
|
||||
local link_dir
|
||||
link_dir=$(dirname "$path")
|
||||
resolved_target=$(cd "$link_dir" 2> /dev/null && cd "$(dirname "$link_target")" 2> /dev/null && pwd)/$(basename "$link_target") || resolved_target=""
|
||||
fi
|
||||
|
||||
# Validate resolved target against protected paths
|
||||
if [[ -n "$resolved_target" ]]; then
|
||||
case "$resolved_target" in
|
||||
/System/* | /usr/bin/* | /usr/lib/* | /bin/* | /sbin/* | /private/etc/*)
|
||||
log_error "Symlink points to protected system path: $path -> $resolved_target"
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check path is absolute
|
||||
if [[ "$path" != /* ]]; then
|
||||
log_error "Path validation failed: path must be absolute: $path"
|
||||
@@ -75,7 +102,8 @@ validate_path_for_deletion() {
|
||||
/private/var/db/diagnostics | /private/var/db/diagnostics/* | \
|
||||
/private/var/db/DiagnosticPipeline | /private/var/db/DiagnosticPipeline/* | \
|
||||
/private/var/db/powerlog | /private/var/db/powerlog/* | \
|
||||
/private/var/db/reportmemoryexception | /private/var/db/reportmemoryexception/*)
|
||||
/private/var/db/reportmemoryexception | /private/var/db/reportmemoryexception/* | \
|
||||
/private/var/db/receipts/*.bom | /private/var/db/receipts/*.plist)
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
@@ -169,6 +197,18 @@ safe_remove() {
|
||||
|
||||
debug_log "Removing: $path"
|
||||
|
||||
# Calculate size before deletion for logging
|
||||
local size_kb=0
|
||||
local size_human=""
|
||||
if oplog_enabled; then
|
||||
if [[ -e "$path" ]]; then
|
||||
size_kb=$(get_path_size_kb "$path" 2> /dev/null || echo "0")
|
||||
if [[ "$size_kb" =~ ^[0-9]+$ ]] && [[ "$size_kb" -gt 0 ]]; then
|
||||
size_human=$(bytes_to_human "$((size_kb * 1024))" 2> /dev/null || echo "${size_kb}KB")
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Perform the deletion
|
||||
# Use || to capture the exit code so set -e won't abort on rm failures
|
||||
local error_msg
|
||||
@@ -176,6 +216,8 @@ safe_remove() {
|
||||
error_msg=$(rm -rf "$path" 2>&1) || rm_exit=$? # safe_remove
|
||||
|
||||
if [[ $rm_exit -eq 0 ]]; then
|
||||
# Log successful removal
|
||||
log_operation "${MOLE_CURRENT_COMMAND:-clean}" "REMOVED" "$path" "$size_human"
|
||||
return 0
|
||||
else
|
||||
# Check if it's a permission error
|
||||
@@ -183,9 +225,11 @@ safe_remove() {
|
||||
MOLE_PERMISSION_DENIED_COUNT=${MOLE_PERMISSION_DENIED_COUNT:-0}
|
||||
MOLE_PERMISSION_DENIED_COUNT=$((MOLE_PERMISSION_DENIED_COUNT + 1))
|
||||
export MOLE_PERMISSION_DENIED_COUNT
|
||||
debug_log "Permission denied: $path (may need Full Disk Access)"
|
||||
debug_log "Permission denied: $path, may need Full Disk Access"
|
||||
log_operation "${MOLE_CURRENT_COMMAND:-clean}" "FAILED" "$path" "permission denied"
|
||||
else
|
||||
[[ "$silent" != "true" ]] && log_error "Failed to remove: $path"
|
||||
log_operation "${MOLE_CURRENT_COMMAND:-clean}" "FAILED" "$path" "error"
|
||||
fi
|
||||
return 1
|
||||
fi
|
||||
@@ -239,20 +283,34 @@ safe_sudo_remove() {
|
||||
fi
|
||||
fi
|
||||
|
||||
debug_file_action "[DRY RUN] Would remove (sudo)" "$path" "$file_size" "$file_age"
|
||||
debug_file_action "[DRY RUN] Would remove, sudo" "$path" "$file_size" "$file_age"
|
||||
else
|
||||
debug_log "[DRY RUN] Would remove (sudo): $path"
|
||||
debug_log "[DRY RUN] Would remove, sudo: $path"
|
||||
fi
|
||||
return 0
|
||||
fi
|
||||
|
||||
debug_log "Removing (sudo): $path"
|
||||
debug_log "Removing, sudo: $path"
|
||||
|
||||
# Calculate size before deletion for logging
|
||||
local size_kb=0
|
||||
local size_human=""
|
||||
if oplog_enabled; then
|
||||
if sudo test -e "$path" 2> /dev/null; then
|
||||
size_kb=$(sudo du -sk "$path" 2> /dev/null | awk '{print $1}' || echo "0")
|
||||
if [[ "$size_kb" =~ ^[0-9]+$ ]] && [[ "$size_kb" -gt 0 ]]; then
|
||||
size_human=$(bytes_to_human "$((size_kb * 1024))" 2> /dev/null || echo "${size_kb}KB")
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Perform the deletion
|
||||
if sudo rm -rf "$path" 2> /dev/null; then # SAFE: safe_sudo_remove implementation
|
||||
log_operation "${MOLE_CURRENT_COMMAND:-clean}" "REMOVED" "$path" "$size_human"
|
||||
return 0
|
||||
else
|
||||
log_error "Failed to remove (sudo): $path"
|
||||
log_error "Failed to remove, sudo: $path"
|
||||
log_operation "${MOLE_CURRENT_COMMAND:-clean}" "FAILED" "$path" "sudo error"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
@@ -281,11 +339,11 @@ safe_find_delete() {
|
||||
|
||||
# Validate type filter
|
||||
if [[ "$type_filter" != "f" && "$type_filter" != "d" ]]; then
|
||||
log_error "Invalid type filter: $type_filter (must be 'f' or 'd')"
|
||||
log_error "Invalid type filter: $type_filter, must be 'f' or 'd'"
|
||||
return 1
|
||||
fi
|
||||
|
||||
debug_log "Finding in $base_dir: $pattern (age: ${age_days}d, type: $type_filter)"
|
||||
debug_log "Finding in $base_dir: $pattern, age: ${age_days}d, type: $type_filter"
|
||||
|
||||
local find_args=("-maxdepth" "5" "-name" "$pattern" "-type" "$type_filter")
|
||||
if [[ "$age_days" -gt 0 ]]; then
|
||||
@@ -312,7 +370,7 @@ safe_sudo_find_delete() {
|
||||
|
||||
# Validate base directory (use sudo for permission-restricted dirs)
|
||||
if ! sudo test -d "$base_dir" 2> /dev/null; then
|
||||
debug_log "Directory does not exist (skipping): $base_dir"
|
||||
debug_log "Directory does not exist, skipping: $base_dir"
|
||||
return 0
|
||||
fi
|
||||
|
||||
@@ -323,11 +381,11 @@ safe_sudo_find_delete() {
|
||||
|
||||
# Validate type filter
|
||||
if [[ "$type_filter" != "f" && "$type_filter" != "d" ]]; then
|
||||
log_error "Invalid type filter: $type_filter (must be 'f' or 'd')"
|
||||
log_error "Invalid type filter: $type_filter, must be 'f' or 'd'"
|
||||
return 1
|
||||
fi
|
||||
|
||||
debug_log "Finding (sudo) in $base_dir: $pattern (age: ${age_days}d, type: $type_filter)"
|
||||
debug_log "Finding, sudo, in $base_dir: $pattern, age: ${age_days}d, type: $type_filter"
|
||||
|
||||
local find_args=("-maxdepth" "5" "-name" "$pattern" "-type" "$type_filter")
|
||||
if [[ "$age_days" -gt 0 ]]; then
|
||||
|
||||
109
lib/core/log.sh
109
lib/core/log.sh
@@ -23,10 +23,15 @@ fi
|
||||
|
||||
readonly LOG_FILE="${HOME}/.config/mole/mole.log"
|
||||
readonly DEBUG_LOG_FILE="${HOME}/.config/mole/mole_debug_session.log"
|
||||
readonly LOG_MAX_SIZE_DEFAULT=1048576 # 1MB
|
||||
readonly OPERATIONS_LOG_FILE="${HOME}/.config/mole/operations.log"
|
||||
readonly LOG_MAX_SIZE_DEFAULT=1048576 # 1MB
|
||||
readonly OPLOG_MAX_SIZE_DEFAULT=5242880 # 5MB
|
||||
|
||||
# Ensure log directory and file exist with correct ownership
|
||||
ensure_user_file "$LOG_FILE"
|
||||
if [[ "${MO_NO_OPLOG:-}" != "1" ]]; then
|
||||
ensure_user_file "$OPERATIONS_LOG_FILE"
|
||||
fi
|
||||
|
||||
# ============================================================================
|
||||
# Log Rotation
|
||||
@@ -43,6 +48,15 @@ rotate_log_once() {
|
||||
mv "$LOG_FILE" "${LOG_FILE}.old" 2> /dev/null || true
|
||||
ensure_user_file "$LOG_FILE"
|
||||
fi
|
||||
|
||||
# Rotate operations log (5MB limit)
|
||||
if [[ "${MO_NO_OPLOG:-}" != "1" ]]; then
|
||||
local oplog_max_size="$OPLOG_MAX_SIZE_DEFAULT"
|
||||
if [[ -f "$OPERATIONS_LOG_FILE" ]] && [[ $(get_file_size "$OPERATIONS_LOG_FILE") -gt "$oplog_max_size" ]]; then
|
||||
mv "$OPERATIONS_LOG_FILE" "${OPERATIONS_LOG_FILE}.old" 2> /dev/null || true
|
||||
ensure_user_file "$OPERATIONS_LOG_FILE"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
@@ -97,6 +111,80 @@ debug_log() {
|
||||
fi
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# Operation Logging (Enabled by default)
|
||||
# ============================================================================
|
||||
# Records all file operations for user troubleshooting
|
||||
# Disable with MO_NO_OPLOG=1
|
||||
|
||||
oplog_enabled() {
|
||||
[[ "${MO_NO_OPLOG:-}" != "1" ]]
|
||||
}
|
||||
|
||||
# Log an operation to the operations log file
|
||||
# Usage: log_operation <command> <action> <path> [detail]
|
||||
# Example: log_operation "clean" "REMOVED" "/path/to/file" "15.2MB"
|
||||
# Example: log_operation "clean" "SKIPPED" "/path/to/file" "whitelist"
|
||||
# Example: log_operation "uninstall" "REMOVED" "/Applications/App.app" "150MB"
|
||||
log_operation() {
|
||||
# Allow disabling via environment variable
|
||||
oplog_enabled || return 0
|
||||
|
||||
local command="${1:-unknown}" # clean/uninstall/optimize/purge
|
||||
local action="${2:-UNKNOWN}" # REMOVED/SKIPPED/FAILED/REBUILT
|
||||
local path="${3:-}"
|
||||
local detail="${4:-}"
|
||||
|
||||
# Skip if no path provided
|
||||
[[ -z "$path" ]] && return 0
|
||||
|
||||
local timestamp
|
||||
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||
|
||||
local log_line="[$timestamp] [$command] $action $path"
|
||||
[[ -n "$detail" ]] && log_line+=" ($detail)"
|
||||
|
||||
echo "$log_line" >> "$OPERATIONS_LOG_FILE" 2> /dev/null || true
|
||||
}
|
||||
|
||||
# Log session start marker
|
||||
# Usage: log_operation_session_start <command>
|
||||
log_operation_session_start() {
|
||||
oplog_enabled || return 0
|
||||
|
||||
local command="${1:-mole}"
|
||||
local timestamp
|
||||
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||
|
||||
{
|
||||
echo ""
|
||||
echo "# ========== $command session started at $timestamp =========="
|
||||
} >> "$OPERATIONS_LOG_FILE" 2> /dev/null || true
|
||||
}
|
||||
|
||||
# Log session end with summary
|
||||
# Usage: log_operation_session_end <command> <items_count> <total_size>
|
||||
log_operation_session_end() {
|
||||
oplog_enabled || return 0
|
||||
|
||||
local command="${1:-mole}"
|
||||
local items="${2:-0}"
|
||||
local size="${3:-0}"
|
||||
local timestamp
|
||||
timestamp=$(date '+%Y-%m-%d %H:%M:%S')
|
||||
|
||||
local size_human=""
|
||||
if [[ "$size" =~ ^[0-9]+$ ]] && [[ "$size" -gt 0 ]]; then
|
||||
size_human=$(bytes_to_human "$((size * 1024))" 2> /dev/null || echo "${size}KB")
|
||||
else
|
||||
size_human="0B"
|
||||
fi
|
||||
|
||||
{
|
||||
echo "# ========== $command session ended at $timestamp, $items items, $size_human =========="
|
||||
} >> "$OPERATIONS_LOG_FILE" 2> /dev/null || true
|
||||
}
|
||||
|
||||
# Enhanced debug logging for operations
|
||||
debug_operation_start() {
|
||||
local operation_name="$1"
|
||||
@@ -138,10 +226,9 @@ debug_file_action() {
|
||||
local file_age="${4:-}"
|
||||
|
||||
if [[ "${MO_DEBUG:-}" == "1" ]]; then
|
||||
local msg=" - $file_path"
|
||||
[[ -n "$file_size" ]] && msg+=" ($file_size"
|
||||
local msg=" * $file_path"
|
||||
[[ -n "$file_size" ]] && msg+=", $file_size"
|
||||
[[ -n "$file_age" ]] && msg+=", ${file_age} days old"
|
||||
[[ -n "$file_size" ]] && msg+=")"
|
||||
|
||||
# Output to stderr
|
||||
echo -e "${GRAY}[DEBUG] $action: $msg${NC}" >&2
|
||||
@@ -165,10 +252,10 @@ debug_risk_level() {
|
||||
esac
|
||||
|
||||
# Output to stderr with color
|
||||
echo -e "${GRAY}[DEBUG] Risk Level: ${color}${risk_level}${GRAY} ($reason)${NC}" >&2
|
||||
echo -e "${GRAY}[DEBUG] Risk Level: ${color}${risk_level}${GRAY}, $reason${NC}" >&2
|
||||
|
||||
# Also log to file
|
||||
echo "Risk Level: $risk_level ($reason)" >> "$DEBUG_LOG_FILE" 2> /dev/null || true
|
||||
echo "Risk Level: $risk_level, $reason" >> "$DEBUG_LOG_FILE" 2> /dev/null || true
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -180,21 +267,23 @@ log_system_info() {
|
||||
|
||||
# Reset debug log file for this new session
|
||||
ensure_user_file "$DEBUG_LOG_FILE"
|
||||
: > "$DEBUG_LOG_FILE"
|
||||
if ! : > "$DEBUG_LOG_FILE" 2> /dev/null; then
|
||||
echo -e "${YELLOW}${ICON_WARNING}${NC} Debug log not writable: $DEBUG_LOG_FILE" >&2
|
||||
fi
|
||||
|
||||
# Start block in debug log file
|
||||
{
|
||||
echo "----------------------------------------------------------------------"
|
||||
echo "Mole Debug Session - $(date '+%Y-%m-%d %H:%M:%S')"
|
||||
echo "Mole Debug Session, $(date '+%Y-%m-%d %H:%M:%S')"
|
||||
echo "----------------------------------------------------------------------"
|
||||
echo "User: $USER"
|
||||
echo "Hostname: $(hostname)"
|
||||
echo "Architecture: $(uname -m)"
|
||||
echo "Kernel: $(uname -r)"
|
||||
if command -v sw_vers > /dev/null; then
|
||||
echo "macOS: $(sw_vers -productVersion) ($(sw_vers -buildVersion))"
|
||||
echo "macOS: $(sw_vers -productVersion), $(sw_vers -buildVersion)"
|
||||
fi
|
||||
echo "Shell: ${SHELL:-unknown} (${TERM:-unknown})"
|
||||
echo "Shell: ${SHELL:-unknown}, ${TERM:-unknown}"
|
||||
|
||||
# Check sudo status non-interactively
|
||||
if sudo -n true 2> /dev/null; then
|
||||
|
||||
@@ -60,7 +60,7 @@ _request_password() {
|
||||
|
||||
# Show hint on first attempt about Touch ID appearing again
|
||||
if [[ $show_hint == true ]] && check_touchid_support; then
|
||||
echo -e "${GRAY}Note: Touch ID dialog may appear once more - just cancel it${NC}" > "$tty_path"
|
||||
echo -e "${GRAY}Note: Touch ID dialog may appear once more, just cancel it${NC}" > "$tty_path"
|
||||
show_hint=false
|
||||
fi
|
||||
|
||||
@@ -78,7 +78,7 @@ _request_password() {
|
||||
unset password
|
||||
((attempts++))
|
||||
if [[ $attempts -lt 3 ]]; then
|
||||
echo -e "${YELLOW}${ICON_WARNING}${NC} Password cannot be empty" > "$tty_path"
|
||||
echo -e "${GRAY}${ICON_WARNING}${NC} Password cannot be empty" > "$tty_path"
|
||||
fi
|
||||
continue
|
||||
fi
|
||||
@@ -93,7 +93,7 @@ _request_password() {
|
||||
unset password
|
||||
((attempts++))
|
||||
if [[ $attempts -lt 3 ]]; then
|
||||
echo -e "${YELLOW}${ICON_WARNING}${NC} Incorrect password, try again" > "$tty_path"
|
||||
echo -e "${GRAY}${ICON_WARNING}${NC} Incorrect password, try again" > "$tty_path"
|
||||
fi
|
||||
done
|
||||
|
||||
@@ -143,7 +143,7 @@ request_sudo_access() {
|
||||
fi
|
||||
|
||||
# Touch ID is available and not in clamshell mode
|
||||
echo -e "${PURPLE}${ICON_ARROW}${NC} ${prompt_msg} ${GRAY}(Touch ID or password)${NC}"
|
||||
echo -e "${PURPLE}${ICON_ARROW}${NC} ${prompt_msg} ${GRAY}, Touch ID or password${NC}"
|
||||
|
||||
# Start sudo in background so we can monitor and control it
|
||||
sudo -v < /dev/null > /dev/null 2>&1 &
|
||||
|
||||
@@ -100,7 +100,7 @@ run_with_timeout() {
|
||||
# ========================================================================
|
||||
|
||||
if [[ "${MO_DEBUG:-0}" == "1" ]]; then
|
||||
echo "[TIMEOUT] Shell fallback (${duration}s): $*" >&2
|
||||
echo "[TIMEOUT] Shell fallback, ${duration}s: $*" >&2
|
||||
fi
|
||||
|
||||
# Start command in background
|
||||
|
||||
@@ -220,7 +220,6 @@ read_key() {
|
||||
case "$key" in
|
||||
$'\n' | $'\r') echo "ENTER" ;;
|
||||
' ') echo "SPACE" ;;
|
||||
'/') echo "FILTER" ;;
|
||||
'q' | 'Q') echo "QUIT" ;;
|
||||
'R') echo "RETRY" ;;
|
||||
'm' | 'M') echo "MORE" ;;
|
||||
@@ -308,7 +307,7 @@ start_inline_spinner() {
|
||||
# Output to stderr to avoid interfering with stdout
|
||||
printf "\r${MOLE_SPINNER_PREFIX:-}${BLUE}%s${NC} %s" "$c" "$message" >&2 || break
|
||||
((i++))
|
||||
sleep 0.1
|
||||
sleep 0.05
|
||||
done
|
||||
|
||||
# Clean up stop file before exiting
|
||||
@@ -316,7 +315,7 @@ start_inline_spinner() {
|
||||
exit 0
|
||||
) &
|
||||
INLINE_SPINNER_PID=$!
|
||||
disown 2> /dev/null || true
|
||||
disown "$INLINE_SPINNER_PID" 2> /dev/null || true
|
||||
else
|
||||
echo -n " ${BLUE}|${NC} $message" >&2 || true
|
||||
fi
|
||||
|
||||
@@ -56,7 +56,7 @@ show_suggestions() {
|
||||
|
||||
if [[ -n "${DISK_FREE_GB:-}" && "${DISK_FREE_GB:-0}" -lt 50 ]]; then
|
||||
if [[ -z "${CACHE_SIZE_GB:-}" ]] || (($(echo "${CACHE_SIZE_GB:-0} <= 5" | bc -l 2> /dev/null || echo 1))); then
|
||||
manual_items+=("Low disk space (${DISK_FREE_GB}GB free)|Run: mo analyze to find large files")
|
||||
manual_items+=("Low disk space, ${DISK_FREE_GB}GB free|Run: mo analyze to find large files")
|
||||
has_suggestions=true
|
||||
fi
|
||||
fi
|
||||
@@ -73,7 +73,7 @@ show_suggestions() {
|
||||
# Show auto-fix items
|
||||
if [[ ${#auto_fix_items[@]} -gt 0 ]]; then
|
||||
for item in "${auto_fix_items[@]}"; do
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} ${item} ${GREEN}[auto]${NC}"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} ${item} ${GREEN}[auto]${NC}"
|
||||
done
|
||||
fi
|
||||
|
||||
@@ -82,7 +82,7 @@ show_suggestions() {
|
||||
for item in "${manual_items[@]}"; do
|
||||
local title="${item%%|*}"
|
||||
local hint="${item#*|}"
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} ${title}"
|
||||
echo -e " ${GRAY}${ICON_WARNING}${NC} ${title}"
|
||||
echo -e " ${GRAY}${hint}${NC}"
|
||||
done
|
||||
fi
|
||||
@@ -127,7 +127,7 @@ perform_auto_fix() {
|
||||
# Ensure sudo access
|
||||
if ! has_sudo_session; then
|
||||
if ! ensure_sudo_session "System fixes require admin access"; then
|
||||
echo -e "${YELLOW}Skipping auto fixes (admin authentication required)${NC}"
|
||||
echo -e "${YELLOW}Skipping auto fixes, admin authentication required${NC}"
|
||||
echo ""
|
||||
return 0
|
||||
fi
|
||||
@@ -176,7 +176,7 @@ auth sufficient pam_tid.so
|
||||
fi
|
||||
|
||||
if [[ $fixed_count -gt 0 ]]; then
|
||||
AUTO_FIX_SUMMARY="Auto fixes applied: ${fixed_count} issue(s)"
|
||||
AUTO_FIX_SUMMARY="Auto fixes applied: ${fixed_count} issues"
|
||||
if [[ ${#fixed_items[@]} -gt 0 ]]; then
|
||||
AUTO_FIX_DETAILS=$(printf '%s\n' "${fixed_items[@]}")
|
||||
else
|
||||
|
||||
@@ -58,7 +58,7 @@ manage_purge_paths() {
|
||||
if [[ -d "$path" ]]; then
|
||||
echo -e " ${GREEN}✓${NC} $display_path"
|
||||
else
|
||||
echo -e " ${GRAY}○${NC} $display_path ${GRAY}(not found)${NC}"
|
||||
echo -e " ${GRAY}○${NC} $display_path${GRAY}, not found${NC}"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
@@ -76,7 +76,7 @@ manage_purge_paths() {
|
||||
|
||||
echo ""
|
||||
if [[ $custom_count -gt 0 ]]; then
|
||||
echo -e "${GRAY}Using custom config with $custom_count path(s)${NC}"
|
||||
echo -e "${GRAY}Using custom config with $custom_count paths${NC}"
|
||||
else
|
||||
echo -e "${GRAY}Using ${#DEFAULT_PURGE_SEARCH_PATHS[@]} default paths${NC}"
|
||||
fi
|
||||
|
||||
@@ -18,14 +18,14 @@ format_brew_update_label() {
|
||||
((formulas > 0)) && details+=("${formulas} formula")
|
||||
((casks > 0)) && details+=("${casks} cask")
|
||||
|
||||
local detail_str="(${total} updates)"
|
||||
local detail_str=", ${total} updates"
|
||||
if ((${#details[@]} > 0)); then
|
||||
detail_str="($(
|
||||
detail_str=", $(
|
||||
IFS=', '
|
||||
printf '%s' "${details[*]}"
|
||||
))"
|
||||
)"
|
||||
fi
|
||||
printf " • Homebrew %s" "$detail_str"
|
||||
printf " • Homebrew%s" "$detail_str"
|
||||
}
|
||||
|
||||
brew_has_outdated() {
|
||||
@@ -54,7 +54,7 @@ ask_for_updates() {
|
||||
|
||||
if [[ -n "${APPSTORE_UPDATE_COUNT:-}" && "${APPSTORE_UPDATE_COUNT:-0}" -gt 0 ]]; then
|
||||
has_updates=true
|
||||
update_list+=(" • App Store (${APPSTORE_UPDATE_COUNT} apps)")
|
||||
update_list+=(" • App Store, ${APPSTORE_UPDATE_COUNT} apps")
|
||||
fi
|
||||
|
||||
if [[ -n "${MACOS_UPDATE_AVAILABLE:-}" && "${MACOS_UPDATE_AVAILABLE}" == "true" ]]; then
|
||||
@@ -132,10 +132,10 @@ perform_updates() {
|
||||
echo -e "${GRAY}No updates to perform${NC}"
|
||||
return 0
|
||||
elif [[ $updated_count -eq $total_count ]]; then
|
||||
echo -e "${GREEN}All updates completed (${updated_count}/${total_count})${NC}"
|
||||
echo -e "${GREEN}All updates completed, ${updated_count}/${total_count}${NC}"
|
||||
return 0
|
||||
else
|
||||
echo -e "${RED}Update failed (${updated_count}/${total_count})${NC}"
|
||||
echo -e "${RED}Update failed, ${updated_count}/${total_count}${NC}"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -148,7 +148,7 @@ CloudKit cache|$HOME/Library/Caches/CloudKit/*|system_cache
|
||||
Trash|$HOME/.Trash|system_cache
|
||||
EOF
|
||||
# Add FINDER_METADATA with constant reference
|
||||
echo "Finder metadata (.DS_Store)|$FINDER_METADATA_SENTINEL|system_cache"
|
||||
echo "Finder metadata, .DS_Store|$FINDER_METADATA_SENTINEL|system_cache"
|
||||
}
|
||||
|
||||
# Get all optimize items with their patterns
|
||||
@@ -284,13 +284,13 @@ manage_whitelist_categories() {
|
||||
items_source=$(get_optimize_whitelist_items)
|
||||
active_config_file="$WHITELIST_CONFIG_OPTIMIZE"
|
||||
local display_config="${active_config_file/#$HOME/~}"
|
||||
menu_title="Whitelist Manager – Select system checks to ignore
|
||||
menu_title="Whitelist Manager, Select system checks to ignore
|
||||
${GRAY}Edit: ${display_config}${NC}"
|
||||
else
|
||||
items_source=$(get_all_cache_items)
|
||||
active_config_file="$WHITELIST_CONFIG_CLEAN"
|
||||
local display_config="${active_config_file/#$HOME/~}"
|
||||
menu_title="Whitelist Manager – Select caches to protect
|
||||
menu_title="Whitelist Manager, Select caches to protect
|
||||
${GRAY}Edit: ${display_config}${NC}"
|
||||
fi
|
||||
|
||||
@@ -416,7 +416,7 @@ ${GRAY}Edit: ${display_config}${NC}"
|
||||
if [[ ${#custom_patterns[@]} -gt 0 ]]; then
|
||||
summary_lines+=("Protected ${#selected_patterns[@]} predefined + ${#custom_patterns[@]} custom patterns")
|
||||
else
|
||||
summary_lines+=("Protected ${total_protected} cache(s)")
|
||||
summary_lines+=("Protected ${total_protected} caches")
|
||||
fi
|
||||
local display_config="${active_config_file/#$HOME/~}"
|
||||
summary_lines+=("Config: ${GRAY}${display_config}${NC}")
|
||||
|
||||
@@ -263,7 +263,7 @@ opt_sqlite_vacuum() {
|
||||
fi
|
||||
|
||||
if ! command -v sqlite3 > /dev/null 2>&1; then
|
||||
echo -e " ${GRAY}-${NC} Database optimization already optimal (sqlite3 unavailable)"
|
||||
echo -e " ${GRAY}-${NC} Database optimization already optimal, sqlite3 unavailable"
|
||||
return 0
|
||||
fi
|
||||
|
||||
@@ -584,7 +584,7 @@ opt_disk_permissions_repair() {
|
||||
opt_msg "User directory permissions repaired"
|
||||
opt_msg "File access issues resolved"
|
||||
else
|
||||
echo -e " ${YELLOW}!${NC} Failed to repair permissions (may not be needed)"
|
||||
echo -e " ${YELLOW}!${NC} Failed to repair permissions, may not be needed"
|
||||
fi
|
||||
else
|
||||
opt_msg "User directory permissions repaired"
|
||||
@@ -705,7 +705,7 @@ opt_spotlight_index_optimize() {
|
||||
fi
|
||||
|
||||
if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then
|
||||
echo -e " ${BLUE}ℹ${NC} Spotlight search is slow, rebuilding index (may take 1-2 hours)"
|
||||
echo -e " ${BLUE}ℹ${NC} Spotlight search is slow, rebuilding index, may take 1-2 hours"
|
||||
if sudo mdutil -E / > /dev/null 2>&1; then
|
||||
opt_msg "Spotlight index rebuild started"
|
||||
echo -e " ${GRAY}Indexing will continue in background${NC}"
|
||||
@@ -733,7 +733,7 @@ opt_dock_refresh() {
|
||||
if [[ -f "$db_file" ]]; then
|
||||
safe_remove "$db_file" true > /dev/null 2>&1 && refreshed=true
|
||||
fi
|
||||
done < <(find "$dock_support" -name "*.db" -type f 2> /dev/null || true)
|
||||
done < <(command find "$dock_support" -name "*.db" -type f 2> /dev/null || true)
|
||||
fi
|
||||
|
||||
local dock_plist="$HOME/Library/Preferences/com.apple.dock.plist"
|
||||
|
||||
@@ -87,13 +87,8 @@ paginated_multi_select() {
|
||||
local items_per_page=$(_pm_calculate_items_per_page)
|
||||
local cursor_pos=0
|
||||
local top_index=0
|
||||
local filter_query=""
|
||||
local filter_mode="false" # filter mode toggle
|
||||
local sort_mode="${MOLE_MENU_SORT_MODE:-${MOLE_MENU_SORT_DEFAULT:-date}}" # date|name|size
|
||||
local sort_reverse="${MOLE_MENU_SORT_REVERSE:-false}"
|
||||
# Live query vs applied query
|
||||
local applied_query=""
|
||||
local searching="false"
|
||||
|
||||
# Metadata (optional)
|
||||
# epochs[i] -> last_used_epoch (numeric) for item i
|
||||
@@ -124,36 +119,6 @@ paginated_multi_select() {
|
||||
view_indices[i]=$i
|
||||
done
|
||||
|
||||
# Escape for shell globbing without upsetting highlighters
|
||||
_pm_escape_glob() {
|
||||
local s="${1-}" out="" c
|
||||
local i len=${#s}
|
||||
for ((i = 0; i < len; i++)); do
|
||||
c="${s:i:1}"
|
||||
case "$c" in
|
||||
$'\\' | '*' | '?' | '[' | ']') out+="\\$c" ;;
|
||||
*) out+="$c" ;;
|
||||
esac
|
||||
done
|
||||
printf '%s' "$out"
|
||||
}
|
||||
|
||||
# Case-insensitive fuzzy match (substring search)
|
||||
_pm_match() {
|
||||
local hay="$1" q="$2"
|
||||
q="$(_pm_escape_glob "$q")"
|
||||
local pat="*${q}*"
|
||||
|
||||
shopt -s nocasematch
|
||||
local ok=1
|
||||
# shellcheck disable=SC2254 # intentional glob match with a computed pattern
|
||||
case "$hay" in
|
||||
$pat) ok=0 ;;
|
||||
esac
|
||||
shopt -u nocasematch
|
||||
return $ok
|
||||
}
|
||||
|
||||
local -a selected=()
|
||||
local selected_count=0 # Cache selection count to avoid O(n) loops on every draw
|
||||
|
||||
@@ -267,44 +232,13 @@ paginated_multi_select() {
|
||||
printf "%s%s\n" "$clear_line" "$line" >&2
|
||||
}
|
||||
|
||||
# Rebuild the view_indices applying filter and sort
|
||||
# Rebuild the view_indices applying sort
|
||||
rebuild_view() {
|
||||
# Filter
|
||||
local -a filtered=()
|
||||
local effective_query=""
|
||||
if [[ "$filter_mode" == "true" ]]; then
|
||||
# Live editing: empty query -> show all items
|
||||
effective_query="$filter_query"
|
||||
if [[ -z "$effective_query" ]]; then
|
||||
filtered=("${orig_indices[@]}")
|
||||
else
|
||||
local idx
|
||||
for ((idx = 0; idx < total_items; idx++)); do
|
||||
if _pm_match "${items[idx]}" "$effective_query"; then
|
||||
filtered+=("$idx")
|
||||
fi
|
||||
done
|
||||
fi
|
||||
else
|
||||
# Normal mode: use applied query; empty -> show all
|
||||
effective_query="$applied_query"
|
||||
if [[ -z "$effective_query" ]]; then
|
||||
filtered=("${orig_indices[@]}")
|
||||
else
|
||||
local idx
|
||||
for ((idx = 0; idx < total_items; idx++)); do
|
||||
if _pm_match "${items[idx]}" "$effective_query"; then
|
||||
filtered+=("$idx")
|
||||
fi
|
||||
done
|
||||
fi
|
||||
fi
|
||||
|
||||
# Sort (skip if no metadata)
|
||||
if [[ "$has_metadata" == "false" ]]; then
|
||||
# No metadata: just use filtered list (already sorted by name naturally)
|
||||
view_indices=("${filtered[@]}")
|
||||
elif [[ ${#filtered[@]} -eq 0 ]]; then
|
||||
# No metadata: just use original indices
|
||||
view_indices=("${orig_indices[@]}")
|
||||
elif [[ ${#orig_indices[@]} -eq 0 ]]; then
|
||||
view_indices=()
|
||||
else
|
||||
# Build sort key
|
||||
@@ -328,7 +262,7 @@ paginated_multi_select() {
|
||||
tmpfile=$(mktemp 2> /dev/null) || tmpfile=""
|
||||
if [[ -n "$tmpfile" ]]; then
|
||||
local k id
|
||||
for id in "${filtered[@]}"; do
|
||||
for id in "${orig_indices[@]}"; do
|
||||
case "$sort_mode" in
|
||||
date) k="${epochs[id]:-0}" ;;
|
||||
size) k="${sizekb[id]:-0}" ;;
|
||||
@@ -346,7 +280,7 @@ paginated_multi_select() {
|
||||
rm -f "$tmpfile"
|
||||
else
|
||||
# Fallback: no sorting
|
||||
view_indices=("${filtered[@]}")
|
||||
view_indices=("${orig_indices[@]}")
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -404,34 +338,13 @@ paginated_multi_select() {
|
||||
# Visible slice
|
||||
local visible_total=${#view_indices[@]}
|
||||
if [[ $visible_total -eq 0 ]]; then
|
||||
if [[ "$filter_mode" == "true" ]]; then
|
||||
# While editing: do not show "No items available"
|
||||
for ((i = 0; i < items_per_page; i++)); do
|
||||
printf "${clear_line}\n" >&2
|
||||
done
|
||||
printf "${clear_line}${GRAY}Type to filter | Delete | Enter Confirm | ESC Cancel${NC}\n" >&2
|
||||
printf "${clear_line}" >&2
|
||||
return
|
||||
else
|
||||
if [[ "$searching" == "true" ]]; then
|
||||
printf "${clear_line}Searching…\n" >&2
|
||||
for ((i = 0; i < items_per_page; i++)); do
|
||||
printf "${clear_line}\n" >&2
|
||||
done
|
||||
printf "${clear_line}${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN} | Space | Enter | / Filter | Q Exit${NC}\n" >&2
|
||||
printf "${clear_line}" >&2
|
||||
return
|
||||
else
|
||||
# Post-search: truly empty list
|
||||
printf "${clear_line}No items available\n" >&2
|
||||
for ((i = 0; i < items_per_page; i++)); do
|
||||
printf "${clear_line}\n" >&2
|
||||
done
|
||||
printf "${clear_line}${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN} | Space | Enter | / Filter | Q Exit${NC}\n" >&2
|
||||
printf "${clear_line}" >&2
|
||||
return
|
||||
fi
|
||||
fi
|
||||
printf "${clear_line}No items available\n" >&2
|
||||
for ((i = 0; i < items_per_page; i++)); do
|
||||
printf "${clear_line}\n" >&2
|
||||
done
|
||||
printf "${clear_line}${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN} | Space | Enter | Q Exit${NC}\n" >&2
|
||||
printf "${clear_line}" >&2
|
||||
return
|
||||
fi
|
||||
|
||||
local visible_count=$((visible_total - top_index))
|
||||
@@ -465,7 +378,7 @@ paginated_multi_select() {
|
||||
|
||||
printf "${clear_line}\n" >&2
|
||||
|
||||
# Build sort and filter status
|
||||
# Build sort status
|
||||
local sort_label=""
|
||||
case "$sort_mode" in
|
||||
date) sort_label="Date" ;;
|
||||
@@ -474,15 +387,6 @@ paginated_multi_select() {
|
||||
esac
|
||||
local sort_status="${sort_label}"
|
||||
|
||||
local filter_status=""
|
||||
if [[ "$filter_mode" == "true" ]]; then
|
||||
filter_status="${filter_query:-_}"
|
||||
elif [[ -n "$applied_query" ]]; then
|
||||
filter_status="${applied_query}"
|
||||
else
|
||||
filter_status="—"
|
||||
fi
|
||||
|
||||
# Footer: single line with controls
|
||||
local sep=" ${GRAY}|${NC} "
|
||||
|
||||
@@ -497,77 +401,54 @@ paginated_multi_select() {
|
||||
# Common menu items
|
||||
local nav="${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN}${NC}"
|
||||
local space_select="${GRAY}Space Select${NC}"
|
||||
local space="${GRAY}Space${NC}"
|
||||
local enter="${GRAY}Enter${NC}"
|
||||
local exit="${GRAY}Q Exit${NC}"
|
||||
|
||||
if [[ "$filter_mode" == "true" ]]; then
|
||||
# Filter mode: simple controls without sort
|
||||
local -a _segs_filter=(
|
||||
"${GRAY}Search: ${filter_status}${NC}"
|
||||
"${GRAY}Delete${NC}"
|
||||
"${GRAY}Enter Confirm${NC}"
|
||||
"${GRAY}ESC Cancel${NC}"
|
||||
)
|
||||
_print_wrapped_controls "$sep" "${_segs_filter[@]}"
|
||||
else
|
||||
# Normal mode - prepare dynamic items
|
||||
local reverse_arrow="↑"
|
||||
[[ "$sort_reverse" == "true" ]] && reverse_arrow="↓"
|
||||
local reverse_arrow="↑"
|
||||
[[ "$sort_reverse" == "true" ]] && reverse_arrow="↓"
|
||||
|
||||
local filter_text="/ Search"
|
||||
[[ -n "$applied_query" ]] && filter_text="/ Clear"
|
||||
local refresh="${GRAY}R Refresh${NC}"
|
||||
local sort_ctrl="${GRAY}S ${sort_status}${NC}"
|
||||
local order_ctrl="${GRAY}O ${reverse_arrow}${NC}"
|
||||
|
||||
local refresh="${GRAY}R Refresh${NC}"
|
||||
local search="${GRAY}${filter_text}${NC}"
|
||||
local sort_ctrl="${GRAY}S ${sort_status}${NC}"
|
||||
local order_ctrl="${GRAY}O ${reverse_arrow}${NC}"
|
||||
if [[ "$has_metadata" == "true" ]]; then
|
||||
# With metadata: show sort controls
|
||||
local term_width="${COLUMNS:-}"
|
||||
[[ -z "$term_width" ]] && term_width=$(tput cols 2> /dev/null || echo 80)
|
||||
[[ "$term_width" =~ ^[0-9]+$ ]] || term_width=80
|
||||
|
||||
if [[ "$has_metadata" == "true" ]]; then
|
||||
if [[ -n "$applied_query" ]]; then
|
||||
# Filtering active: hide sort controls
|
||||
local -a _segs_all=("$nav" "$space" "$enter" "$refresh" "$search" "$exit")
|
||||
_print_wrapped_controls "$sep" "${_segs_all[@]}"
|
||||
else
|
||||
# Normal: show full controls with dynamic reduction
|
||||
local term_width="${COLUMNS:-}"
|
||||
[[ -z "$term_width" ]] && term_width=$(tput cols 2> /dev/null || echo 80)
|
||||
[[ "$term_width" =~ ^[0-9]+$ ]] || term_width=80
|
||||
# Full controls
|
||||
local -a _segs=("$nav" "$space_select" "$enter" "$refresh" "$sort_ctrl" "$order_ctrl" "$exit")
|
||||
|
||||
# Level 0: Full controls
|
||||
local -a _segs=("$nav" "$space_select" "$enter" "$refresh" "$search" "$sort_ctrl" "$order_ctrl" "$exit")
|
||||
# Calculate width
|
||||
local total_len=0 seg_count=${#_segs[@]}
|
||||
for i in "${!_segs[@]}"; do
|
||||
total_len=$((total_len + $(_calc_len "${_segs[i]}")))
|
||||
[[ $i -lt $((seg_count - 1)) ]] && total_len=$((total_len + 3))
|
||||
done
|
||||
|
||||
# Calculate width
|
||||
local total_len=0 seg_count=${#_segs[@]}
|
||||
for i in "${!_segs[@]}"; do
|
||||
total_len=$((total_len + $(_calc_len "${_segs[i]}")))
|
||||
[[ $i -lt $((seg_count - 1)) ]] && total_len=$((total_len + 3))
|
||||
done
|
||||
# Level 1: Remove "Space Select" if too wide
|
||||
if [[ $total_len -gt $term_width ]]; then
|
||||
_segs=("$nav" "$enter" "$refresh" "$sort_ctrl" "$order_ctrl" "$exit")
|
||||
|
||||
# Level 1: Remove "Space Select"
|
||||
if [[ $total_len -gt $term_width ]]; then
|
||||
_segs=("$nav" "$enter" "$refresh" "$search" "$sort_ctrl" "$order_ctrl" "$exit")
|
||||
total_len=0
|
||||
seg_count=${#_segs[@]}
|
||||
for i in "${!_segs[@]}"; do
|
||||
total_len=$((total_len + $(_calc_len "${_segs[i]}")))
|
||||
[[ $i -lt $((seg_count - 1)) ]] && total_len=$((total_len + 3))
|
||||
done
|
||||
|
||||
total_len=0
|
||||
seg_count=${#_segs[@]}
|
||||
for i in "${!_segs[@]}"; do
|
||||
total_len=$((total_len + $(_calc_len "${_segs[i]}")))
|
||||
[[ $i -lt $((seg_count - 1)) ]] && total_len=$((total_len + 3))
|
||||
done
|
||||
|
||||
# Level 2: Remove "S ${sort_status}"
|
||||
if [[ $total_len -gt $term_width ]]; then
|
||||
_segs=("$nav" "$enter" "$refresh" "$search" "$order_ctrl" "$exit")
|
||||
fi
|
||||
fi
|
||||
|
||||
_print_wrapped_controls "$sep" "${_segs[@]}"
|
||||
# Level 2: Remove sort label if still too wide
|
||||
if [[ $total_len -gt $term_width ]]; then
|
||||
_segs=("$nav" "$enter" "$refresh" "$order_ctrl" "$exit")
|
||||
fi
|
||||
else
|
||||
# Without metadata: basic controls
|
||||
local -a _segs_simple=("$nav" "$space_select" "$enter" "$refresh" "$search" "$exit")
|
||||
_print_wrapped_controls "$sep" "${_segs_simple[@]}"
|
||||
fi
|
||||
|
||||
_print_wrapped_controls "$sep" "${_segs[@]}"
|
||||
else
|
||||
# Without metadata: basic controls
|
||||
local -a _segs_simple=("$nav" "$space_select" "$enter" "$refresh" "$exit")
|
||||
_print_wrapped_controls "$sep" "${_segs_simple[@]}"
|
||||
fi
|
||||
printf "${clear_line}" >&2
|
||||
}
|
||||
@@ -592,16 +473,6 @@ paginated_multi_select() {
|
||||
|
||||
case "$key" in
|
||||
"QUIT")
|
||||
if [[ "$filter_mode" == "true" ]]; then
|
||||
filter_mode="false"
|
||||
filter_query=""
|
||||
applied_query=""
|
||||
top_index=0
|
||||
cursor_pos=0
|
||||
rebuild_view
|
||||
need_full_redraw=true
|
||||
continue
|
||||
fi
|
||||
cleanup
|
||||
return 1
|
||||
;;
|
||||
@@ -759,13 +630,7 @@ paginated_multi_select() {
|
||||
fi
|
||||
;;
|
||||
"CHAR:s" | "CHAR:S")
|
||||
if [[ "$filter_mode" == "true" ]]; then
|
||||
local ch="${key#CHAR:}"
|
||||
filter_query+="$ch"
|
||||
rebuild_view
|
||||
need_full_redraw=true
|
||||
continue
|
||||
elif [[ "$has_metadata" == "true" ]]; then
|
||||
if [[ "$has_metadata" == "true" ]]; then
|
||||
# Cycle sort mode (only if metadata available)
|
||||
case "$sort_mode" in
|
||||
date) sort_mode="name" ;;
|
||||
@@ -776,135 +641,43 @@ paginated_multi_select() {
|
||||
need_full_redraw=true
|
||||
fi
|
||||
;;
|
||||
"FILTER")
|
||||
# / key: toggle between filter and return
|
||||
if [[ -n "$applied_query" ]]; then
|
||||
# Already filtering, clear and return to full list
|
||||
applied_query=""
|
||||
filter_query=""
|
||||
top_index=0
|
||||
cursor_pos=0
|
||||
rebuild_view
|
||||
need_full_redraw=true
|
||||
else
|
||||
# Enter filter mode
|
||||
filter_mode="true"
|
||||
filter_query=""
|
||||
top_index=0
|
||||
cursor_pos=0
|
||||
rebuild_view
|
||||
need_full_redraw=true
|
||||
fi
|
||||
;;
|
||||
"CHAR:j")
|
||||
if [[ "$filter_mode" != "true" ]]; then
|
||||
# Down navigation
|
||||
if [[ ${#view_indices[@]} -gt 0 ]]; then
|
||||
local absolute_index=$((top_index + cursor_pos))
|
||||
local last_index=$((${#view_indices[@]} - 1))
|
||||
if [[ $absolute_index -lt $last_index ]]; then
|
||||
local visible_count=$((${#view_indices[@]} - top_index))
|
||||
[[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
|
||||
if [[ $cursor_pos -lt $((visible_count - 1)) ]]; then
|
||||
((cursor_pos++))
|
||||
elif [[ $((top_index + visible_count)) -lt ${#view_indices[@]} ]]; then
|
||||
((top_index++))
|
||||
fi
|
||||
# Down navigation (vim style)
|
||||
if [[ ${#view_indices[@]} -gt 0 ]]; then
|
||||
local absolute_index=$((top_index + cursor_pos))
|
||||
local last_index=$((${#view_indices[@]} - 1))
|
||||
if [[ $absolute_index -lt $last_index ]]; then
|
||||
local visible_count=$((${#view_indices[@]} - top_index))
|
||||
[[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
|
||||
if [[ $cursor_pos -lt $((visible_count - 1)) ]]; then
|
||||
((cursor_pos++))
|
||||
elif [[ $((top_index + visible_count)) -lt ${#view_indices[@]} ]]; then
|
||||
((top_index++))
|
||||
fi
|
||||
need_full_redraw=true
|
||||
fi
|
||||
else
|
||||
filter_query+="j"
|
||||
rebuild_view
|
||||
need_full_redraw=true
|
||||
continue
|
||||
fi
|
||||
;;
|
||||
"CHAR:k")
|
||||
if [[ "$filter_mode" != "true" ]]; then
|
||||
# Up navigation
|
||||
if [[ ${#view_indices[@]} -gt 0 ]]; then
|
||||
if [[ $cursor_pos -gt 0 ]]; then
|
||||
((cursor_pos--))
|
||||
elif [[ $top_index -gt 0 ]]; then
|
||||
((top_index--))
|
||||
fi
|
||||
# Up navigation (vim style)
|
||||
if [[ ${#view_indices[@]} -gt 0 ]]; then
|
||||
if [[ $cursor_pos -gt 0 ]]; then
|
||||
((cursor_pos--))
|
||||
need_full_redraw=true
|
||||
elif [[ $top_index -gt 0 ]]; then
|
||||
((top_index--))
|
||||
need_full_redraw=true
|
||||
fi
|
||||
else
|
||||
filter_query+="k"
|
||||
rebuild_view
|
||||
need_full_redraw=true
|
||||
continue
|
||||
fi
|
||||
;;
|
||||
"TOUCHID")
|
||||
if [[ "$filter_mode" == "true" ]]; then
|
||||
filter_query+="t"
|
||||
rebuild_view
|
||||
need_full_redraw=true
|
||||
continue
|
||||
fi
|
||||
;;
|
||||
"RIGHT")
|
||||
if [[ "$filter_mode" == "true" ]]; then
|
||||
filter_query+="l"
|
||||
rebuild_view
|
||||
need_full_redraw=true
|
||||
continue
|
||||
fi
|
||||
;;
|
||||
"LEFT")
|
||||
if [[ "$filter_mode" == "true" ]]; then
|
||||
filter_query+="h"
|
||||
rebuild_view
|
||||
need_full_redraw=true
|
||||
continue
|
||||
fi
|
||||
;;
|
||||
"MORE")
|
||||
if [[ "$filter_mode" == "true" ]]; then
|
||||
filter_query+="m"
|
||||
rebuild_view
|
||||
need_full_redraw=true
|
||||
continue
|
||||
fi
|
||||
;;
|
||||
"UPDATE")
|
||||
if [[ "$filter_mode" == "true" ]]; then
|
||||
filter_query+="u"
|
||||
rebuild_view
|
||||
need_full_redraw=true
|
||||
continue
|
||||
fi
|
||||
;;
|
||||
"CHAR:f" | "CHAR:F")
|
||||
if [[ "$filter_mode" == "true" ]]; then
|
||||
filter_query+="${key#CHAR:}"
|
||||
rebuild_view
|
||||
need_full_redraw=true
|
||||
continue
|
||||
fi
|
||||
# F is currently unbound in normal mode to avoid conflict with Refresh (R)
|
||||
;;
|
||||
"CHAR:r" | "CHAR:R")
|
||||
if [[ "$filter_mode" == "true" ]]; then
|
||||
filter_query+="${key#CHAR:}"
|
||||
rebuild_view
|
||||
need_full_redraw=true
|
||||
continue
|
||||
else
|
||||
# Trigger Refresh signal (Unified with Analyze)
|
||||
cleanup
|
||||
return 10
|
||||
fi
|
||||
# Trigger Refresh signal
|
||||
cleanup
|
||||
return 10
|
||||
;;
|
||||
"CHAR:o" | "CHAR:O")
|
||||
if [[ "$filter_mode" == "true" ]]; then
|
||||
filter_query+="${key#CHAR:}"
|
||||
rebuild_view
|
||||
need_full_redraw=true
|
||||
continue
|
||||
elif [[ "$has_metadata" == "true" ]]; then
|
||||
# O toggles reverse order (Unified Sort Order)
|
||||
if [[ "$has_metadata" == "true" ]]; then
|
||||
# O toggles reverse order
|
||||
if [[ "$sort_reverse" == "true" ]]; then
|
||||
sort_reverse="false"
|
||||
else
|
||||
@@ -914,40 +687,8 @@ paginated_multi_select() {
|
||||
need_full_redraw=true
|
||||
fi
|
||||
;;
|
||||
"DELETE")
|
||||
# Backspace filter
|
||||
if [[ "$filter_mode" == "true" && -n "$filter_query" ]]; then
|
||||
filter_query="${filter_query%?}"
|
||||
# Rebuild view to apply filter in real-time
|
||||
rebuild_view
|
||||
# Trigger redraw and continue to avoid drain_pending_input
|
||||
need_full_redraw=true
|
||||
continue
|
||||
fi
|
||||
;;
|
||||
CHAR:*)
|
||||
if [[ "$filter_mode" == "true" ]]; then
|
||||
local ch="${key#CHAR:}"
|
||||
# avoid accidental leading spaces
|
||||
if [[ -n "$filter_query" || "$ch" != " " ]]; then
|
||||
filter_query+="$ch"
|
||||
# Rebuild view to apply filter in real-time
|
||||
rebuild_view
|
||||
# Trigger redraw and continue to avoid drain_pending_input
|
||||
need_full_redraw=true
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
"ENTER")
|
||||
if [[ "$filter_mode" == "true" ]]; then
|
||||
applied_query="$filter_query"
|
||||
filter_mode="false"
|
||||
# Preserve cursor/top_index so navigation during search is respected
|
||||
rebuild_view
|
||||
# Fall through to confirmation logic
|
||||
fi
|
||||
# In normal mode: smart Enter behavior
|
||||
# Smart Enter behavior
|
||||
# 1. Check if any items are already selected
|
||||
local has_selection=false
|
||||
for ((i = 0; i < total_items; i++)); do
|
||||
|
||||
@@ -11,24 +11,31 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
|
||||
|
||||
# Batch uninstall with a single confirmation.
|
||||
|
||||
# User data detection patterns (prompt user to backup if found).
|
||||
readonly SENSITIVE_DATA_PATTERNS=(
|
||||
"\.warp" # Warp terminal configs/themes
|
||||
"/\.config/" # Standard Unix config directory
|
||||
"/themes/" # Theme customizations
|
||||
"/settings/" # Settings directories
|
||||
"/Application Support/[^/]+/User Data" # Chrome/Electron user data
|
||||
"/Preferences/[^/]+\.plist" # User preference files
|
||||
"/Documents/" # User documents
|
||||
"/\.ssh/" # SSH keys and configs (critical)
|
||||
"/\.gnupg/" # GPG keys (critical)
|
||||
)
|
||||
# High-performance sensitive data detection (pure Bash, no subprocess)
|
||||
# Faster than grep for batch operations, especially when processing many apps
|
||||
has_sensitive_data() {
|
||||
local files="$1"
|
||||
[[ -z "$files" ]] && return 1
|
||||
|
||||
# Join patterns into a single regex for grep.
|
||||
SENSITIVE_DATA_REGEX=$(
|
||||
IFS='|'
|
||||
echo "${SENSITIVE_DATA_PATTERNS[*]}"
|
||||
)
|
||||
while IFS= read -r file; do
|
||||
[[ -z "$file" ]] && continue
|
||||
|
||||
# Use Bash native pattern matching (faster than spawning grep)
|
||||
case "$file" in
|
||||
*/.warp* | */.config/* | */themes/* | */settings/* | */User\ Data/* | \
|
||||
*/.ssh/* | */.gnupg/* | */Documents/* | */Preferences/*.plist | \
|
||||
*/Desktop/* | */Downloads/* | */Movies/* | */Music/* | */Pictures/* | \
|
||||
*/.password* | */.token* | */.auth* | */keychain* | \
|
||||
*/Passwords/* | */Accounts/* | */Cookies/* | \
|
||||
*/.aws/* | */.docker/config.json | */.kube/* | \
|
||||
*/credentials/* | */secrets/*)
|
||||
return 0 # Found sensitive data
|
||||
;;
|
||||
esac
|
||||
done <<< "$files"
|
||||
|
||||
return 1 # Not found
|
||||
}
|
||||
|
||||
# Decode and validate base64 file list (safe for set -e).
|
||||
decode_file_list() {
|
||||
@@ -65,12 +72,20 @@ decode_file_list() {
|
||||
# Note: find_app_files() and calculate_total_size() are in lib/core/common.sh.
|
||||
|
||||
# Stop Launch Agents/Daemons for an app.
|
||||
# Security: bundle_id is validated to be reverse-DNS format before use in find patterns
|
||||
stop_launch_services() {
|
||||
local bundle_id="$1"
|
||||
local has_system_files="${2:-false}"
|
||||
|
||||
[[ -z "$bundle_id" || "$bundle_id" == "unknown" ]] && return 0
|
||||
|
||||
# Validate bundle_id format: must be reverse-DNS style (e.g., com.example.app)
|
||||
# This prevents glob injection attacks if bundle_id contains special characters
|
||||
if [[ ! "$bundle_id" =~ ^[a-zA-Z0-9][-a-zA-Z0-9]*(\.[a-zA-Z0-9][-a-zA-Z0-9]*)+$ ]]; then
|
||||
debug_log "Invalid bundle_id format for LaunchAgent search: $bundle_id"
|
||||
return 0
|
||||
fi
|
||||
|
||||
if [[ -d ~/Library/LaunchAgents ]]; then
|
||||
while IFS= read -r -d '' plist; do
|
||||
launchctl unload "$plist" 2> /dev/null || true
|
||||
@@ -128,6 +143,7 @@ remove_login_item() {
|
||||
}
|
||||
|
||||
# Remove files (handles symlinks, optional sudo).
|
||||
# Security: All paths pass validate_path_for_deletion() before any deletion.
|
||||
remove_file_list() {
|
||||
local file_list="$1"
|
||||
local use_sudo="${2:-false}"
|
||||
@@ -140,6 +156,12 @@ remove_file_list() {
|
||||
continue
|
||||
fi
|
||||
|
||||
# Symlinks are handled separately using rm (not safe_remove/safe_sudo_remove)
|
||||
# because safe_sudo_remove() refuses symlinks entirely as a TOCTOU protection.
|
||||
# This is safe because:
|
||||
# 1. The path has already passed validate_path_for_deletion() above
|
||||
# 2. rm on a symlink only removes the link itself, NOT the target
|
||||
# 3. The symlink deletion is logged via operations.log
|
||||
if [[ -L "$file" ]]; then
|
||||
if [[ "$use_sudo" == "true" ]]; then
|
||||
sudo rm "$file" 2> /dev/null && ((++count)) || true
|
||||
@@ -168,39 +190,69 @@ batch_uninstall_applications() {
|
||||
return 0
|
||||
fi
|
||||
|
||||
local old_trap_int old_trap_term
|
||||
old_trap_int=$(trap -p INT)
|
||||
old_trap_term=$(trap -p TERM)
|
||||
|
||||
_restore_uninstall_traps() {
|
||||
if [[ -n "$old_trap_int" ]]; then
|
||||
eval "$old_trap_int"
|
||||
else
|
||||
trap - INT
|
||||
fi
|
||||
if [[ -n "$old_trap_term" ]]; then
|
||||
eval "$old_trap_term"
|
||||
else
|
||||
trap - TERM
|
||||
fi
|
||||
}
|
||||
|
||||
# Trap to clean up spinner and uninstall mode on interrupt
|
||||
trap 'stop_inline_spinner 2>/dev/null; unset MOLE_UNINSTALL_MODE; echo ""; _restore_uninstall_traps; return 130' INT TERM
|
||||
|
||||
# Pre-scan: running apps, sudo needs, size.
|
||||
local -a running_apps=()
|
||||
local -a sudo_apps=()
|
||||
local total_estimated_size=0
|
||||
local -a app_details=()
|
||||
|
||||
# Cache current user outside loop
|
||||
local current_user=$(whoami)
|
||||
|
||||
if [[ -t 1 ]]; then start_inline_spinner "Scanning files..."; fi
|
||||
for selected_app in "${selected_apps[@]}"; do
|
||||
[[ -z "$selected_app" ]] && continue
|
||||
IFS='|' read -r _ app_path app_name bundle_id _ _ <<< "$selected_app"
|
||||
|
||||
# Check running app by bundle executable if available.
|
||||
# Check running app by bundle executable if available
|
||||
local exec_name=""
|
||||
if [[ -e "$app_path/Contents/Info.plist" ]]; then
|
||||
exec_name=$(defaults read "$app_path/Contents/Info.plist" CFBundleExecutable 2> /dev/null || echo "")
|
||||
local info_plist="$app_path/Contents/Info.plist"
|
||||
if [[ -e "$info_plist" ]]; then
|
||||
exec_name=$(defaults read "$info_plist" CFBundleExecutable 2> /dev/null || echo "")
|
||||
fi
|
||||
local check_pattern="${exec_name:-$app_name}"
|
||||
if pgrep -x "$check_pattern" > /dev/null 2>&1; then
|
||||
if pgrep -qx "${exec_name:-$app_name}" 2> /dev/null; then
|
||||
running_apps+=("$app_name")
|
||||
fi
|
||||
|
||||
# Check if it's a Homebrew cask (deterministic: resolved path in Caskroom)
|
||||
local cask_name=""
|
||||
cask_name=$(get_brew_cask_name "$app_path" || echo "")
|
||||
local is_brew_cask="false"
|
||||
[[ -n "$cask_name" ]] && is_brew_cask="true"
|
||||
local cask_name="" is_brew_cask="false"
|
||||
local resolved_path=$(readlink "$app_path" 2> /dev/null || echo "")
|
||||
if [[ "$resolved_path" == */Caskroom/* ]]; then
|
||||
# Extract cask name using bash parameter expansion (faster than sed)
|
||||
local tmp="${resolved_path#*/Caskroom/}"
|
||||
cask_name="${tmp%%/*}"
|
||||
[[ -n "$cask_name" ]] && is_brew_cask="true"
|
||||
elif command -v get_brew_cask_name > /dev/null 2>&1; then
|
||||
local detected_cask
|
||||
detected_cask=$(get_brew_cask_name "$app_path" 2> /dev/null || true)
|
||||
if [[ -n "$detected_cask" ]]; then
|
||||
cask_name="$detected_cask"
|
||||
is_brew_cask="true"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Full file scanning for ALL apps (including Homebrew casks)
|
||||
# brew uninstall --cask does NOT remove user data (caches, prefs, app support)
|
||||
# Mole's value is cleaning those up, so we must scan for them
|
||||
# Check if sudo is needed
|
||||
local needs_sudo=false
|
||||
local app_owner=$(get_file_owner "$app_path")
|
||||
local current_user=$(whoami)
|
||||
if [[ ! -w "$(dirname "$app_path")" ]] ||
|
||||
[[ "$app_owner" == "root" ]] ||
|
||||
[[ -n "$app_owner" && "$app_owner" != "$current_user" ]]; then
|
||||
@@ -230,7 +282,7 @@ batch_uninstall_applications() {
|
||||
|
||||
# Check for sensitive user data once.
|
||||
local has_sensitive_data="false"
|
||||
if [[ -n "$related_files" ]] && echo "$related_files" | grep -qE "$SENSITIVE_DATA_REGEX"; then
|
||||
if has_sensitive_data "$related_files"; then
|
||||
has_sensitive_data="true"
|
||||
fi
|
||||
|
||||
@@ -260,7 +312,7 @@ batch_uninstall_applications() {
|
||||
done
|
||||
|
||||
if [[ "$has_user_data" == "true" ]]; then
|
||||
echo -e "${YELLOW}${ICON_WARNING}${NC} ${YELLOW}Note: Some apps contain user configurations/themes${NC}"
|
||||
echo -e "${GRAY}${ICON_WARNING}${NC} ${YELLOW}Note: Some apps contain user configurations/themes${NC}"
|
||||
echo ""
|
||||
fi
|
||||
|
||||
@@ -270,7 +322,7 @@ batch_uninstall_applications() {
|
||||
|
||||
local brew_tag=""
|
||||
[[ "$is_brew_cask" == "true" ]] && brew_tag=" ${CYAN}[Brew]${NC}"
|
||||
echo -e "${BLUE}${ICON_CONFIRM}${NC} ${app_name}${brew_tag} ${GRAY}(${app_size_display})${NC}"
|
||||
echo -e "${BLUE}${ICON_CONFIRM}${NC} ${app_name}${brew_tag} ${GRAY}, ${app_size_display}${NC}"
|
||||
|
||||
# Show detailed file list for ALL apps (brew casks leave user data behind)
|
||||
local related_files=$(decode_file_list "$encoded_files" "$app_name")
|
||||
@@ -295,7 +347,7 @@ batch_uninstall_applications() {
|
||||
while IFS= read -r file; do
|
||||
if [[ -n "$file" && -e "$file" ]]; then
|
||||
if [[ $sys_file_count -lt $max_files ]]; then
|
||||
echo -e " ${BLUE}${ICON_SOLID}${NC} System: $file"
|
||||
echo -e " ${BLUE}${ICON_WARNING}${NC} System: $file"
|
||||
fi
|
||||
((sys_file_count++))
|
||||
fi
|
||||
@@ -315,7 +367,7 @@ batch_uninstall_applications() {
|
||||
|
||||
echo ""
|
||||
local removal_note="Remove ${app_total} ${app_text}"
|
||||
[[ -n "$size_display" ]] && removal_note+=" (${size_display})"
|
||||
[[ -n "$size_display" ]] && removal_note+=", ${size_display}"
|
||||
if [[ ${#running_apps[@]} -gt 0 ]]; then
|
||||
removal_note+=" ${YELLOW}[Running]${NC}"
|
||||
fi
|
||||
@@ -328,6 +380,7 @@ batch_uninstall_applications() {
|
||||
$'\e' | q | Q)
|
||||
echo ""
|
||||
echo ""
|
||||
_restore_uninstall_traps
|
||||
return 0
|
||||
;;
|
||||
"" | $'\n' | $'\r' | y | Y)
|
||||
@@ -336,16 +389,22 @@ batch_uninstall_applications() {
|
||||
*)
|
||||
echo ""
|
||||
echo ""
|
||||
_restore_uninstall_traps
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
|
||||
# Enable uninstall mode - allows deletion of data-protected apps (VPNs, dev tools, etc.)
|
||||
# that user explicitly chose to uninstall. System-critical components remain protected.
|
||||
export MOLE_UNINSTALL_MODE=1
|
||||
|
||||
# Request sudo if needed.
|
||||
if [[ ${#sudo_apps[@]} -gt 0 ]]; then
|
||||
if ! sudo -n true 2> /dev/null; then
|
||||
if ! request_sudo_access "Admin required for system apps: ${sudo_apps[*]}"; then
|
||||
echo ""
|
||||
log_error "Admin access denied"
|
||||
_restore_uninstall_traps
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
@@ -399,34 +458,44 @@ batch_uninstall_applications() {
|
||||
fi
|
||||
|
||||
# Remove the application only if not running.
|
||||
# Stop spinner before any removal attempt (avoids mixed output on errors)
|
||||
[[ -t 1 ]] && stop_inline_spinner
|
||||
|
||||
local used_brew_successfully=false
|
||||
if [[ -z "$reason" ]]; then
|
||||
if [[ "$is_brew_cask" == "true" && -n "$cask_name" ]]; then
|
||||
# Stop spinner before brew output
|
||||
[[ -t 1 ]] && stop_inline_spinner
|
||||
# Use brew_uninstall_cask helper (handles env vars, timeout, verification)
|
||||
if brew_uninstall_cask "$cask_name" "$app_path"; then
|
||||
used_brew_successfully=true
|
||||
else
|
||||
# Fallback to manual removal if brew fails
|
||||
if [[ "$needs_sudo" == true ]]; then
|
||||
safe_sudo_remove "$app_path" || reason="remove failed"
|
||||
if ! safe_sudo_remove "$app_path"; then
|
||||
reason="brew failed, manual removal failed"
|
||||
fi
|
||||
else
|
||||
safe_remove "$app_path" true || reason="remove failed"
|
||||
if ! safe_remove "$app_path" true; then
|
||||
reason="brew failed, manual removal failed"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
elif [[ "$needs_sudo" == true ]]; then
|
||||
if ! safe_sudo_remove "$app_path"; then
|
||||
local app_owner=$(get_file_owner "$app_path")
|
||||
local current_user=$(whoami)
|
||||
if [[ -n "$app_owner" && "$app_owner" != "$current_user" && "$app_owner" != "root" ]]; then
|
||||
reason="owned by $app_owner"
|
||||
reason="owned by $app_owner, try 'sudo chown $(whoami) \"$app_path\"'"
|
||||
else
|
||||
reason="permission denied"
|
||||
reason="permission denied, try 'mole touchid' for passwordless sudo"
|
||||
fi
|
||||
fi
|
||||
else
|
||||
safe_remove "$app_path" true || reason="remove failed"
|
||||
if ! safe_remove "$app_path" true; then
|
||||
if [[ ! -w "$(dirname "$app_path")" ]]; then
|
||||
reason="parent directory not writable"
|
||||
else
|
||||
reason="remove failed, check permissions"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -448,18 +517,23 @@ batch_uninstall_applications() {
|
||||
fi

# ByHost preferences (machine-specific).
if [[ -d ~/Library/Preferences/ByHost ]]; then
find ~/Library/Preferences/ByHost -maxdepth 1 -name "${bundle_id}.*.plist" -delete 2> /dev/null || true
if [[ -d "$HOME/Library/Preferences/ByHost" ]]; then
if [[ "$bundle_id" =~ ^[A-Za-z0-9._-]+$ ]]; then
while IFS= read -r -d '' plist_file; do
safe_remove "$plist_file" true > /dev/null || true
done < <(command find "$HOME/Library/Preferences/ByHost" -maxdepth 1 -type f -name "${bundle_id}.*.plist" -print0 2> /dev/null || true)
else
debug_log "Skipping ByHost cleanup, invalid bundle id: $bundle_id"
fi
fi
fi

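ByHost plist names embed a machine identifier after the bundle id, which is why the glob plus a NUL-delimited find loop is used; the filename below is a made-up example:

# ~/Library/Preferences/ByHost/com.example.TestApp.0A1B2C3D-XXXX.plist   (hypothetical)
# The ^[A-Za-z0-9._-]+$ guard rejects bundle ids containing spaces, globs, or
# other unexpected characters before they are interpolated into the find -name pattern,
# and -print0 / read -d '' keeps paths with spaces intact.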
# Stop spinner and show success
|
||||
# Show success
|
||||
if [[ -t 1 ]]; then
|
||||
stop_inline_spinner
|
||||
if [[ ${#app_details[@]} -gt 1 ]]; then
|
||||
echo -e "\r\033[K${GREEN}✓${NC} [$current_index/${#app_details[@]}] ${app_name}"
|
||||
echo -e "${GREEN}✓${NC} [$current_index/${#app_details[@]}] ${app_name}"
|
||||
else
|
||||
echo -e "\r\033[K${GREEN}✓${NC} ${app_name}"
|
||||
echo -e "${GREEN}✓${NC} ${app_name}"
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -470,13 +544,12 @@ batch_uninstall_applications() {
|
||||
((total_items++))
|
||||
success_items+=("$app_name")
|
||||
else
|
||||
# Stop spinner and show failure
|
||||
# Show failure
|
||||
if [[ -t 1 ]]; then
|
||||
stop_inline_spinner
|
||||
if [[ ${#app_details[@]} -gt 1 ]]; then
|
||||
echo -e "\r\033[K${RED}✗${NC} [$current_index/${#app_details[@]}] ${app_name} ${GRAY}($reason)${NC}"
|
||||
echo -e "${ICON_ERROR} [$current_index/${#app_details[@]}] ${app_name} ${GRAY}, $reason${NC}"
|
||||
else
|
||||
echo -e "\r\033[K${RED}✗${NC} ${app_name} failed: $reason"
|
||||
echo -e "${ICON_ERROR} ${app_name} failed: $reason"
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -550,7 +623,7 @@ batch_uninstall_applications() {
|
||||
still*running*) reason_summary="is still running" ;;
|
||||
remove*failed*) reason_summary="could not be removed" ;;
|
||||
permission*denied*) reason_summary="permission denied" ;;
|
||||
owned*by*) reason_summary="$first_reason (try with sudo)" ;;
|
||||
owned*by*) reason_summary="$first_reason, try with sudo" ;;
|
||||
*) reason_summary="$first_reason" ;;
|
||||
esac
|
||||
fi
|
||||
@@ -617,12 +690,18 @@ batch_uninstall_applications() {
|
||||
sudo_keepalive_pid=""
|
||||
fi
|
||||
|
||||
# Disable uninstall mode
|
||||
unset MOLE_UNINSTALL_MODE
|
||||
|
||||
# Invalidate cache if any apps were successfully uninstalled.
|
||||
if [[ $success_count -gt 0 ]]; then
|
||||
local cache_file="$HOME/.cache/mole/app_scan_cache"
|
||||
rm -f "$cache_file" 2> /dev/null || true
|
||||
fi
|
||||
|
||||
_restore_uninstall_traps
|
||||
unset -f _restore_uninstall_traps
|
||||
|
||||
((total_size_cleaned += total_size_freed))
|
||||
unset failed_items
|
||||
}
|
||||
|
||||
@@ -173,28 +173,45 @@ brew_uninstall_cask() {
|
||||
|
||||
debug_log "Attempting brew uninstall --cask $cask_name"
|
||||
|
||||
# Run uninstall with timeout (suppress hints/auto-update)
|
||||
debug_log "Attempting brew uninstall --cask $cask_name"

# Ensure we have sudo access if needed, to prevent brew from hanging on password prompt
# Many brew casks need sudo to uninstall
if ! sudo -n true 2> /dev/null; then
# If we don't have sudo, try to get it (visibly)
sudo -v
if [[ "${NONINTERACTIVE:-}" != "1" && -t 0 && -t 1 ]]; then
if ! sudo -n true 2> /dev/null; then
sudo -v
fi
fi

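The new guard only pre-warms sudo when a prompt could actually be answered; roughly:

# -t 0 && -t 1           stdin and stdout are terminals, so a user can type a password
# NONINTERACTIVE != 1    the caller has not asked for unattended mode
# sudo -n true           succeeds silently when credentials are already cached,
#                        so `sudo -v` only runs when a prompt is genuinely needed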
local uninstall_ok=false
|
||||
local brew_exit=0
|
||||
|
||||
# Run directly without output capture to allow user interaction/visibility
|
||||
# This avoids silence/hangs when brew asks for passwords or confirmation
|
||||
if HOMEBREW_NO_ENV_HINTS=1 HOMEBREW_NO_AUTO_UPDATE=1 NONINTERACTIVE=1 \
|
||||
brew uninstall --cask "$cask_name"; then
|
||||
uninstall_ok=true
|
||||
else
|
||||
debug_log "brew uninstall failed with exit code $?"
# Calculate timeout based on app size (large apps need more time)
local timeout=300 # Default 5 minutes
if [[ -n "$app_path" && -d "$app_path" ]]; then
local size_gb=$(($(get_path_size_kb "$app_path") / 1048576))
if [[ $size_gb -gt 15 ]]; then
timeout=900 # 15 minutes for very large apps (Xcode, Adobe, etc.)
elif [[ $size_gb -gt 5 ]]; then
timeout=600 # 10 minutes for large apps
fi
debug_log "App size: ${size_gb}GB, timeout: ${timeout}s"
fi

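The KB-to-GB division and the thresholds work out like this (the sizes are illustrative):

# get_path_size_kb ->  2,097,152 KB ->  2 GB -> timeout 300s (default)
# get_path_size_kb ->  8,388,608 KB ->  8 GB -> timeout 600s (> 5 GB)
# get_path_size_kb -> 20,971,520 KB -> 20 GB -> timeout 900s (> 15 GB)
# 1 GB = 1048576 KB, and bash integer division truncates toward zero.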
# Verify removal
# Run with timeout to prevent hangs from problematic cask scripts
local brew_exit=0
if HOMEBREW_NO_ENV_HINTS=1 HOMEBREW_NO_AUTO_UPDATE=1 NONINTERACTIVE=1 \
run_with_timeout "$timeout" brew uninstall --cask "$cask_name" 2>&1; then
uninstall_ok=true
else
brew_exit=$?
debug_log "brew uninstall timeout or failed with exit code: $brew_exit"
# Exit code 124 indicates timeout from run_with_timeout
# On timeout, fail immediately without verification to avoid inconsistent state
if [[ $brew_exit -eq 124 ]]; then
debug_log "brew uninstall timed out after ${timeout}s, returning failure"
return 1
fi
fi

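run_with_timeout is a project helper not shown in this diff, presumably because coreutils' timeout is not part of stock macOS. A minimal stand-in that follows the same convention (exit 124 on timeout) might look like this; it is illustrative only, not the project's implementation:

run_with_timeout() {
    # Illustrative only: run "$@" for at most $1 seconds, report 124 on timeout.
    local secs="$1"; shift
    "$@" &
    local cmd_pid=$!
    ( sleep "$secs" && kill "$cmd_pid" 2> /dev/null ) &
    local watchdog_pid=$!
    wait "$cmd_pid"
    local rc=$?
    kill "$watchdog_pid" 2> /dev/null
    [[ $rc -ge 128 ]] && rc=124   # command was killed by the watchdog -> treat as timeout
    return "$rc"
}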
# Verify removal (only if not timed out)
|
||||
local cask_gone=true app_gone=true
|
||||
HOMEBREW_NO_ENV_HINTS=1 brew list --cask 2> /dev/null | grep -qxF "$cask_name" && cask_gone=false
|
||||
[[ -n "$app_path" && -e "$app_path" ]] && app_gone=false
|
||||
|
||||
70
mole
@@ -13,7 +13,7 @@ source "$SCRIPT_DIR/lib/core/commands.sh"
|
||||
trap cleanup_temp_files EXIT INT TERM
|
||||
|
||||
# Version and update helpers
|
||||
VERSION="1.21.0"
|
||||
VERSION="1.23.2"
|
||||
MOLE_TAGLINE="Deep clean and optimize your Mac."
|
||||
|
||||
is_touchid_configured() {
|
||||
@@ -223,6 +223,7 @@ show_help() {
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo optimize --dry-run" "$NC" "Preview optimization"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo optimize --whitelist" "$NC" "Manage protected items"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo purge --paths" "$NC" "Configure scan directories"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "mo update --force" "$NC" "Force reinstall latest version"
|
||||
echo
|
||||
printf "%s%s%s\n" "$BLUE" "OPTIONS" "$NC"
|
||||
printf " %s%-28s%s %s\n" "$GREEN" "--debug" "$NC" "Show detailed operation logs"
|
||||
@@ -231,6 +232,7 @@ show_help() {
|
||||
|
||||
# Update flow (Homebrew or installer).
|
||||
update_mole() {
|
||||
local force_update="${1:-false}"
|
||||
local update_interrupted=false
|
||||
trap 'update_interrupted=true; echo ""; exit 130' INT TERM
|
||||
|
||||
@@ -245,14 +247,14 @@ update_mole() {
|
||||
|
||||
if [[ -z "$latest" ]]; then
|
||||
log_error "Unable to check for updates. Check network connection."
|
||||
echo -e "${YELLOW}Tip:${NC} Check if you can access GitHub (https://github.com)"
|
||||
echo -e "${YELLOW}Tip:${NC} Check if you can access GitHub, https://github.com"
|
||||
echo -e "${YELLOW}Tip:${NC} Try again with: ${GRAY}mo update${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$VERSION" == "$latest" ]]; then
|
||||
if [[ "$VERSION" == "$latest" && "$force_update" != "true" ]]; then
|
||||
echo ""
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Already on latest version (${VERSION})"
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Already on latest version, ${VERSION}"
|
||||
echo ""
|
||||
exit 0
|
||||
fi
|
||||
@@ -276,7 +278,7 @@ update_mole() {
|
||||
local curl_exit=$?
|
||||
if [[ -t 1 ]]; then stop_inline_spinner; fi
|
||||
rm -f "$tmp_installer"
|
||||
log_error "Update failed (curl error: $curl_exit)"
|
||||
log_error "Update failed, curl error: $curl_exit"
|
||||
|
||||
case $curl_exit in
|
||||
6) echo -e "${YELLOW}Tip:${NC} Could not resolve host. Check DNS or network connection." ;;
|
||||
@@ -292,7 +294,7 @@ update_mole() {
|
||||
download_error=$(wget --timeout=10 --tries=3 -qO "$tmp_installer" "$installer_url" 2>&1) || {
|
||||
if [[ -t 1 ]]; then stop_inline_spinner; fi
|
||||
rm -f "$tmp_installer"
|
||||
log_error "Update failed (wget error)"
|
||||
log_error "Update failed, wget error"
|
||||
echo -e "${YELLOW}Tip:${NC} Check network connection and try again."
|
||||
echo -e "${YELLOW}Tip:${NC} URL: $installer_url"
|
||||
exit 1
|
||||
@@ -322,7 +324,7 @@ update_mole() {
|
||||
|
||||
if [[ "$requires_sudo" == "true" ]]; then
|
||||
if ! request_sudo_access "Mole update requires admin access"; then
|
||||
log_error "Update aborted (admin access denied)"
|
||||
log_error "Update aborted, admin access denied"
|
||||
rm -f "$tmp_installer"
|
||||
exit 1
|
||||
fi
|
||||
@@ -336,6 +338,7 @@ update_mole() {
|
||||
|
||||
process_install_output() {
|
||||
local output="$1"
|
||||
local fallback_version="$2"
|
||||
if [[ -t 1 ]]; then stop_inline_spinner; fi
|
||||
|
||||
local filtered_output
|
||||
@@ -346,8 +349,17 @@ update_mole() {

if ! printf '%s\n' "$output" | grep -Eq "Updated to latest version|Already on latest version"; then
local new_version
new_version=$("$mole_path" --version 2> /dev/null | awk 'NR==1 && NF {print $NF}' || echo "")
printf '\n%s\n\n' "${GREEN}${ICON_SUCCESS}${NC} Updated to latest version (${new_version:-unknown})"
new_version=$(printf '%s\n' "$output" | sed -n 's/.*-> \([^[:space:]]\{1,\}\).*/\1/p' | head -1)
if [[ -z "$new_version" ]]; then
new_version=$(printf '%s\n' "$output" | sed -n 's/.*version[[:space:]]\{1,\}\([^[:space:]]\{1,\}\).*/\1/p' | head -1)
fi
if [[ -z "$new_version" ]]; then
new_version=$("$mole_path" --version 2> /dev/null | awk 'NR==1 && NF {print $NF}' || echo "")
fi
if [[ -z "$new_version" ]]; then
new_version="$fallback_version"
fi
printf '\n%s\n\n' "${GREEN}${ICON_SUCCESS}${NC} Updated to latest version, ${new_version:-unknown}"
else
printf '\n'
fi
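The parsing tries progressively weaker sources; for example (the arrow-style line is hypothetical, the "version" line matches the installer output used in the tests further down):

# "1.21.0 -> 1.23.2"                              first sed pattern  -> 1.23.2
# "Mole installed successfully, version 1.23.1"   second sed pattern -> 1.23.1
# then `$mole_path --version`, and finally the $fallback_version passed by the caller.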
@@ -359,11 +371,10 @@ update_mole() {
|
||||
if [[ ! -f "$config_dir/lib/core/common.sh" ]]; then
|
||||
config_dir="$HOME/.config/mole"
|
||||
fi
|
||||
if install_output=$(MOLE_VERSION="$update_tag" "$tmp_installer" --prefix "$install_dir" --config "$config_dir" --update 2>&1); then
|
||||
process_install_output "$install_output"
|
||||
else
|
||||
|
||||
if [[ "$force_update" == "true" ]]; then
|
||||
if install_output=$(MOLE_VERSION="$update_tag" "$tmp_installer" --prefix "$install_dir" --config "$config_dir" 2>&1); then
|
||||
process_install_output "$install_output"
|
||||
process_install_output "$install_output" "$latest"
|
||||
else
|
||||
if [[ -t 1 ]]; then stop_inline_spinner; fi
|
||||
rm -f "$tmp_installer"
|
||||
@@ -371,6 +382,20 @@ update_mole() {
|
||||
echo "$install_output" | tail -10 >&2 # Show last 10 lines of error
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
if install_output=$(MOLE_VERSION="$update_tag" "$tmp_installer" --prefix "$install_dir" --config "$config_dir" --update 2>&1); then
|
||||
process_install_output "$install_output" "$latest"
|
||||
else
|
||||
if install_output=$(MOLE_VERSION="$update_tag" "$tmp_installer" --prefix "$install_dir" --config "$config_dir" 2>&1); then
|
||||
process_install_output "$install_output" "$latest"
|
||||
else
|
||||
if [[ -t 1 ]]; then stop_inline_spinner; fi
|
||||
rm -f "$tmp_installer"
|
||||
log_error "Update failed"
|
||||
echo "$install_output" | tail -10 >&2 # Show last 10 lines of error
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
rm -f "$tmp_installer"
|
||||
@@ -462,15 +487,15 @@ remove_mole() {
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo -e "${YELLOW}Remove Mole${NC} - will delete the following:"
|
||||
echo -e "${YELLOW}Remove Mole${NC}, will delete the following:"
|
||||
if [[ "$is_homebrew" == "true" ]]; then
|
||||
echo " - Mole via Homebrew"
|
||||
echo " * Mole via Homebrew"
|
||||
fi
|
||||
for install in ${manual_installs[@]+"${manual_installs[@]}"} ${alias_installs[@]+"${alias_installs[@]}"}; do
|
||||
echo " - $install"
|
||||
echo " * $install"
|
||||
done
|
||||
echo " - ~/.config/mole"
|
||||
echo " - ~/.cache/mole"
|
||||
echo " * ~/.config/mole"
|
||||
echo " * ~/.cache/mole"
|
||||
echo -ne "${PURPLE}${ICON_ARROW}${NC} Press ${GREEN}Enter${NC} to confirm, ${GRAY}ESC${NC} to cancel: "
|
||||
|
||||
IFS= read -r -s -n1 key || key=""
|
||||
@@ -759,7 +784,14 @@ main() {
|
||||
exec "$SCRIPT_DIR/bin/completion.sh" "${args[@]:1}"
;;
"update")
update_mole
local force_update=false
for arg in "${args[@]:1}"; do
case "$arg" in
--force | -f) force_update=true ;;
*) ;;
esac
done
update_mole "$force_update"
exit 0
;;
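With this parsing in place, both spellings behave the same from the CLI:

mo update            # normal path, exits early when already on the latest version
mo update --force    # reinstalls even when VERSION matches the latest release
mo update -f         # short flag, same as --force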
"remove")
|
||||
|
||||
@@ -14,7 +14,7 @@ usage() {
|
||||
Usage: ./scripts/check.sh [--format|--no-format]
|
||||
|
||||
Options:
|
||||
--format Apply formatting fixes only (shfmt, gofmt)
|
||||
--format Apply formatting fixes only, shfmt, gofmt
|
||||
--no-format Skip formatting and run checks only
|
||||
--help Show this help
|
||||
EOF
|
||||
@@ -55,7 +55,7 @@ readonly ICON_ERROR="☻"
|
||||
readonly ICON_WARNING="●"
|
||||
readonly ICON_LIST="•"
|
||||
|
||||
echo -e "${BLUE}=== Mole Check (${MODE}) ===${NC}\n"
|
||||
echo -e "${BLUE}=== Mole Check, ${MODE} ===${NC}\n"
|
||||
|
||||
SHELL_FILES=$(find . -type f \( -name "*.sh" -o -name "mole" \) \
|
||||
-not -path "./.git/*" \
|
||||
@@ -75,11 +75,11 @@ if [[ "$MODE" == "format" ]]; then
|
||||
fi
|
||||
|
||||
if command -v goimports > /dev/null 2>&1; then
|
||||
echo -e "${YELLOW}Formatting Go code (goimports)...${NC}"
|
||||
echo -e "${YELLOW}Formatting Go code, goimports...${NC}"
|
||||
goimports -w -local github.com/tw93/Mole ./cmd
|
||||
echo -e "${GREEN}${ICON_SUCCESS} Go formatting complete${NC}\n"
|
||||
elif command -v go > /dev/null 2>&1; then
|
||||
echo -e "${YELLOW}Formatting Go code (gofmt)...${NC}"
|
||||
echo -e "${YELLOW}Formatting Go code, gofmt...${NC}"
|
||||
gofmt -w ./cmd
|
||||
echo -e "${GREEN}${ICON_SUCCESS} Go formatting complete${NC}\n"
|
||||
else
|
||||
@@ -100,11 +100,11 @@ if [[ "$MODE" != "check" ]]; then
|
||||
fi
|
||||
|
||||
if command -v goimports > /dev/null 2>&1; then
|
||||
echo -e "${YELLOW}2. Formatting Go code (goimports)...${NC}"
|
||||
echo -e "${YELLOW}2. Formatting Go code, goimports...${NC}"
|
||||
goimports -w -local github.com/tw93/Mole ./cmd
|
||||
echo -e "${GREEN}${ICON_SUCCESS} Go formatting applied${NC}\n"
|
||||
elif command -v go > /dev/null 2>&1; then
|
||||
echo -e "${YELLOW}2. Formatting Go code (gofmt)...${NC}"
|
||||
echo -e "${YELLOW}2. Formatting Go code, gofmt...${NC}"
|
||||
gofmt -w ./cmd
|
||||
echo -e "${GREEN}${ICON_SUCCESS} Go formatting applied${NC}\n"
|
||||
fi
|
||||
@@ -148,18 +148,18 @@ fi
|
||||
|
||||
echo -e "${YELLOW}5. Running syntax check...${NC}"
|
||||
if ! bash -n mole; then
|
||||
echo -e "${RED}${ICON_ERROR} Syntax check failed (mole)${NC}\n"
|
||||
echo -e "${RED}${ICON_ERROR} Syntax check failed, mole${NC}\n"
|
||||
exit 1
|
||||
fi
|
||||
for script in bin/*.sh; do
|
||||
if ! bash -n "$script"; then
|
||||
echo -e "${RED}${ICON_ERROR} Syntax check failed ($script)${NC}\n"
|
||||
echo -e "${RED}${ICON_ERROR} Syntax check failed, $script${NC}\n"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
find lib -name "*.sh" | while read -r script; do
|
||||
if ! bash -n "$script"; then
|
||||
echo -e "${RED}${ICON_ERROR} Syntax check failed ($script)${NC}\n"
|
||||
echo -e "${RED}${ICON_ERROR} Syntax check failed, $script${NC}\n"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
@@ -392,7 +392,7 @@ ${command}
|
||||
</dict>
|
||||
</plist>
|
||||
EOF
|
||||
log_success "Workflow ready: ${name} (keyword: ${keyword})"
|
||||
log_success "Workflow ready: ${name}, keyword: ${keyword}"
|
||||
done
|
||||
|
||||
log_step "Open Alfred preferences → Workflows if you need to adjust keywords."
|
||||
@@ -413,11 +413,11 @@ main() {
|
||||
|
||||
echo ""
|
||||
log_success "Done! Raycast and Alfred are ready with 5 commands:"
|
||||
echo " • clean - Deep system cleanup"
|
||||
echo " • uninstall - Remove applications"
|
||||
echo " • optimize - System health & tuning"
|
||||
echo " • analyze - Disk space explorer"
|
||||
echo " • status - Live system monitor"
|
||||
echo " • clean, Deep system cleanup"
|
||||
echo " • uninstall, Remove applications"
|
||||
echo " • optimize, System health & tuning"
|
||||
echo " • analyze, Disk space explorer"
|
||||
echo " • status, Live system monitor"
|
||||
echo ""
|
||||
}
|
||||
|
||||
|
||||
@@ -183,7 +183,7 @@ echo ""
|
||||
echo "6. Testing installation..."
|
||||
# Skip if Homebrew mole is installed (install.sh will refuse to overwrite)
|
||||
if brew list mole &> /dev/null; then
|
||||
printf "${GREEN}${ICON_SUCCESS} Installation test skipped (Homebrew)${NC}\n"
|
||||
printf "${GREEN}${ICON_SUCCESS} Installation test skipped, Homebrew${NC}\n"
|
||||
elif ./install.sh --prefix /tmp/mole-test > /dev/null 2>&1; then
|
||||
if [ -f /tmp/mole-test/mole ]; then
|
||||
printf "${GREEN}${ICON_SUCCESS} Installation test passed${NC}\n"
|
||||
@@ -203,5 +203,5 @@ if [[ $FAILED -eq 0 ]]; then
|
||||
printf "${GREEN}${ICON_SUCCESS} All tests passed!${NC}\n"
|
||||
exit 0
|
||||
fi
|
||||
printf "${RED}${ICON_ERROR} $FAILED test(s) failed!${NC}\n"
|
||||
printf "${RED}${ICON_ERROR} $FAILED tests failed!${NC}\n"
|
||||
exit 1
|
||||
|
||||
@@ -115,3 +115,48 @@ EOF
|
||||
[[ "$output" == "ok" ]]
|
||||
}
|
||||
|
||||
@test "clean_orphaned_system_services respects dry-run" {
|
||||
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" MOLE_DRY_RUN=1 bash --noprofile --norc <<'EOF'
|
||||
set -euo pipefail
|
||||
source "$PROJECT_ROOT/lib/core/common.sh"
|
||||
source "$PROJECT_ROOT/lib/clean/apps.sh"
|
||||
|
||||
start_section_spinner() { :; }
|
||||
stop_section_spinner() { :; }
|
||||
note_activity() { :; }
|
||||
debug_log() { :; }
|
||||
|
||||
tmp_dir="$(mktemp -d)"
|
||||
tmp_plist="$tmp_dir/com.sogou.test.plist"
|
||||
touch "$tmp_plist"
|
||||
|
||||
sudo() {
|
||||
if [[ "$1" == "-n" && "$2" == "true" ]]; then
|
||||
return 0
|
||||
fi
|
||||
if [[ "$1" == "find" ]]; then
|
||||
printf '%s\0' "$tmp_plist"
|
||||
return 0
|
||||
fi
|
||||
if [[ "$1" == "du" ]]; then
|
||||
echo "4 $tmp_plist"
|
||||
return 0
|
||||
fi
|
||||
if [[ "$1" == "launchctl" ]]; then
|
||||
echo "launchctl-called"
|
||||
return 0
|
||||
fi
|
||||
if [[ "$1" == "rm" ]]; then
|
||||
echo "rm-called"
|
||||
return 0
|
||||
fi
|
||||
command "$@"
|
||||
}
|
||||
|
||||
clean_orphaned_system_services
|
||||
EOF
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" != *"rm-called"* ]]
|
||||
[[ "$output" != *"launchctl-called"* ]]
|
||||
}
|
||||
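These bats tests rely on plain function shadowing: because the library is sourced into the same shell, defining a function with a binary's name intercepts every call the code under test makes. A stripped-down version of the pattern:

sudo() {                    # shadows /usr/bin/sudo inside this test shell
    echo "sudo:$*"          # record the call instead of escalating
    return 0                # pretend the privileged command succeeded
}
clean_orphaned_system_services   # now exercises the real logic against the stub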
|
||||
@@ -248,27 +248,24 @@ EOF
|
||||
}
|
||||
|
||||
@test "clean_edge_old_versions removes old versions but keeps current" {
|
||||
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=true bash --noprofile --norc <<'EOF'
|
||||
# Create mock Edge directory structure
|
||||
local EDGE_APP="$HOME/Applications/Microsoft Edge.app"
|
||||
local VERSIONS_DIR="$EDGE_APP/Contents/Frameworks/Microsoft Edge Framework.framework/Versions"
|
||||
mkdir -p "$VERSIONS_DIR"/{120.0.0.0,121.0.0.0,122.0.0.0}
|
||||
ln -s "122.0.0.0" "$VERSIONS_DIR/Current"
|
||||
|
||||
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=true \
|
||||
MOLE_EDGE_APP_PATHS="$EDGE_APP" bash --noprofile --norc <<'EOF'
|
||||
set -euo pipefail
|
||||
source "$PROJECT_ROOT/lib/core/common.sh"
|
||||
source "$PROJECT_ROOT/lib/clean/user.sh"
|
||||
|
||||
pgrep() { return 1; }
|
||||
export -f pgrep
|
||||
|
||||
# Create mock Edge directory structure
|
||||
EDGE_APP="$HOME/Applications/Microsoft Edge.app"
|
||||
VERSIONS_DIR="$EDGE_APP/Contents/Frameworks/Microsoft Edge Framework.framework/Versions"
|
||||
mkdir -p "$VERSIONS_DIR"/{120.0.0.0,121.0.0.0,122.0.0.0}
|
||||
|
||||
# Create Current symlink pointing to 122.0.0.0
|
||||
ln -s "122.0.0.0" "$VERSIONS_DIR/Current"
|
||||
|
||||
is_path_whitelisted() { return 1; }
|
||||
get_path_size_kb() { echo "10240"; }
|
||||
bytes_to_human() { echo "10M"; }
|
||||
note_activity() { :; }
|
||||
export -f is_path_whitelisted get_path_size_kb bytes_to_human note_activity
|
||||
export -f pgrep is_path_whitelisted get_path_size_kb bytes_to_human note_activity
|
||||
|
||||
files_cleaned=0
|
||||
total_size_cleaned=0
|
||||
@@ -289,7 +286,14 @@ EOF
|
||||
# Use a fresh temp directory for this test
|
||||
TEST_HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-test8.XXXXXX")"
|
||||
|
||||
run env HOME="$TEST_HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
|
||||
# Create Edge with only current version
|
||||
local EDGE_APP="$TEST_HOME/Applications/Microsoft Edge.app"
|
||||
local VERSIONS_DIR="$EDGE_APP/Contents/Frameworks/Microsoft Edge Framework.framework/Versions"
|
||||
mkdir -p "$VERSIONS_DIR/122.0.0.0"
|
||||
ln -s "122.0.0.0" "$VERSIONS_DIR/Current"
|
||||
|
||||
run env HOME="$TEST_HOME" PROJECT_ROOT="$PROJECT_ROOT" \
|
||||
MOLE_EDGE_APP_PATHS="$EDGE_APP" bash --noprofile --norc <<'EOF'
|
||||
set -euo pipefail
|
||||
source "$PROJECT_ROOT/lib/core/common.sh"
|
||||
source "$PROJECT_ROOT/lib/clean/user.sh"
|
||||
@@ -301,17 +305,10 @@ bytes_to_human() { echo "10M"; }
|
||||
note_activity() { :; }
|
||||
export -f pgrep is_path_whitelisted get_path_size_kb bytes_to_human note_activity
|
||||
|
||||
# Initialize counters
|
||||
files_cleaned=0
|
||||
total_size_cleaned=0
|
||||
total_items=0
|
||||
|
||||
# Create Edge with only current version
|
||||
EDGE_APP="$HOME/Applications/Microsoft Edge.app"
|
||||
VERSIONS_DIR="$EDGE_APP/Contents/Frameworks/Microsoft Edge Framework.framework/Versions"
|
||||
mkdir -p "$VERSIONS_DIR/122.0.0.0"
|
||||
ln -s "122.0.0.0" "$VERSIONS_DIR/Current"
|
||||
|
||||
clean_edge_old_versions
|
||||
EOF
|
||||
|
||||
|
||||
@@ -108,105 +108,47 @@ EOF
|
||||
[[ "$output" == *"No incomplete backups found"* ]]
|
||||
}
|
||||
|
||||
@test "clean_local_snapshots skips in non-interactive mode" {
|
||||
@test "clean_local_snapshots reports snapshot count" {
|
||||
run bash --noprofile --norc <<'EOF'
|
||||
set -euo pipefail
|
||||
source "$PROJECT_ROOT/lib/core/common.sh"
|
||||
source "$PROJECT_ROOT/lib/clean/system.sh"
|
||||
|
||||
tmutil() {
|
||||
if [[ "$1" == "listlocalsnapshots" ]]; then
|
||||
printf '%s\n' \
|
||||
"com.apple.TimeMachine.2023-10-25-120000" \
|
||||
"com.apple.TimeMachine.2023-10-24-120000"
|
||||
return 0
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
start_section_spinner(){ :; }
|
||||
stop_section_spinner(){ :; }
|
||||
tm_is_running(){ return 1; }
|
||||
|
||||
DRY_RUN="false"
|
||||
clean_local_snapshots
|
||||
EOF
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"skipping non-interactive mode"* ]]
|
||||
[[ "$output" != *"Removed snapshot"* ]]
|
||||
}
|
||||
|
||||
@test "clean_local_snapshots keeps latest in dry-run" {
|
||||
run bash --noprofile --norc <<'EOF'
|
||||
set -euo pipefail
|
||||
source "$PROJECT_ROOT/lib/core/common.sh"
|
||||
source "$PROJECT_ROOT/lib/clean/system.sh"
|
||||
|
||||
tmutil() {
|
||||
if [[ "$1" == "listlocalsnapshots" ]]; then
|
||||
printf '%s\n' \
|
||||
"com.apple.TimeMachine.2023-10-25-120000" \
|
||||
"com.apple.TimeMachine.2023-10-25-130000" \
|
||||
"com.apple.TimeMachine.2023-10-24-120000"
|
||||
return 0
|
||||
fi
|
||||
return 0
|
||||
run_with_timeout() {
|
||||
printf '%s\n' \
|
||||
"com.apple.TimeMachine.2023-10-25-120000" \
|
||||
"com.apple.TimeMachine.2023-10-24-120000"
|
||||
}
|
||||
start_section_spinner(){ :; }
|
||||
stop_section_spinner(){ :; }
|
||||
note_activity(){ :; }
|
||||
tm_is_running(){ return 1; }
|
||||
|
||||
DRY_RUN="true"
|
||||
clean_local_snapshots
|
||||
EOF
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"Local snapshot: com.apple.TimeMachine.2023-10-25-120000"* ]]
|
||||
[[ "$output" == *"Local snapshot: com.apple.TimeMachine.2023-10-24-120000"* ]]
|
||||
[[ "$output" != *"Local snapshot: com.apple.TimeMachine.2023-10-25-130000"* ]]
|
||||
[[ "$output" == *"Time Machine local snapshots:"* ]]
|
||||
[[ "$output" == *"tmutil listlocalsnapshots /"* ]]
|
||||
}
|
||||
|
||||
@test "clean_local_snapshots uses read fallback when read_key missing" {
|
||||
if ! command -v script > /dev/null 2>&1; then
|
||||
skip "script not available"
|
||||
fi
|
||||
|
||||
local tmp_script="$BATS_TEST_TMPDIR/clean_local_snapshots_fallback.sh"
|
||||
cat > "$tmp_script" <<'EOF'
|
||||
@test "clean_local_snapshots is quiet when no snapshots" {
|
||||
run bash --noprofile --norc <<'EOF'
|
||||
set -euo pipefail
|
||||
source "$PROJECT_ROOT/lib/core/common.sh"
|
||||
source "$PROJECT_ROOT/lib/clean/system.sh"
|
||||
|
||||
tmutil() {
|
||||
if [[ "$1" == "listlocalsnapshots" ]]; then
|
||||
printf '%s\n' \
|
||||
"com.apple.TimeMachine.2023-10-25-120000" \
|
||||
"com.apple.TimeMachine.2023-10-24-120000"
|
||||
return 0
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
run_with_timeout() { echo "Snapshots for disk /:"; }
|
||||
start_section_spinner(){ :; }
|
||||
stop_section_spinner(){ :; }
|
||||
note_activity(){ :; }
|
||||
tm_is_running(){ return 1; }
|
||||
|
||||
unset -f read_key
|
||||
|
||||
CALL_LOG="$HOME/snapshot_calls.log"
|
||||
> "$CALL_LOG"
|
||||
sudo() { echo "sudo:$*" >> "$CALL_LOG"; return 0; }
|
||||
|
||||
DRY_RUN="false"
|
||||
clean_local_snapshots
|
||||
cat "$CALL_LOG"
|
||||
EOF
|
||||
|
||||
run bash --noprofile --norc -c "printf '\n' | script -q /dev/null bash \"$tmp_script\""
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"Skipped"* ]]
|
||||
[[ "$output" != *"Time Machine local snapshots"* ]]
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -34,8 +34,8 @@ clean_user_essentials
|
||||
EOF
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"Trash"* ]]
|
||||
[[ "$output" == *"whitelist"* ]]
|
||||
# Whitelist-protected items no longer show output (UX improvement in V1.22.0)
|
||||
[[ "$output" != *"Trash"* ]]
|
||||
}
|
||||
|
||||
@test "clean_macos_system_caches calls safe_clean for core paths" {
|
||||
@@ -88,8 +88,8 @@ clean_finder_metadata
|
||||
EOF
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"Finder metadata"* ]]
|
||||
[[ "$output" == *"protected"* ]]
|
||||
# Whitelist-protected items no longer show output (UX improvement in V1.22.0)
|
||||
[[ "$output" == "" ]]
|
||||
}
|
||||
|
||||
@test "check_ios_device_backups returns when no backup dir" {
|
||||
@@ -117,6 +117,7 @@ EOF
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"Safari cache"* ]]
|
||||
[[ "$output" == *"Firefox cache"* ]]
|
||||
[[ "$output" == *"Puppeteer browser cache"* ]]
|
||||
}
|
||||
|
||||
@test "clean_application_support_logs skips when no access" {
|
||||
|
||||
@@ -108,6 +108,7 @@ setup() {
|
||||
|
||||
@test "get_invoking_user executes quickly" {
|
||||
local start end elapsed
|
||||
local limit_ms="${MOLE_PERF_GET_INVOKING_USER_LIMIT_MS:-500}"
|
||||
|
||||
start=$(date +%s%N)
|
||||
for i in {1..100}; do
|
||||
@@ -117,7 +118,7 @@ setup() {
|
||||
|
||||
elapsed=$(( (end - start) / 1000000 ))
|
||||
|
||||
[ "$elapsed" -lt 200 ]
|
||||
[ "$elapsed" -lt "$limit_ms" ]
|
||||
}
|
||||
|
||||
@test "get_darwin_major caches correctly" {
|
||||
|
||||
@@ -123,29 +123,84 @@ EOF
|
||||
}
|
||||
|
||||
@test "check_android_ndk reports multiple NDK versions" {
|
||||
run bash -c 'HOME=$(mktemp -d) && mkdir -p "$HOME/Library/Android/sdk/ndk"/{21.0.1,22.0.0,20.0.0} && source "$0" && note_activity() { :; } && NC="" && GREEN="" && GRAY="" && check_android_ndk' "$PROJECT_ROOT/lib/clean/dev.sh"
|
||||
run bash -c 'HOME=$(mktemp -d) && mkdir -p "$HOME/Library/Android/sdk/ndk"/{21.0.1,22.0.0,20.0.0} && source "$0" && note_activity() { :; } && NC="" && GREEN="" && GRAY="" && YELLOW="" && ICON_WARNING="●" && check_android_ndk' "$PROJECT_ROOT/lib/clean/dev.sh"
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"Found 3 Android NDK versions"* ]]
|
||||
[[ "$output" == *"Android NDK versions: 3 found"* ]]
|
||||
}
|
||||
|
||||
@test "check_android_ndk silent when only one NDK" {
|
||||
run bash -c 'HOME=$(mktemp -d) && mkdir -p "$HOME/Library/Android/sdk/ndk/22.0.0" && source "$0" && note_activity() { :; } && NC="" && GREEN="" && GRAY="" && check_android_ndk' "$PROJECT_ROOT/lib/clean/dev.sh"
|
||||
run bash -c 'HOME=$(mktemp -d) && mkdir -p "$HOME/Library/Android/sdk/ndk/22.0.0" && source "$0" && note_activity() { :; } && NC="" && GREEN="" && GRAY="" && YELLOW="" && ICON_WARNING="●" && check_android_ndk' "$PROJECT_ROOT/lib/clean/dev.sh"
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" != *"Found"*"NDK"* ]]
|
||||
[[ "$output" != *"NDK versions"* ]]
|
||||
}
|
||||
|
||||
@test "check_rust_toolchains reports multiple toolchains" {
|
||||
run bash -c 'HOME=$(mktemp -d) && mkdir -p "$HOME/.rustup/toolchains"/{stable,nightly,1.75.0}-aarch64-apple-darwin && source "$0" && note_activity() { :; } && NC="" && GREEN="" && GRAY="" && rustup() { :; } && export -f rustup && check_rust_toolchains' "$PROJECT_ROOT/lib/clean/dev.sh"
|
||||
run bash -c 'HOME=$(mktemp -d) && mkdir -p "$HOME/.rustup/toolchains"/{stable,nightly,1.75.0}-aarch64-apple-darwin && source "$0" && note_activity() { :; } && NC="" && GREEN="" && GRAY="" && YELLOW="" && ICON_WARNING="●" && rustup() { :; } && export -f rustup && check_rust_toolchains' "$PROJECT_ROOT/lib/clean/dev.sh"
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"Found 3 Rust toolchains"* ]]
|
||||
[[ "$output" == *"Rust toolchains: 3 found"* ]]
|
||||
}
|
||||
|
||||
@test "check_rust_toolchains silent when only one toolchain" {
|
||||
run bash -c 'HOME=$(mktemp -d) && mkdir -p "$HOME/.rustup/toolchains/stable-aarch64-apple-darwin" && source "$0" && note_activity() { :; } && NC="" && GREEN="" && GRAY="" && rustup() { :; } && export -f rustup && check_rust_toolchains' "$PROJECT_ROOT/lib/clean/dev.sh"
|
||||
run bash -c 'HOME=$(mktemp -d) && mkdir -p "$HOME/.rustup/toolchains/stable-aarch64-apple-darwin" && source "$0" && note_activity() { :; } && NC="" && GREEN="" && GRAY="" && YELLOW="" && ICON_WARNING="●" && rustup() { :; } && export -f rustup && check_rust_toolchains' "$PROJECT_ROOT/lib/clean/dev.sh"
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" != *"Found"*"Rust"* ]]
|
||||
[[ "$output" != *"Rust toolchains"* ]]
|
||||
}
|
||||
|
||||
@test "clean_dev_jetbrains_toolbox cleans old versions and bypasses toolbox whitelist" {
|
||||
local toolbox_channel="$HOME/Library/Application Support/JetBrains/Toolbox/apps/IDEA/ch-0"
|
||||
mkdir -p "$toolbox_channel/241.1" "$toolbox_channel/241.2" "$toolbox_channel/241.3"
|
||||
ln -s "241.3" "$toolbox_channel/current"
|
||||
touch -t 202401010000 "$toolbox_channel/241.1"
|
||||
touch -t 202402010000 "$toolbox_channel/241.2"
|
||||
touch -t 202403010000 "$toolbox_channel/241.3"
|
||||
|
||||
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
|
||||
set -euo pipefail
|
||||
source "$PROJECT_ROOT/lib/core/common.sh"
|
||||
source "$PROJECT_ROOT/lib/clean/dev.sh"
|
||||
toolbox_root="$HOME/Library/Application Support/JetBrains/Toolbox/apps"
|
||||
WHITELIST_PATTERNS=("$toolbox_root"* "$HOME/Library/Application Support/JetBrains*")
|
||||
note_activity() { :; }
|
||||
safe_clean() {
|
||||
local target="$1"
|
||||
for pattern in "${WHITELIST_PATTERNS[@]+${WHITELIST_PATTERNS[@]}}"; do
|
||||
if [[ "$pattern" == "$toolbox_root"* ]]; then
|
||||
echo "WHITELIST_NOT_REMOVED"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
echo "$target"
|
||||
}
|
||||
MOLE_JETBRAINS_TOOLBOX_KEEP=1
|
||||
clean_dev_jetbrains_toolbox
|
||||
EOF
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"/241.1"* ]]
|
||||
[[ "$output" != *"/241.2"* ]]
|
||||
}
|
||||
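Given the fixture above, the expected behaviour with MOLE_JETBRAINS_TOOLBOX_KEEP=1 appears to be roughly:

# ch-0/241.3  target of the `current` symlink, never offered for cleanup
# ch-0/241.2  newest non-current version, kept because KEEP=1
# ch-0/241.1  older than the keep window, passed to safe_clean (hence the /241.1 assertion)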
|
||||
@test "clean_dev_jetbrains_toolbox keeps current directory and removes older versions" {
|
||||
local toolbox_channel="$HOME/Library/Application Support/JetBrains/Toolbox/apps/IDEA/ch-0"
|
||||
mkdir -p "$toolbox_channel/241.1" "$toolbox_channel/241.2" "$toolbox_channel/current"
|
||||
touch -t 202401010000 "$toolbox_channel/241.1"
|
||||
touch -t 202402010000 "$toolbox_channel/241.2"
|
||||
|
||||
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
|
||||
set -euo pipefail
|
||||
source "$PROJECT_ROOT/lib/core/common.sh"
|
||||
source "$PROJECT_ROOT/lib/clean/dev.sh"
|
||||
note_activity() { :; }
|
||||
safe_clean() { echo "$1"; }
|
||||
MOLE_JETBRAINS_TOOLBOX_KEEP=1
|
||||
clean_dev_jetbrains_toolbox
|
||||
EOF
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"/241.1"* ]]
|
||||
[[ "$output" != *"/241.2"* ]]
|
||||
}
|
||||
|
||||
@@ -14,7 +14,6 @@ source "$PROJECT_ROOT/lib/manage/autofix.sh"
|
||||
export FIREWALL_DISABLED=true
|
||||
export FILEVAULT_DISABLED=true
|
||||
export TOUCHID_NOT_CONFIGURED=true
|
||||
export ROSETTA_NOT_INSTALLED=true
|
||||
export CACHE_SIZE_GB=9
|
||||
export BREW_HAS_WARNINGS=true
|
||||
export DISK_FREE_GB=25
|
||||
@@ -27,7 +26,6 @@ EOF
|
||||
[[ "$output" == *"Enable Firewall for better security"* ]]
|
||||
[[ "$output" == *"Enable FileVault"* ]]
|
||||
[[ "$output" == *"Enable Touch ID for sudo"* ]]
|
||||
[[ "$output" == *"Install Rosetta 2"* ]]
|
||||
[[ "$output" == *"Low disk space (25GB free)"* ]]
|
||||
[[ "$output" == *"AUTO_FLAG=true"* ]]
|
||||
}
|
||||
@@ -72,10 +70,6 @@ sudo() {
|
||||
case "$1" in
|
||||
defaults) return 0 ;;
|
||||
bash) return 0 ;;
|
||||
softwareupdate)
|
||||
echo "Installing Rosetta 2 stub output"
|
||||
return 0
|
||||
;;
|
||||
/usr/libexec/ApplicationFirewall/socketfilterfw) return 0 ;;
|
||||
*) return 0 ;;
|
||||
esac
|
||||
@@ -83,7 +77,6 @@ sudo() {
|
||||
|
||||
export FIREWALL_DISABLED=true
|
||||
export TOUCHID_NOT_CONFIGURED=true
|
||||
export ROSETTA_NOT_INSTALLED=true
|
||||
|
||||
perform_auto_fix
|
||||
echo "SUMMARY=${AUTO_FIX_SUMMARY}"
|
||||
@@ -93,7 +86,6 @@ EOF
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"Firewall enabled"* ]]
|
||||
[[ "$output" == *"Touch ID configured"* ]]
|
||||
[[ "$output" == *"Rosetta 2 installed"* ]]
|
||||
[[ "$output" == *"SUMMARY=Auto fixes applied: 3 issue(s)"* ]]
|
||||
[[ "$output" == *"SUMMARY=Auto fixes applied: 2 issues"* ]]
|
||||
[[ "$output" == *"DETAILS"* ]]
|
||||
}
|
||||
|
||||
@@ -39,8 +39,6 @@ create_app_artifacts() {
|
||||
mkdir -p "$HOME/Library/Saved Application State/com.example.TestApp.savedState"
|
||||
mkdir -p "$HOME/Library/LaunchAgents"
|
||||
touch "$HOME/Library/LaunchAgents/com.example.TestApp.plist"
|
||||
mkdir -p "$HOME/Library/LaunchDaemons"
|
||||
touch "$HOME/Library/LaunchDaemons/com.example.TestApp.plist"
|
||||
}
|
||||
|
||||
@test "find_app_files discovers user-level leftovers" {
|
||||
@@ -60,7 +58,6 @@ EOF
|
||||
[[ "$result" == *"Saved Application State/com.example.TestApp.savedState"* ]]
|
||||
[[ "$result" == *"Containers/com.example.TestApp"* ]]
|
||||
[[ "$result" == *"LaunchAgents/com.example.TestApp.plist"* ]]
|
||||
[[ "$result" == *"LaunchDaemons/com.example.TestApp.plist"* ]]
|
||||
}
|
||||
|
||||
@test "calculate_total_size returns aggregate kilobytes" {
|
||||
@@ -121,7 +118,6 @@ batch_uninstall_applications
|
||||
[[ ! -d "$HOME/Library/Caches/TestApp" ]] || exit 1
|
||||
[[ ! -f "$HOME/Library/Preferences/com.example.TestApp.plist" ]] || exit 1
|
||||
[[ ! -f "$HOME/Library/LaunchAgents/com.example.TestApp.plist" ]] || exit 1
|
||||
[[ ! -f "$HOME/Library/LaunchDaemons/com.example.TestApp.plist" ]] || exit 1
|
||||
EOF
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
|
||||
@@ -78,8 +78,8 @@ ask_for_updates
|
||||
EOF
|
||||
|
||||
[ "$status" -eq 1 ] # ESC cancels
|
||||
[[ "$output" == *"Homebrew (5 updates)"* ]]
|
||||
[[ "$output" == *"App Store (1 apps)"* ]]
|
||||
[[ "$output" == *"Homebrew, 3 formula, 2 cask"* ]]
|
||||
[[ "$output" == *"App Store, 1 apps"* ]]
|
||||
[[ "$output" == *"macOS system"* ]]
|
||||
[[ "$output" == *"Mole"* ]]
|
||||
}
|
||||
@@ -233,3 +233,223 @@ EOF
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"Already on latest version"* ]]
|
||||
}
|
||||
|
||||
@test "process_install_output shows install.sh success message with version" {
|
||||
run bash --noprofile --norc <<'EOF'
|
||||
set -euo pipefail
|
||||
GREEN='\033[0;32m'
|
||||
ICON_SUCCESS='✓'
|
||||
NC='\033[0m'
|
||||
|
||||
process_install_output() {
|
||||
local output="$1"
|
||||
local fallback_version="$2"
|
||||
|
||||
local filtered_output
|
||||
filtered_output=$(printf '%s\n' "$output" | sed '/^$/d')
|
||||
if [[ -n "$filtered_output" ]]; then
|
||||
printf '%s\n' "$filtered_output"
|
||||
fi
|
||||
|
||||
if ! printf '%s\n' "$output" | grep -Eq "Updated to latest version|Already on latest version"; then
|
||||
local new_version
|
||||
new_version=$(printf '%s\n' "$output" | sed -n 's/.*-> \([^[:space:]]\{1,\}\).*/\1/p' | head -1)
|
||||
if [[ -z "$new_version" ]]; then
|
||||
new_version=$(printf '%s\n' "$output" | sed -n 's/.*version[[:space:]]\{1,\}\([^[:space:]]\{1,\}\).*/\1/p' | head -1)
|
||||
fi
|
||||
if [[ -z "$new_version" ]]; then
|
||||
new_version=$(command -v mo > /dev/null 2>&1 && mo --version 2> /dev/null | awk 'NR==1 && NF {print $NF}' || echo "")
|
||||
fi
|
||||
if [[ -z "$new_version" ]]; then
|
||||
new_version="$fallback_version"
|
||||
fi
|
||||
printf '\n%s\n' "${GREEN}${ICON_SUCCESS}${NC} Updated to latest version, ${new_version:-unknown}"
|
||||
fi
|
||||
}
|
||||
|
||||
output="Installing Mole...
|
||||
◎ Mole installed successfully, version 1.23.1"
|
||||
process_install_output "$output" "1.23.0"
|
||||
EOF
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"Updated to latest version, 1.23.1"* ]]
|
||||
[[ "$output" != *"1.23.0"* ]]
|
||||
}
|
||||
|
||||
@test "process_install_output uses fallback version when install.sh has no success message" {
|
||||
run bash --noprofile --norc <<'EOF'
|
||||
set -euo pipefail
|
||||
GREEN='\033[0;32m'
|
||||
ICON_SUCCESS='✓'
|
||||
NC='\033[0m'
|
||||
|
||||
process_install_output() {
|
||||
local output="$1"
|
||||
local fallback_version="$2"
|
||||
|
||||
local filtered_output
|
||||
filtered_output=$(printf '%s\n' "$output" | sed '/^$/d')
|
||||
if [[ -n "$filtered_output" ]]; then
|
||||
printf '%s\n' "$filtered_output"
|
||||
fi
|
||||
|
||||
if ! printf '%s\n' "$output" | grep -Eq "Updated to latest version|Already on latest version"; then
|
||||
local new_version
|
||||
new_version=$(printf '%s\n' "$output" | sed -n 's/.*-> \([^[:space:]]\{1,\}\).*/\1/p' | head -1)
|
||||
if [[ -z "$new_version" ]]; then
|
||||
new_version=$(printf '%s\n' "$output" | sed -n 's/.*version[[:space:]]\{1,\}\([^[:space:]]\{1,\}\).*/\1/p' | head -1)
|
||||
fi
|
||||
if [[ -z "$new_version" ]]; then
|
||||
new_version=$(command -v mo > /dev/null 2>&1 && mo --version 2> /dev/null | awk 'NR==1 && NF {print $NF}' || echo "")
|
||||
fi
|
||||
if [[ -z "$new_version" ]]; then
|
||||
new_version="$fallback_version"
|
||||
fi
|
||||
printf '\n%s\n' "${GREEN}${ICON_SUCCESS}${NC} Updated to latest version, ${new_version:-unknown}"
|
||||
fi
|
||||
}
|
||||
|
||||
output="Installing Mole...
|
||||
Installation completed"
|
||||
process_install_output "$output" "1.23.1"
|
||||
EOF
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"Installation completed"* ]]
|
||||
[[ "$output" == *"Updated to latest version, 1.23.1"* ]]
|
||||
}
|
||||
|
||||
@test "process_install_output handles empty output with fallback version" {
|
||||
run bash --noprofile --norc <<'EOF'
|
||||
set -euo pipefail
|
||||
GREEN='\033[0;32m'
|
||||
ICON_SUCCESS='✓'
|
||||
NC='\033[0m'
|
||||
|
||||
process_install_output() {
|
||||
local output="$1"
|
||||
local fallback_version="$2"
|
||||
|
||||
local filtered_output
|
||||
filtered_output=$(printf '%s\n' "$output" | sed '/^$/d')
|
||||
if [[ -n "$filtered_output" ]]; then
|
||||
printf '%s\n' "$filtered_output"
|
||||
fi
|
||||
|
||||
if ! printf '%s\n' "$output" | grep -Eq "Updated to latest version|Already on latest version"; then
|
||||
local new_version
|
||||
new_version=$(printf '%s\n' "$output" | sed -n 's/.*-> \([^[:space:]]\{1,\}\).*/\1/p' | head -1)
|
||||
if [[ -z "$new_version" ]]; then
|
||||
new_version=$(printf '%s\n' "$output" | sed -n 's/.*version[[:space:]]\{1,\}\([^[:space:]]\{1,\}\).*/\1/p' | head -1)
|
||||
fi
|
||||
if [[ -z "$new_version" ]]; then
|
||||
new_version=$(command -v mo > /dev/null 2>&1 && mo --version 2> /dev/null | awk 'NR==1 && NF {print $NF}' || echo "")
|
||||
fi
|
||||
if [[ -z "$new_version" ]]; then
|
||||
new_version="$fallback_version"
|
||||
fi
|
||||
printf '\n%s\n' "${GREEN}${ICON_SUCCESS}${NC} Updated to latest version, ${new_version:-unknown}"
|
||||
fi
|
||||
}
|
||||
|
||||
output=""
|
||||
process_install_output "$output" "1.23.1"
|
||||
EOF
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"Updated to latest version, 1.23.1"* ]]
|
||||
}
|
||||
|
||||
@test "process_install_output does not extract wrong parentheses content" {
|
||||
run bash --noprofile --norc <<'EOF'
|
||||
set -euo pipefail
|
||||
GREEN='\033[0;32m'
|
||||
ICON_SUCCESS='✓'
|
||||
NC='\033[0m'
|
||||
|
||||
process_install_output() {
|
||||
local output="$1"
|
||||
local fallback_version="$2"
|
||||
|
||||
local filtered_output
|
||||
filtered_output=$(printf '%s\n' "$output" | sed '/^$/d')
|
||||
if [[ -n "$filtered_output" ]]; then
|
||||
printf '%s\n' "$filtered_output"
|
||||
fi
|
||||
|
||||
if ! printf '%s\n' "$output" | grep -Eq "Updated to latest version|Already on latest version"; then
|
||||
local new_version
|
||||
new_version=$(printf '%s\n' "$output" | sed -n 's/.*-> \([^[:space:]]\{1,\}\).*/\1/p' | head -1)
|
||||
if [[ -z "$new_version" ]]; then
|
||||
new_version=$(printf '%s\n' "$output" | sed -n 's/.*version[[:space:]]\{1,\}\([^[:space:]]\{1,\}\).*/\1/p' | head -1)
|
||||
fi
|
||||
if [[ -z "$new_version" ]]; then
|
||||
new_version=$(command -v mo > /dev/null 2>&1 && mo --version 2> /dev/null | awk 'NR==1 && NF {print $NF}' || echo "")
|
||||
fi
|
||||
if [[ -z "$new_version" ]]; then
|
||||
new_version="$fallback_version"
|
||||
fi
|
||||
printf '\n%s\n' "${GREEN}${ICON_SUCCESS}${NC} Updated to latest version, ${new_version:-unknown}"
|
||||
fi
|
||||
}
|
||||
|
||||
output="Downloading (progress: 100%)
|
||||
Done"
|
||||
process_install_output "$output" "1.23.1"
|
||||
EOF
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" == *"Downloading (progress: 100%)"* ]]
|
||||
[[ "$output" == *"Updated to latest version, 1.23.1"* ]]
|
||||
[[ "$output" != *"progress: 100%"* ]] || [[ "$output" == *"Downloading (progress: 100%)"* ]]
|
||||
}
|
||||
|
||||
@test "update_mole with --force reinstalls even when on latest version" {
|
||||
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" CURRENT_VERSION="$CURRENT_VERSION" PATH="$HOME/fake-bin:/usr/bin:/bin" TERM="dumb" bash --noprofile --norc << 'EOF'
|
||||
set -euo pipefail
|
||||
curl() {
|
||||
local out=""
|
||||
local url=""
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
-o)
|
||||
out="$2"
|
||||
shift 2
|
||||
;;
|
||||
http*://*)
|
||||
url="$1"
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ -n "$out" ]]; then
|
||||
cat > "$out" << 'INSTALLER'
|
||||
#!/usr/bin/env bash
|
||||
echo "Mole installed successfully, version $CURRENT_VERSION"
|
||||
INSTALLER
|
||||
return 0
|
||||
fi
|
||||
|
||||
if [[ "$url" == *"api.github.com"* ]]; then
|
||||
echo "{\"tag_name\":\"$CURRENT_VERSION\"}"
|
||||
else
|
||||
echo "VERSION=\"$CURRENT_VERSION\""
|
||||
fi
|
||||
}
|
||||
export -f curl
|
||||
|
||||
brew() { exit 1; }
|
||||
export -f brew
|
||||
|
||||
"$PROJECT_ROOT/mole" update --force
|
||||
EOF
|
||||
|
||||
[ "$status" -eq 0 ]
|
||||
[[ "$output" != *"Already on latest version"* ]]
|
||||
[[ "$output" == *"Downloading"* ]] || [[ "$output" == *"Installing"* ]] || [[ "$output" == *"Updated"* ]]
|
||||
}