diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b55bb71..eb5a23c 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -79,7 +79,7 @@ jobs: echo "Checking for hardcoded secrets..." matches=$(grep -r "password\|secret\|api_key" --include="*.sh" . \ | grep -v "# \|test" \ - | grep -v -E "lib/core/sudo\.sh|lib/core/app_protection\.sh|lib/clean/user\.sh|lib/clean/brew\.sh|bin/optimize\.sh" || true) + | grep -v -E "lib/core/sudo\.sh|lib/core/app_protection\.sh|lib/clean/user\.sh|lib/clean/brew\.sh|bin/optimize\.sh|lib/clean/apps\.sh" || true) if [[ -n "$matches" ]]; then echo "$matches" echo "✗ Potential secrets found" diff --git a/.gitignore b/.gitignore index 8319b94..574c7c8 100644 --- a/.gitignore +++ b/.gitignore @@ -65,3 +65,6 @@ bin/status-darwin-* tests/tmp-*/ tests/*.tmp tests/*.log + +session.json +run_tests.ps1 diff --git a/CONTRIBUTORS.svg b/CONTRIBUTORS.svg index a51f7f9..9a17f16 100644 --- a/CONTRIBUTORS.svg +++ b/CONTRIBUTORS.svg @@ -1,4 +1,4 @@ - + @@ -13,6 +13,17 @@ + + + + + + + + bhadraagada + + + @@ -23,7 +34,7 @@ JackPhallen - + @@ -34,29 +45,7 @@ amanthanvi - - - - - - - - - rubnogueira - - - - - - - - - - bsisduck - - - @@ -67,7 +56,40 @@ alexandear + + + + + + + + + rubnogueira + + + + + + + + + + biplavbarua + + + + + + + + + + + bsisduck + + + @@ -78,7 +100,7 @@ jimmystridh - + @@ -89,7 +111,7 @@ fte-jjmartres - + @@ -100,7 +122,7 @@ Else00 - + @@ -111,7 +133,7 @@ carolyn-sun - + @@ -122,7 +144,7 @@ purofle - + @@ -133,7 +155,7 @@ huyixi - + @@ -144,7 +166,7 @@ bunizao - + @@ -155,7 +177,7 @@ zeldrisho - + @@ -166,7 +188,7 @@ yuzeguitarist - + @@ -177,7 +199,7 @@ thijsvanhal - + @@ -188,7 +210,7 @@ Sizk - + @@ -199,7 +221,7 @@ ndbroadbent - + @@ -210,7 +232,7 @@ MohammedEsafi - + @@ -221,7 +243,7 @@ Schlauer-Hax - + @@ -232,7 +254,7 @@ anonymort - + @@ -243,7 +265,7 @@ khipu-luke - + @@ -254,7 +276,7 @@ LmanTW - + @@ -265,7 +287,7 @@ kwakubiney - + @@ -276,7 +298,7 @@ kowyo - + @@ -287,7 +309,7 @@ jalen0x - + @@ -298,7 +320,7 @@ Hensell - + @@ -309,7 +331,7 @@ ClathW - + diff --git a/README.md b/README.md index a174545..9711675 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,10 @@ - **Disk insights**: Visualizes usage, manages large files, **rebuilds caches**, and refreshes system services - **Live monitoring**: Real-time stats for CPU, GPU, memory, disk, and network to **diagnose performance issues** +## Platform Support + +Mole is designed for **macOS**. For Windows users, please visit the [windows branch](https://github.com/tw93/Mole/tree/windows) for the native Windows version. + ## Quick Start **Install via Homebrew — recommended:** @@ -72,6 +76,7 @@ mo purge --paths # Configure project scan directories - **Terminal**: iTerm2 has known compatibility issues; we recommend Alacritty, kitty, WezTerm, Ghostty, or Warp. - **Safety**: Built with strict protections. See [Security Audit](SECURITY_AUDIT.md). Preview changes with `mo clean --dry-run`. +- **Be Careful**: Although safe by design, file deletion is permanent. Please review operations carefully. - **Debug Mode**: Use `--debug` for detailed logs (e.g., `mo clean --debug`). Combine with `--dry-run` for comprehensive preview including risk levels and file details. - **Navigation**: Supports arrow keys and Vim bindings (`h/j/k/l`). - **Status Shortcuts**: In `mo status`, press `k` to toggle cat visibility and save preference, `q` to quit. 
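The `--debug` and `--dry-run` notes above combine into a single preview command; a minimal invocation, assuming the flags compose as the README describes:

```bash
# Preview everything `mo clean` would touch, including risk levels and
# per-file details, without deleting anything.
mo clean --dry-run --debug
```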
diff --git a/SECURITY_AUDIT.md b/SECURITY_AUDIT.md
index 2fc4a61..e4b4013 100644
--- a/SECURITY_AUDIT.md
+++ b/SECURITY_AUDIT.md
@@ -2,36 +2,17 @@
-**Security Audit & Compliance Report**
-
-Version 1.19.0 | January 5, 2026
-
----
-
-**Audit Status:** PASSED | **Risk Level:** LOW
+**Status:** PASSED | **Risk Level:** LOW | **Version:** 1.19.0 (2026-01-09)
--- -## Table of Contents - -1. [Audit Overview](#audit-overview) -2. [Security Philosophy](#security-philosophy) -3. [Threat Model](#threat-model) -4. [Defense Architecture](#defense-architecture) -5. [Safety Mechanisms](#safety-mechanisms) -6. [User Controls](#user-controls) -7. [Testing & Compliance](#testing--compliance) -8. [Dependencies](#dependencies) - ---- - ## Audit Overview | Attribute | Details | |-----------|---------| -| Audit Date | December 31, 2025 | +| Audit Date | January 9, 2026 | | Audit Conclusion | **PASSED** | | Mole Version | V1.19.0 | | Audited Branch | `main` (HEAD) | @@ -42,12 +23,12 @@ Version 1.19.0 | January 5, 2026 **Key Findings:** -- Multi-layered validation prevents critical system modifications -- Conservative cleaning logic with 60-day dormancy rules -- Comprehensive protection for VPN, AI tools, and system components -- Atomic operations with crash recovery mechanisms -- Full user control with dry-run and whitelist capabilities -- Installer cleanup safely scans common locations with user confirmation +- Multi-layer validation effectively blocks risky system modifications. +- Conservative cleaning logic ensures safety (e.g., 60-day dormancy rule). +- Comprehensive protection for VPNs, AI tools, and core system components. +- Atomic operations prevent state corruption during crashes. +- Dry-run and whitelist features give users full control. +- Installer cleanup scans safely and requires user confirmation. --- @@ -55,14 +36,14 @@ Version 1.19.0 | January 5, 2026 **Core Principle: "Do No Harm"** -Mole operates under a **Zero Trust** architecture for all filesystem operations. Every modification request is treated as potentially dangerous until passing strict validation. +We built Mole on a **Zero Trust** architecture for filesystem operations. Every modification request is treated as dangerous until it passes strict validation. **Guiding Priorities:** -1. **System Stability First** - Prefer leaving 1GB of junk over deleting 1KB of critical data -2. **Conservative by Default** - Require explicit user confirmation for high-risk operations -3. **Fail Safe** - When in doubt, abort rather than proceed -4. **Transparency** - All operations are logged and can be previewed via dry-run mode +1. **System Stability First** - We'd rather leave 1GB of junk than delete 1KB of your data. +2. **Conservative by Default** - High-risk operations always require explicit confirmation. +3. **Fail Safe** - When in doubt, we abort immediately. +4. **Transparency** - Every operation is logged and allows a preview via dry-run mode. --- @@ -89,7 +70,7 @@ Mole operates under a **Zero Trust** architecture for all filesystem operations. ### Multi-Layered Validation System -All automated operations pass through hardened middleware (`lib/core/file_ops.sh`) with 4 validation layers: +All automated operations pass through hardened middleware (`lib/core/file_ops.sh`) with 4 layers of validation: #### Layer 1: Input Sanitization @@ -114,7 +95,7 @@ Even with `sudo`, these paths are **unconditionally blocked**: /Library/Extensions # Kernel extensions ``` -**Exception:** `/System/Library/Caches/com.apple.coresymbolicationd/data` (safe, rebuildable cache) +**Exception:** `/System/Library/Caches/com.apple.coresymbolicationd/data` (safe, rebuildable cache). 
**Code:** `lib/core/file_ops.sh:60-78` @@ -122,9 +103,9 @@ Even with `sudo`, these paths are **unconditionally blocked**: For privileged operations, pre-flight checks prevent symlink-based attacks: -- Detects symlinks pointing from cache folders to system files -- Refuses recursive deletion of symbolic links in sudo mode -- Validates real path vs symlink target +- Detects symlinks from cache folders pointing to system files. +- Refuses recursive deletion of symbolic links in sudo mode. +- Validates real path vs. symlink target. **Code:** `lib/core/file_ops.sh:safe_sudo_recursive_delete()` @@ -132,18 +113,19 @@ For privileged operations, pre-flight checks prevent symlink-based attacks: When running with `sudo`: -- Auto-corrects ownership back to user (`chown -R`) -- Operations restricted to user's home directory -- Multiple validation checkpoints +- Auto-corrects ownership back to user (`chown -R`). +- Restricts operations to the user's home directory. +- Enforces multiple validation checkpoints. ### Interactive Analyzer (Go) -The analyzer (`mo analyze`) uses a different security model: +The analyzer (`mo analyze`) uses a distinct security model: -- Runs with standard user permissions only -- Respects macOS System Integrity Protection (SIP) -- All deletions require explicit user confirmation -- OS-level enforcement (cannot delete `/System` due to Read-Only Volume) +- Runs with standard user permissions only. +- Respects macOS System Integrity Protection (SIP). +- **Two-Key Confirmation:** Deletion requires ⌫ (Delete) to enter confirmation mode, then Enter to confirm. Prevents accidental double-press of the same key. +- **Trash Instead of Delete:** Files are moved to macOS Trash using Finder's native API, allowing easy recovery if needed. +- OS-level enforcement (cannot delete `/System` due to Read-Only Volume). **Code:** `cmd/analyze/*.go` @@ -159,18 +141,26 @@ The analyzer (`mo analyze`) uses a different security model: |------|--------------|-----------| | 1. App Check | All installation locations | Must be missing from `/Applications`, `~/Applications`, `/System/Applications` | | 2. Dormancy | Modification timestamps | Untouched for ≥60 days | -| 3. Vendor Whitelist | Cross-reference database | Adobe, Microsoft, Google resources protected | +| 3. Vendor Whitelist | Cross-reference database | Adobe, Microsoft, and Google resources are protected | **Code:** `lib/clean/apps.sh:orphan_detection()` +#### Developer Tool Ecosystems (Consolidated) + +Support for 20+ languages (Rust, Go, Node, Python, JVM, Mobile, Elixir, Haskell, OCaml, etc.) with strict safety checks: + +- **Global Optimization:** The core `safe_clean` function now intelligently checks parent directories before attempting wildcard cleanups, eliminating overhead for missing tools across the entire system. +- **Safe Targets:** Only volatile caches are cleaned (e.g., `~/.cargo/registry/cache`, `~/.gradle/caches`). +- **Protected Paths:** Critical directories like `~/.cargo/bin`, `~/.mix/archives`, `~/.rustup` toolchains, and `~/.stack/programs` are explicitly **excluded**. + #### Active Uninstallation Heuristics For user-selected app removal: - **Sanitized Name Matching:** "Visual Studio Code" → `VisualStudioCode`, `.vscode` - **Safety Limit:** 3-char minimum (prevents "Go" matching "Google") -- **Disabled:** Fuzzy matching, wildcard expansion for short names -- **User Confirmation:** Required before deletion +- **Disabled:** Fuzzy matching and wildcard expansion for short names. +- **User Confirmation:** Required before deletion. 
**Code:** `lib/clean/apps.sh:uninstall_app()` @@ -181,21 +171,21 @@ For user-selected app removal: | System Integrity Protection | `/Library/Updates`, `/System/*` | Respects macOS Read-Only Volume | | Spotlight & System UI | `~/Library/Metadata/CoreSpotlight` | Prevents UI corruption | | System Components | Control Center, System Settings, TCC | Centralized detection via `is_critical_system_component()` | -| Time Machine | Local snapshots, backups | Checks `backupd` process, aborts if active | +| Time Machine | Local snapshots, backups | Runtime activity detection (backup running, snapshots mounted), fails safe if status indeterminate | | VPN & Proxy | Shadowsocks, V2Ray, Tailscale, Clash | Protects network configs | -| AI & LLM Tools | Cursor, Claude, ChatGPT, Ollama, LM Studio | Protects models, tokens, sessions | +| AI & LLM Tools | Cursor, Claude, ChatGPT, Ollama, LM Studio | Protects models, tokens, and sessions | | Startup Items | `com.apple.*` LaunchAgents/Daemons | System items unconditionally skipped | **Orphaned Helper Cleanup (`opt_startup_items_cleanup`):** Removes LaunchAgents/Daemons whose associated app has been uninstalled: -- Checks `AssociatedBundleIdentifiers` to detect orphans -- Skips all `com.apple.*` system items -- Skips paths under `/System/*`, `/usr/bin/*`, `/usr/lib/*`, `/usr/sbin/*`, `/Library/Apple/*` -- Uses `safe_remove` / `safe_sudo_remove` with path validation -- Unloads service via `launchctl` before deletion -- `mdfind` operations have 10-second timeout protection +- Checks `AssociatedBundleIdentifiers` to detect orphans. +- Skips all `com.apple.*` system items. +- Skips paths under `/System/*`, `/usr/bin/*`, `/usr/lib/*`, `/usr/sbin/*`, `/Library/Apple/*`. +- Uses `safe_remove` / `safe_sudo_remove` with path validation. +- Unloads service via `launchctl` before deletion. +- **Timeout Protection:** 10-second limit on `mdfind` operations. **Code:** `lib/optimize/tasks.sh:opt_startup_items_cleanup()` @@ -206,9 +196,9 @@ Removes LaunchAgents/Daemons whose associated app has been uninstalled: | Network Interface Reset | Atomic execution blocks | Wi-Fi/AirDrop restored to pre-operation state | | Swap Clearing | Daemon restart | `dynamic_pager` handles recovery safely | | Volume Scanning | Timeout + filesystem check | Auto-skip unresponsive NFS/SMB/AFP mounts | -| Homebrew Cache | Pre-flight size check | Skip if <50MB (avoids 30-120s delay) | +| Homebrew Cache | Pre-flight size check | Skip if <50MB (avoids long delays) | | Network Volume Check | `diskutil info` with timeout | Prevents hangs on slow/dead mounts | -| SQLite Vacuum | App-running check + 20s timeout | Skips if Mail/Safari/Messages running | +| SQLite Vacuum | App-running check + 20s timeout | Skips if Mail/Safari/Messages active | | dyld Cache Update | 24-hour freshness check + 180s timeout | Skips if recently updated | | App Bundle Search | 10s timeout on mdfind | Fallback to standard paths | @@ -230,10 +220,10 @@ run_with_timeout 5 diskutil info "$mount_point" || skip_volume **Behavior:** -- Simulates entire operation without filesystem modifications -- Lists every file/directory that **would** be deleted -- Calculates total space that **would** be freed -- Zero risk - no actual deletion commands executed +- Simulates the entire operation without modifying a single file. +- Lists every file/directory that **would** be deleted. +- Calculates total space that **would** be freed. +- **Zero risk** - no actual deletion commands are executed. 
### Custom Whitelists @@ -247,19 +237,19 @@ run_with_timeout 5 diskutil info "$mount_point" || skip_volume ~/Library/Application Support/CriticalApp ``` -- Paths are **unconditionally protected** -- Applies to all operations (clean, optimize, uninstall) -- Supports absolute paths and `~` expansion +- Paths are **unconditionally protected**. +- Applies to all operations (clean, optimize, uninstall). +- Supports absolute paths and `~` expansion. **Code:** `lib/core/file_ops.sh:is_whitelisted()` ### Interactive Confirmations -Required for: +We mandate confirmation for: -- Uninstalling system-scope applications -- Removing large data directories (>1GB) -- Deleting items from shared vendor folders +- Uninstalling system-scope applications. +- Removing large data directories (>1GB). +- Deleting items from shared vendor folders. --- @@ -291,33 +281,33 @@ bats tests/security.bats # Run specific suite | Standard | Implementation | |----------|----------------| | OWASP Secure Coding | Input validation, least privilege, defense-in-depth | -| CWE-22 (Path Traversal) | Enhanced detection: rejects `/../` components while allowing `..` in directory names (Firefox compatibility) | +| CWE-22 (Path Traversal) | Enhanced detection: rejects `/../` components, safely handles `..` in directory names | | CWE-78 (Command Injection) | Control character filtering | | CWE-59 (Link Following) | Symlink detection before privileged operations | | Apple File System Guidelines | Respects SIP, Read-Only Volumes, TCC | ### Security Development Lifecycle -- **Static Analysis:** shellcheck for all shell scripts -- **Code Review:** All changes reviewed by maintainers -- **Dependency Scanning:** Minimal external dependencies, all vetted +- **Static Analysis:** `shellcheck` runs on all shell scripts. +- **Code Review:** All changes are manually reviewed by maintainers. +- **Dependency Scanning:** Minimal external dependencies, all carefully vetted. ### Known Limitations | Limitation | Impact | Mitigation | |------------|--------|------------| -| Requires `sudo` for system caches | Initial friction | Clear documentation | -| 60-day rule may delay cleanup | Some orphans remain longer | Manual `mo uninstall` available | -| No undo functionality | Deleted files unrecoverable | Dry-run mode, warnings | -| English-only name matching | May miss non-English apps | Bundle ID fallback | +| Requires `sudo` for system caches | Initial friction | Clear documentation explaining why | +| 60-day rule may delay cleanup | Some orphans remain longer | Manual `mo uninstall` is always available | +| No undo functionality | Deleted files are unrecoverable | Dry-run mode and warnings are clear | +| English-only name matching | May miss non-English apps | Fallback to Bundle ID matching | **Intentionally Out of Scope (Safety):** -- Automatic deletion of user documents/media -- Encryption key stores or password managers -- System configuration files (`/etc/*`) -- Browser history or cookies -- Git repository cleanup +- Automatic deletion of user documents/media. +- Encryption key stores or password managers. +- System configuration files (`/etc/*`). +- Browser history or cookies. +- Git repository cleanup. 
--- @@ -325,7 +315,7 @@ bats tests/security.bats # Run specific suite ### System Binaries -Mole relies on standard macOS system binaries (all SIP-protected): +Mole relies on standard, SIP-protected macOS system binaries: | Binary | Purpose | Fallback | |--------|---------|----------| @@ -347,14 +337,14 @@ The compiled Go binary (`analyze-go`) includes: **Supply Chain Security:** -- All dependencies pinned to specific versions -- Regular security audits -- No transitive dependencies with known CVEs -- **Automated Releases**: Binaries compiled via GitHub Actions and signed -- **Source Only**: Repository contains no pre-compiled binaries +- All dependencies are pinned to specific versions. +- Regular security audits. +- No transitive dependencies with known CVEs. +- **Automated Releases**: Binaries are compiled and signed via GitHub Actions. +- **Source Only**: The repository contains no pre-compiled binaries. --- -**Certification:** This security audit certifies that Mole implements industry-standard defensive programming practices and adheres to macOS security guidelines. The architecture prioritizes system stability and data integrity over aggressive optimization. +**Our Commitment:** This document certifies that Mole implements industry-standard defensive programming practices and strictly adheres to macOS security guidelines. We prioritize system stability and data integrity above all else. -*For security concerns or vulnerability reports, please contact the maintainers via GitHub Issues.* +*For security concerns or vulnerability reports, please open an issue or contact the maintainers directly.* diff --git a/bin/clean.sh b/bin/clean.sh index 7beac0d..2de8f28 100755 --- a/bin/clean.sh +++ b/bin/clean.sh @@ -322,6 +322,35 @@ safe_clean() { targets=("${@:1:$#-1}") fi + local -a valid_targets=() + for target in "${targets[@]}"; do + # Optimization: If target is a glob literal and parent dir missing, skip it. + if [[ "$target" == *"*"* && ! -e "$target" ]]; then + local base_path="${target%%\**}" + local parent_dir + if [[ "$base_path" == */ ]]; then + parent_dir="${base_path%/}" + else + parent_dir=$(dirname "$base_path") + fi + + if [[ ! -d "$parent_dir" ]]; then + # debug_log "Skipping nonexistent parent: $parent_dir for $target" + continue + fi + fi + valid_targets+=("$target") + done + + if [[ ${#valid_targets[@]} -gt 0 ]]; then + targets=("${valid_targets[@]}") + else + targets=() + fi + if [[ ${#targets[@]} -eq 0 ]]; then + return 0 + fi + local removed_any=0 local total_size_kb=0 local total_count=0 diff --git a/bin/touchid.sh b/bin/touchid.sh index c1a626a..1f45914 100755 --- a/bin/touchid.sh +++ b/bin/touchid.sh @@ -14,10 +14,17 @@ LIB_DIR="$(cd "$SCRIPT_DIR/../lib" && pwd)" source "$LIB_DIR/core/common.sh" readonly PAM_SUDO_FILE="${MOLE_PAM_SUDO_FILE:-/etc/pam.d/sudo}" +readonly PAM_SUDO_LOCAL_FILE="${MOLE_PAM_SUDO_LOCAL_FILE:-/etc/pam.d/sudo_local}" readonly PAM_TID_LINE="auth sufficient pam_tid.so" # Check if Touch ID is already configured is_touchid_configured() { + # Check sudo_local first + if [[ -f "$PAM_SUDO_LOCAL_FILE" ]]; then + grep -q "pam_tid.so" "$PAM_SUDO_LOCAL_FILE" 2> /dev/null && return 0 + fi + + # Fallback to standard sudo file if [[ ! 
-f "$PAM_SUDO_FILE" ]]; then return 1 fi @@ -74,7 +81,74 @@ enable_touchid() { echo "" fi - # Check if already configured + # Check if we should use sudo_local (Sonoma+) + if grep -q "sudo_local" "$PAM_SUDO_FILE"; then + # Check if already correctly configured in sudo_local + if [[ -f "$PAM_SUDO_LOCAL_FILE" ]] && grep -q "pam_tid.so" "$PAM_SUDO_LOCAL_FILE"; then + # It is in sudo_local, but let's check if it's ALSO in sudo (incomplete migration) + if grep -q "pam_tid.so" "$PAM_SUDO_FILE"; then + # Clean up legacy config + temp_file=$(mktemp) + grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file" + if sudo mv "$temp_file" "$PAM_SUDO_FILE" 2> /dev/null; then + echo -e "${GREEN}${ICON_SUCCESS} Cleanup legacy configuration${NC}" + fi + fi + echo -e "${GREEN}${ICON_SUCCESS} Touch ID is already enabled${NC}" + return 0 + fi + + # Not configured in sudo_local yet. + # Check if configured in sudo (Legacy) + local is_legacy_configured=false + if grep -q "pam_tid.so" "$PAM_SUDO_FILE"; then + is_legacy_configured=true + fi + + # Function to write to sudo_local + local write_success=false + if [[ ! -f "$PAM_SUDO_LOCAL_FILE" ]]; then + # Create the file + echo "# sudo_local: local customizations for sudo" | sudo tee "$PAM_SUDO_LOCAL_FILE" > /dev/null + echo "$PAM_TID_LINE" | sudo tee -a "$PAM_SUDO_LOCAL_FILE" > /dev/null + sudo chmod 444 "$PAM_SUDO_LOCAL_FILE" + sudo chown root:wheel "$PAM_SUDO_LOCAL_FILE" + write_success=true + else + # Append if not present + if ! grep -q "pam_tid.so" "$PAM_SUDO_LOCAL_FILE"; then + temp_file=$(mktemp) + cp "$PAM_SUDO_LOCAL_FILE" "$temp_file" + echo "$PAM_TID_LINE" >> "$temp_file" + sudo mv "$temp_file" "$PAM_SUDO_LOCAL_FILE" + sudo chmod 444 "$PAM_SUDO_LOCAL_FILE" + sudo chown root:wheel "$PAM_SUDO_LOCAL_FILE" + write_success=true + else + write_success=true # Already there (should be caught by first check, but safe fallback) + fi + fi + + if $write_success; then + # If we migrated from legacy, clean it up now + if $is_legacy_configured; then + temp_file=$(mktemp) + grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file" + sudo mv "$temp_file" "$PAM_SUDO_FILE" + log_success "Touch ID migrated to sudo_local" + else + log_success "Touch ID enabled (via sudo_local) - try: sudo ls" + fi + return 0 + else + log_error "Failed to write to sudo_local" + return 1 + fi + fi + + # Legacy method: Modify sudo file directly + + # Check if already configured (Legacy) if is_touchid_configured; then echo -e "${GREEN}${ICON_SUCCESS} Touch ID is already enabled${NC}" return 0 @@ -129,26 +203,55 @@ disable_touchid() { return 0 fi - # Create backup only if it doesn't exist - if [[ ! -f "${PAM_SUDO_FILE}.mole-backup" ]]; then - if ! 
sudo cp "$PAM_SUDO_FILE" "${PAM_SUDO_FILE}.mole-backup" 2> /dev/null; then - log_error "Failed to create backup" + # Check sudo_local first + if [[ -f "$PAM_SUDO_LOCAL_FILE" ]] && grep -q "pam_tid.so" "$PAM_SUDO_LOCAL_FILE"; then + # Remove from sudo_local + temp_file=$(mktemp) + grep -v "pam_tid.so" "$PAM_SUDO_LOCAL_FILE" > "$temp_file" + + if sudo mv "$temp_file" "$PAM_SUDO_LOCAL_FILE" 2> /dev/null; then + # Since we modified sudo_local, we should also check if it's in sudo file (legacy cleanup) + if grep -q "pam_tid.so" "$PAM_SUDO_FILE"; then + temp_file=$(mktemp) + grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file" + sudo mv "$temp_file" "$PAM_SUDO_FILE" + fi + echo -e "${GREEN}${ICON_SUCCESS} Touch ID disabled (removed from sudo_local)${NC}" + echo "" + return 0 + else + log_error "Failed to disable Touch ID from sudo_local" return 1 fi fi - # Remove pam_tid.so line - temp_file=$(mktemp) - grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file" + # Fallback to sudo file (legacy) + if grep -q "pam_tid.so" "$PAM_SUDO_FILE"; then + # Create backup only if it doesn't exist + if [[ ! -f "${PAM_SUDO_FILE}.mole-backup" ]]; then + if ! sudo cp "$PAM_SUDO_FILE" "${PAM_SUDO_FILE}.mole-backup" 2> /dev/null; then + log_error "Failed to create backup" + return 1 + fi + fi - if sudo mv "$temp_file" "$PAM_SUDO_FILE" 2> /dev/null; then - echo -e "${GREEN}${ICON_SUCCESS} Touch ID disabled${NC}" - echo "" - return 0 - else - log_error "Failed to disable Touch ID" - return 1 + # Remove pam_tid.so line + temp_file=$(mktemp) + grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file" + + if sudo mv "$temp_file" "$PAM_SUDO_FILE" 2> /dev/null; then + echo -e "${GREEN}${ICON_SUCCESS} Touch ID disabled${NC}" + echo "" + return 0 + else + log_error "Failed to disable Touch ID" + return 1 + fi fi + + # Should not reach here if is_touchid_configured was true + log_error "Could not find Touch ID configuration to disable" + return 1 } # Interactive menu diff --git a/cmd/analyze/analyze_test.go b/cmd/analyze/analyze_test.go index 213f5c2..6ae8d2e 100644 --- a/cmd/analyze/analyze_test.go +++ b/cmd/analyze/analyze_test.go @@ -90,6 +90,11 @@ func TestScanPathConcurrentBasic(t *testing.T) { } func TestDeletePathWithProgress(t *testing.T) { + // Skip in CI environments where Finder may not be available. 
+ if os.Getenv("CI") != "" { + t.Skip("Skipping Finder-dependent test in CI") + } + parent := t.TempDir() target := filepath.Join(parent, "target") if err := os.MkdirAll(target, 0o755); err != nil { @@ -107,18 +112,15 @@ func TestDeletePathWithProgress(t *testing.T) { } var counter int64 - count, err := deletePathWithProgress(target, &counter) + count, err := trashPathWithProgress(target, &counter) if err != nil { - t.Fatalf("deletePathWithProgress returned error: %v", err) + t.Fatalf("trashPathWithProgress returned error: %v", err) } if count != int64(len(files)) { - t.Fatalf("expected %d files removed, got %d", len(files), count) - } - if got := atomic.LoadInt64(&counter); got != count { - t.Fatalf("counter mismatch: want %d, got %d", count, got) + t.Fatalf("expected %d files trashed, got %d", len(files), count) } if _, err := os.Stat(target); !os.IsNotExist(err) { - t.Fatalf("expected target to be removed, stat err=%v", err) + t.Fatalf("expected target to be moved to Trash, stat err=%v", err) } } diff --git a/cmd/analyze/cache.go b/cmd/analyze/cache.go index a0dcb7a..8d4c22b 100644 --- a/cmd/analyze/cache.go +++ b/cmd/analyze/cache.go @@ -7,6 +7,7 @@ import ( "fmt" "os" "path/filepath" + "slices" "sync" "time" @@ -27,9 +28,10 @@ var ( func snapshotFromModel(m model) historyEntry { return historyEntry{ Path: m.path, - Entries: cloneDirEntries(m.entries), - LargeFiles: cloneFileEntries(m.largeFiles), + Entries: slices.Clone(m.entries), + LargeFiles: slices.Clone(m.largeFiles), TotalSize: m.totalSize, + TotalFiles: m.totalFiles, Selected: m.selected, EntryOffset: m.offset, LargeSelected: m.largeSelected, @@ -44,24 +46,6 @@ func cacheSnapshot(m model) historyEntry { return entry } -func cloneDirEntries(entries []dirEntry) []dirEntry { - if len(entries) == 0 { - return nil - } - copied := make([]dirEntry, len(entries)) - copy(copied, entries) //nolint:all - return copied -} - -func cloneFileEntries(files []fileEntry) []fileEntry { - if len(files) == 0 { - return nil - } - copied := make([]fileEntry, len(files)) - copy(copied, files) //nolint:all - return copied -} - func ensureOverviewSnapshotCacheLocked() error { if overviewSnapshotLoaded { return nil @@ -250,6 +234,7 @@ func saveCacheToDisk(path string, result scanResult) error { Entries: result.Entries, LargeFiles: result.LargeFiles, TotalSize: result.TotalSize, + TotalFiles: result.TotalFiles, ModTime: info.ModTime(), ScanTime: time.Now(), } @@ -264,6 +249,29 @@ func saveCacheToDisk(path string, result scanResult) error { return encoder.Encode(entry) } +// peekCacheTotalFiles attempts to read the total file count from cache, +// ignoring expiration. Used for initial scan progress estimates. 
+func peekCacheTotalFiles(path string) (int64, error) { + cachePath, err := getCachePath(path) + if err != nil { + return 0, err + } + + file, err := os.Open(cachePath) + if err != nil { + return 0, err + } + defer file.Close() //nolint:errcheck + + var entry cacheEntry + decoder := gob.NewDecoder(file) + if err := decoder.Decode(&entry); err != nil { + return 0, err + } + + return entry.TotalFiles, nil +} + func invalidateCache(path string) { cachePath, err := getCachePath(path) if err == nil { diff --git a/cmd/analyze/delete.go b/cmd/analyze/delete.go index 3204241..11feaee 100644 --- a/cmd/analyze/delete.go +++ b/cmd/analyze/delete.go @@ -1,19 +1,24 @@ package main import ( - "io/fs" + "context" + "fmt" "os" + "os/exec" "path/filepath" "sort" "strings" "sync/atomic" + "time" tea "github.com/charmbracelet/bubbletea" ) +const trashTimeout = 30 * time.Second + func deletePathCmd(path string, counter *int64) tea.Cmd { return func() tea.Msg { - count, err := deletePathWithProgress(path, counter) + count, err := trashPathWithProgress(path, counter) return deleteProgressMsg{ done: true, err: err, @@ -23,20 +28,20 @@ func deletePathCmd(path string, counter *int64) tea.Cmd { } } -// deleteMultiplePathsCmd deletes paths and aggregates results. +// deleteMultiplePathsCmd moves paths to Trash and aggregates results. func deleteMultiplePathsCmd(paths []string, counter *int64) tea.Cmd { return func() tea.Msg { var totalCount int64 var errors []string - // Delete deeper paths first to avoid parent/child conflicts. + // Process deeper paths first to avoid parent/child conflicts. pathsToDelete := append([]string(nil), paths...) sort.Slice(pathsToDelete, func(i, j int) bool { return strings.Count(pathsToDelete[i], string(filepath.Separator)) > strings.Count(pathsToDelete[j], string(filepath.Separator)) }) for _, path := range pathsToDelete { - count, err := deletePathWithProgress(path, counter) + count, err := trashPathWithProgress(path, counter) totalCount += count if err != nil { if os.IsNotExist(err) { @@ -72,48 +77,70 @@ func (e *multiDeleteError) Error() string { return strings.Join(e.errors[:min(3, len(e.errors))], "; ") } -func deletePathWithProgress(root string, counter *int64) (int64, error) { +// trashPathWithProgress moves a path to Trash using Finder. +// This allows users to recover accidentally deleted files. +func trashPathWithProgress(root string, counter *int64) (int64, error) { + // Verify path exists (use Lstat to handle broken symlinks). + info, err := os.Lstat(root) + if err != nil { + return 0, err + } + + // Count items for progress reporting. var count int64 - var firstErr error - - err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error { - if err != nil { - // Skip permission errors but continue. 
- if os.IsPermission(err) { - if firstErr == nil { - firstErr = err - } - return filepath.SkipDir + if info.IsDir() { + _ = filepath.WalkDir(root, func(_ string, d os.DirEntry, err error) error { + if err != nil { + return nil } - if firstErr == nil { - firstErr = err - } - return nil - } - - if !d.IsDir() { - if removeErr := os.Remove(path); removeErr == nil { + if !d.IsDir() { count++ if counter != nil { atomic.StoreInt64(counter, count) } - } else if firstErr == nil { - firstErr = removeErr } - } - - return nil - }) - - if err != nil && firstErr == nil { - firstErr = err - } - - if removeErr := os.RemoveAll(root); removeErr != nil { - if firstErr == nil { - firstErr = removeErr + return nil + }) + } else { + count = 1 + if counter != nil { + atomic.StoreInt64(counter, 1) } } - return count, firstErr + // Move to Trash using Finder AppleScript. + if err := moveToTrash(root); err != nil { + return 0, err + } + + return count, nil +} + +// moveToTrash uses macOS Finder to move a file/directory to Trash. +// This is the safest method as it uses the system's native trash mechanism. +func moveToTrash(path string) error { + absPath, err := filepath.Abs(path) + if err != nil { + return fmt.Errorf("failed to resolve path: %w", err) + } + + // Escape path for AppleScript (handle quotes and backslashes). + escapedPath := strings.ReplaceAll(absPath, "\\", "\\\\") + escapedPath = strings.ReplaceAll(escapedPath, "\"", "\\\"") + + script := fmt.Sprintf(`tell application "Finder" to delete POSIX file "%s"`, escapedPath) + + ctx, cancel := context.WithTimeout(context.Background(), trashTimeout) + defer cancel() + + cmd := exec.CommandContext(ctx, "osascript", "-e", script) + output, err := cmd.CombinedOutput() + if err != nil { + if ctx.Err() == context.DeadlineExceeded { + return fmt.Errorf("timeout moving to Trash") + } + return fmt.Errorf("failed to move to Trash: %s", strings.TrimSpace(string(output))) + } + + return nil } diff --git a/cmd/analyze/delete_test.go b/cmd/analyze/delete_test.go index 6039367..9e48b22 100644 --- a/cmd/analyze/delete_test.go +++ b/cmd/analyze/delete_test.go @@ -6,7 +6,47 @@ import ( "testing" ) +func TestTrashPathWithProgress(t *testing.T) { + // Skip in CI environments where Finder may not be available. + if os.Getenv("CI") != "" { + t.Skip("Skipping Finder-dependent test in CI") + } + + parent := t.TempDir() + target := filepath.Join(parent, "target") + if err := os.MkdirAll(target, 0o755); err != nil { + t.Fatalf("create target: %v", err) + } + + files := []string{ + filepath.Join(target, "one.txt"), + filepath.Join(target, "two.txt"), + } + for _, f := range files { + if err := os.WriteFile(f, []byte("content"), 0o644); err != nil { + t.Fatalf("write %s: %v", f, err) + } + } + + var counter int64 + count, err := trashPathWithProgress(target, &counter) + if err != nil { + t.Fatalf("trashPathWithProgress returned error: %v", err) + } + if count != int64(len(files)) { + t.Fatalf("expected %d files trashed, got %d", len(files), count) + } + if _, err := os.Stat(target); !os.IsNotExist(err) { + t.Fatalf("expected target to be moved to Trash, stat err=%v", err) + } +} + func TestDeleteMultiplePathsCmdHandlesParentChild(t *testing.T) { + // Skip in CI environments where Finder may not be available. 
+ if os.Getenv("CI") != "" { + t.Skip("Skipping Finder-dependent test in CI") + } + base := t.TempDir() parent := filepath.Join(base, "parent") child := filepath.Join(parent, "child") @@ -32,12 +72,16 @@ func TestDeleteMultiplePathsCmdHandlesParentChild(t *testing.T) { t.Fatalf("unexpected error: %v", progress.err) } if progress.count != 2 { - t.Fatalf("expected 2 files deleted, got %d", progress.count) + t.Fatalf("expected 2 files trashed, got %d", progress.count) } if _, err := os.Stat(parent); !os.IsNotExist(err) { - t.Fatalf("expected parent to be removed, err=%v", err) - } - if _, err := os.Stat(child); !os.IsNotExist(err) { - t.Fatalf("expected child to be removed, err=%v", err) + t.Fatalf("expected parent to be moved to Trash, err=%v", err) + } +} + +func TestMoveToTrashNonExistent(t *testing.T) { + err := moveToTrash("/nonexistent/path/that/does/not/exist") + if err == nil { + t.Fatal("expected error for non-existent path") } } diff --git a/cmd/analyze/main.go b/cmd/analyze/main.go index e97500e..d3d34d5 100644 --- a/cmd/analyze/main.go +++ b/cmd/analyze/main.go @@ -9,6 +9,7 @@ import ( "os" "os/exec" "path/filepath" + "slices" "sort" "strings" "sync/atomic" @@ -35,12 +36,14 @@ type scanResult struct { Entries []dirEntry LargeFiles []fileEntry TotalSize int64 + TotalFiles int64 } type cacheEntry struct { Entries []dirEntry LargeFiles []fileEntry TotalSize int64 + TotalFiles int64 ModTime time.Time ScanTime time.Time } @@ -50,6 +53,7 @@ type historyEntry struct { Entries []dirEntry LargeFiles []fileEntry TotalSize int64 + TotalFiles int64 Selected int EntryOffset int LargeSelected int @@ -114,6 +118,8 @@ type model struct { height int // Terminal height multiSelected map[string]bool // Track multi-selected items by path (safer than index) largeMultiSelected map[string]bool // Track multi-selected large files by path (safer than index) + totalFiles int64 // Total files found in current/last scan + lastTotalFiles int64 // Total files from previous scan (for progress bar) } func (m model) inOverviewMode() bool { @@ -195,6 +201,13 @@ func newModel(path string, isOverview bool) model { } } + // Try to peek last total files for progress bar, even if cache is stale + if !isOverview { + if total, err := peekCacheTotalFiles(path); err == nil && total > 0 { + m.lastTotalFiles = total + } + } + return m } @@ -355,6 +368,7 @@ func (m model) scanCmd(path string) tea.Cmd { Entries: cached.Entries, LargeFiles: cached.LargeFiles, TotalSize: cached.TotalSize, + TotalFiles: 0, // Cache doesn't store file count currently, minor UI limitation } return scanResultMsg{result: result, err: nil} } @@ -441,6 +455,7 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { m.entries = filteredEntries m.largeFiles = msg.result.LargeFiles m.totalSize = msg.result.TotalSize + m.totalFiles = msg.result.TotalFiles m.status = fmt.Sprintf("Scanned %s", humanizeBytes(m.totalSize)) m.clampEntrySelection() m.clampLargeSelection() @@ -501,7 +516,7 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) { if m.deleting && m.deleteCount != nil { count := atomic.LoadInt64(m.deleteCount) if count > 0 { - m.status = fmt.Sprintf("Deleting... %s items removed", formatNumber(count)) + m.status = fmt.Sprintf("Moving to Trash... %s items", formatNumber(count)) } } return m, tickCmd() @@ -516,7 +531,7 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { // Delete confirm flow. 
if m.deleteConfirm { switch msg.String() { - case "delete", "backspace": + case "enter": m.deleteConfirm = false m.deleting = true var deleteCount int64 @@ -685,6 +700,9 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { invalidateCache(m.path) m.status = "Refreshing..." m.scanning = true + if m.totalFiles > 0 { + m.lastTotalFiles = m.totalFiles + } atomic.StoreInt64(m.filesScanned, 0) atomic.StoreInt64(m.dirsScanned, 0) atomic.StoreInt64(m.bytesScanned, 0) @@ -965,9 +983,10 @@ func (m model) enterSelectedDir() (tea.Model, tea.Cmd) { } if cached, ok := m.cache[m.path]; ok && !cached.Dirty { - m.entries = cloneDirEntries(cached.Entries) - m.largeFiles = cloneFileEntries(cached.LargeFiles) + m.entries = slices.Clone(cached.Entries) + m.largeFiles = slices.Clone(cached.LargeFiles) m.totalSize = cached.TotalSize + m.totalFiles = cached.TotalFiles m.selected = cached.Selected m.offset = cached.EntryOffset m.largeSelected = cached.LargeSelected @@ -978,6 +997,10 @@ func (m model) enterSelectedDir() (tea.Model, tea.Cmd) { m.scanning = false return m, nil } + m.lastTotalFiles = 0 + if total, err := peekCacheTotalFiles(m.path); err == nil && total > 0 { + m.lastTotalFiles = total + } return m, tea.Batch(m.scanCmd(m.path), tickCmd()) } m.status = fmt.Sprintf("File: %s (%s)", selected.Name, humanizeBytes(selected.Size)) diff --git a/cmd/analyze/scanner.go b/cmd/analyze/scanner.go index b6ab09b..0d7ddca 100644 --- a/cmd/analyze/scanner.go +++ b/cmd/analyze/scanner.go @@ -251,6 +251,7 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in Entries: entries, LargeFiles: largeFiles, TotalSize: total, + TotalFiles: atomic.LoadInt64(filesScanned), }, nil } diff --git a/cmd/analyze/view.go b/cmd/analyze/view.go index 6cdf400..f2845a9 100644 --- a/cmd/analyze/view.go +++ b/cmd/analyze/view.go @@ -75,10 +75,25 @@ func (m model) View() string { if m.scanning { filesScanned, dirsScanned, bytesScanned := m.getScanProgress() - fmt.Fprintf(&b, "%s%s%s%s Scanning: %s%s files%s, %s%s dirs%s, %s%s%s\n", + progressPrefix := "" + if m.lastTotalFiles > 0 { + percent := float64(filesScanned) / float64(m.lastTotalFiles) * 100 + // Cap at 100% generally + if percent > 100 { + percent = 100 + } + // While strictly scanning, cap at 99% to avoid "100% but still working" confusion + if m.scanning && percent >= 100 { + percent = 99 + } + progressPrefix = fmt.Sprintf(" %s(%.0f%%)%s", colorCyan, percent, colorReset) + } + + fmt.Fprintf(&b, "%s%s%s%s Scanning%s: %s%s files%s, %s%s dirs%s, %s%s%s\n", colorCyan, colorBold, spinnerFrames[m.spinner], colorReset, + progressPrefix, colorYellow, formatNumber(filesScanned), colorReset, colorYellow, formatNumber(dirsScanned), colorReset, colorGreen, humanizeBytes(bytesScanned), colorReset) @@ -375,12 +390,12 @@ func (m model) View() string { } if deleteCount > 1 { - fmt.Fprintf(&b, "%sDelete:%s %d items (%s) %sPress ⌫ again | ESC cancel%s\n", + fmt.Fprintf(&b, "%sDelete:%s %d items (%s) %sPress Enter to confirm | ESC cancel%s\n", colorRed, colorReset, deleteCount, humanizeBytes(totalDeleteSize), colorGray, colorReset) } else { - fmt.Fprintf(&b, "%sDelete:%s %s (%s) %sPress ⌫ again | ESC cancel%s\n", + fmt.Fprintf(&b, "%sDelete:%s %s (%s) %sPress Enter to confirm | ESC cancel%s\n", colorRed, colorReset, m.deleteTarget.Name, humanizeBytes(m.deleteTarget.Size), colorGray, colorReset) diff --git a/go.mod b/go.mod index 01c230d..70db580 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,6 @@ require ( github.com/charmbracelet/x/cellbuf 
v0.0.13-0.20250311204145-2c3ea96c31dd // indirect github.com/charmbracelet/x/term v0.2.1 // indirect github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect - github.com/go-ole/go-ole v1.2.6 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -34,7 +33,6 @@ require ( github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect - github.com/yusufpapurcu/wmi v1.2.4 // indirect golang.org/x/sys v0.36.0 // indirect golang.org/x/text v0.3.8 // indirect ) diff --git a/lib/clean/app_caches.sh b/lib/clean/app_caches.sh index 3418aa3..c4b62c6 100644 --- a/lib/clean/app_caches.sh +++ b/lib/clean/app_caches.sh @@ -88,6 +88,7 @@ clean_productivity_apps() { safe_clean ~/Library/Caches/com.orabrowser.app/* "Ora browser cache" safe_clean ~/Library/Caches/com.filo.client/* "Filo cache" safe_clean ~/Library/Caches/com.flomoapp.mac/* "Flomo cache" + safe_clean ~/Library/Application\ Support/Quark/Cache/videoCache/* "Quark video cache" } # Music/media players (protect Spotify offline music). clean_media_players() { diff --git a/lib/clean/apps.sh b/lib/clean/apps.sh index 7e844c0..8130482 100644 --- a/lib/clean/apps.sh +++ b/lib/clean/apps.sh @@ -87,6 +87,11 @@ scan_installed_apps() { "/Applications" "/System/Applications" "$HOME/Applications" + # Homebrew Cask locations + "/opt/homebrew/Caskroom" + "/usr/local/Caskroom" + # Setapp applications + "$HOME/Library/Application Support/Setapp/Applications" ) # Temp dir avoids write contention across parallel scans. local scan_tmp_dir=$(create_temp_dir) @@ -117,6 +122,10 @@ scan_installed_apps() { ( local running_apps=$(run_with_timeout 5 osascript -e 'tell application "System Events" to get bundle identifier of every application process' 2> /dev/null || echo "") echo "$running_apps" | tr ',' '\n' | sed -e 's/^ *//;s/ *$//' -e '/^$/d' > "$scan_tmp_dir/running.txt" + # Fallback: lsappinfo is more reliable than osascript + if command -v lsappinfo > /dev/null 2>&1; then + run_with_timeout 3 lsappinfo list 2> /dev/null | grep -o '"CFBundleIdentifier"="[^"]*"' | cut -d'"' -f4 >> "$scan_tmp_dir/running.txt" 2> /dev/null || true + fi ) & pids+=($!) ( @@ -126,9 +135,11 @@ scan_installed_apps() { ) & pids+=($!) 
debug_log "Waiting for ${#pids[@]} background processes: ${pids[*]}" - for pid in "${pids[@]}"; do - wait "$pid" 2> /dev/null || true - done + if [[ ${#pids[@]} -gt 0 ]]; then + for pid in "${pids[@]}"; do + wait "$pid" 2> /dev/null || true + done + fi debug_log "All background processes completed" cat "$scan_tmp_dir"/*.txt >> "$installed_bundles" 2> /dev/null || true safe_remove "$scan_tmp_dir" true @@ -138,25 +149,57 @@ scan_installed_apps() { local app_count=$(wc -l < "$installed_bundles" 2> /dev/null | tr -d ' ') debug_log "Scanned $app_count unique applications" } +# Sensitive data patterns that should never be treated as orphaned +# These patterns protect security-critical application data +readonly ORPHAN_NEVER_DELETE_PATTERNS=( + "*1password*" "*1Password*" + "*keychain*" "*Keychain*" + "*bitwarden*" "*Bitwarden*" + "*lastpass*" "*LastPass*" + "*keepass*" "*KeePass*" + "*dashlane*" "*Dashlane*" + "*enpass*" "*Enpass*" + "*ssh*" "*gpg*" "*gnupg*" + "com.apple.keychain*" +) + +# Cache file for mdfind results (Bash 3.2 compatible, no associative arrays) +ORPHAN_MDFIND_CACHE_FILE="" + # Usage: is_bundle_orphaned "bundle_id" "directory_path" "installed_bundles_file" is_bundle_orphaned() { local bundle_id="$1" local directory_path="$2" local installed_bundles="$3" + + # 1. Fast path: check protection list (in-memory, instant) if should_protect_data "$bundle_id"; then return 1 fi + + # 2. Fast path: check sensitive data patterns (in-memory, instant) + local bundle_lower + bundle_lower=$(echo "$bundle_id" | LC_ALL=C tr '[:upper:]' '[:lower:]') + for pattern in "${ORPHAN_NEVER_DELETE_PATTERNS[@]}"; do + # shellcheck disable=SC2053 + if [[ "$bundle_lower" == $pattern ]]; then + return 1 + fi + done + + # 3. Fast path: check installed bundles file (file read, fast) if grep -Fxq "$bundle_id" "$installed_bundles" 2> /dev/null; then return 1 fi - if should_protect_data "$bundle_id"; then - return 1 - fi + + # 4. Fast path: hardcoded system components case "$bundle_id" in loginwindow | dock | systempreferences | systemsettings | settings | controlcenter | finder | safari) return 1 ;; esac + + # 5. Fast path: 60-day modification check (stat call, fast) if [[ -e "$directory_path" ]]; then local last_modified_epoch=$(get_file_mtime "$directory_path") local current_epoch @@ -166,6 +209,37 @@ is_bundle_orphaned() { return 1 fi fi + + # 6. 
Slow path: mdfind fallback with file-based caching (Bash 3.2 compatible) + # This catches apps installed in non-standard locations + if [[ -n "$bundle_id" ]] && [[ "$bundle_id" =~ ^[a-zA-Z0-9._-]+$ ]] && [[ ${#bundle_id} -ge 5 ]]; then + # Initialize cache file if needed + if [[ -z "$ORPHAN_MDFIND_CACHE_FILE" ]]; then + ORPHAN_MDFIND_CACHE_FILE=$(mktemp "${TMPDIR:-/tmp}/mole_mdfind_cache.XXXXXX") + register_temp_file "$ORPHAN_MDFIND_CACHE_FILE" + fi + + # Check cache first (grep is fast for small files) + if grep -Fxq "FOUND:$bundle_id" "$ORPHAN_MDFIND_CACHE_FILE" 2> /dev/null; then + return 1 + fi + if grep -Fxq "NOTFOUND:$bundle_id" "$ORPHAN_MDFIND_CACHE_FILE" 2> /dev/null; then + # Already checked, not found - continue to return 0 + : + else + # Query mdfind with strict timeout (2 seconds max) + local app_exists + app_exists=$(run_with_timeout 2 mdfind "kMDItemCFBundleIdentifier == '$bundle_id'" 2> /dev/null | head -1 || echo "") + if [[ -n "$app_exists" ]]; then + echo "FOUND:$bundle_id" >> "$ORPHAN_MDFIND_CACHE_FILE" + return 1 + else + echo "NOTFOUND:$bundle_id" >> "$ORPHAN_MDFIND_CACHE_FILE" + fi + fi + fi + + # All checks passed - this is an orphan return 0 } # Orphaned app data sweep. @@ -207,29 +281,31 @@ clean_orphaned_app_data() { for pat in "${pattern_arr[@]}"; do file_patterns+=("$base_path/$pat") done - for item_path in "${file_patterns[@]}"; do - local iteration_count=0 - for match in $item_path; do - [[ -e "$match" ]] || continue - ((iteration_count++)) - if [[ $iteration_count -gt $MOLE_MAX_ORPHAN_ITERATIONS ]]; then - break - fi - local bundle_id=$(basename "$match") - bundle_id="${bundle_id%.savedState}" - bundle_id="${bundle_id%.binarycookies}" - if is_bundle_orphaned "$bundle_id" "$match" "$installed_bundles"; then - local size_kb - size_kb=$(get_path_size_kb "$match") - if [[ -z "$size_kb" || "$size_kb" == "0" ]]; then - continue + if [[ ${#file_patterns[@]} -gt 0 ]]; then + for item_path in "${file_patterns[@]}"; do + local iteration_count=0 + for match in $item_path; do + [[ -e "$match" ]] || continue + ((iteration_count++)) + if [[ $iteration_count -gt $MOLE_MAX_ORPHAN_ITERATIONS ]]; then + break fi - safe_clean "$match" "Orphaned $label: $bundle_id" - ((orphaned_count++)) - ((total_orphaned_kb += size_kb)) - fi + local bundle_id=$(basename "$match") + bundle_id="${bundle_id%.savedState}" + bundle_id="${bundle_id%.binarycookies}" + if is_bundle_orphaned "$bundle_id" "$match" "$installed_bundles"; then + local size_kb + size_kb=$(get_path_size_kb "$match") + if [[ -z "$size_kb" || "$size_kb" == "0" ]]; then + continue + fi + safe_clean "$match" "Orphaned $label: $bundle_id" + ((orphaned_count++)) + ((total_orphaned_kb += size_kb)) + fi + done done - done + fi done stop_section_spinner if [[ $orphaned_count -gt 0 ]]; then diff --git a/lib/clean/dev.sh b/lib/clean/dev.sh index bb519e7..cd16042 100644 --- a/lib/clean/dev.sh +++ b/lib/clean/dev.sh @@ -255,6 +255,27 @@ clean_dev_network() { clean_sqlite_temp_files() { return 0 } +# Elixir/Erlang ecosystem. +# Note: ~/.mix/archives contains installed Mix tools - excluded from cleanup +clean_dev_elixir() { + safe_clean ~/.hex/cache/* "Hex cache" +} +# Haskell ecosystem. +# Note: ~/.stack/programs contains Stack-installed GHC compilers - excluded from cleanup +clean_dev_haskell() { + safe_clean ~/.cabal/packages/* "Cabal install cache" +} +# OCaml ecosystem. +clean_dev_ocaml() { + safe_clean ~/.opam/download-cache/* "Opam cache" +} +# Editor caches. 
+# Note: ~/Library/Application Support/Code/User/workspaceStorage contains workspace settings - excluded from cleanup +clean_dev_editors() { + safe_clean ~/Library/Caches/com.microsoft.VSCode/Cache/* "VS Code cached data" + safe_clean ~/Library/Application\ Support/Code/CachedData/* "VS Code cached data" + safe_clean ~/Library/Caches/Zed/* "Zed cache" +} # Main developer tools cleanup sequence. clean_developer_tools() { stop_section_spinner @@ -277,6 +298,10 @@ clean_developer_tools() { clean_dev_api_tools clean_dev_network clean_dev_misc + clean_dev_elixir + clean_dev_haskell + clean_dev_ocaml + clean_dev_editors safe_clean ~/Library/Caches/Homebrew/* "Homebrew cache" # Clean Homebrew locks without repeated sudo prompts. local brew_lock_dirs=( diff --git a/lib/clean/project.sh b/lib/clean/project.sh index 85a5c37..7a70ba9 100644 --- a/lib/clean/project.sh +++ b/lib/clean/project.sh @@ -36,6 +36,10 @@ readonly PURGE_TARGETS=( ".dart_tool" # Flutter/Dart build cache ".zig-cache" # Zig "zig-out" # Zig + ".angular" # Angular + ".svelte-kit" # SvelteKit + ".astro" # Astro + "coverage" # Code coverage reports ) # Minimum age in days before considering for cleanup. readonly MIN_AGE_DAYS=7 diff --git a/lib/clean/system.sh b/lib/clean/system.sh index fe78075..bb95e9c 100644 --- a/lib/clean/system.sh +++ b/lib/clean/system.sh @@ -119,7 +119,7 @@ clean_time_machine_failed_backups() { echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" return 0 fi - if tmutil status 2> /dev/null | grep -q "Running = 1"; then + if tm_is_running; then if [[ "$spinner_active" == "true" ]]; then stop_section_spinner fi @@ -251,11 +251,60 @@ clean_time_machine_failed_backups() { echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" fi } +# Returns 0 if a backup is actively running. +# Returns 1 if not running. +# Returns 2 if status cannot be determined +tm_is_running() { + local st + st="$(tmutil status 2> /dev/null)" || return 2 + + # If we can't find a Running field at all, treat as unknown. + if ! grep -qE '(^|[[:space:]])("Running"|Running)[[:space:]]*=' <<< "$st"; then + return 2 + fi + + # Match: Running = 1; OR "Running" = 1 (with or without trailing ;) + grep -qE '(^|[[:space:]])("Running"|Running)[[:space:]]*=[[:space:]]*1([[:space:]]*;|$)' <<< "$st" +} + +# Returns 0 if snapshot mounts exist under local snapshot paths +# Returns 1 if none found +# Returns 2 if mount state cannot be determined +tm_snapshots_mounted() { + local m + if ! m="$(run_with_timeout 3 mount 2> /dev/null)"; then + return 2 + fi + # Match modern and legacy local-snapshot browse mounts: + # - /Volumes/com.apple.TimeMachine.localsnapshots/... (APFS) + # - /.TimeMachine (APFS) + # - /Volumes/MobileBackups (HFS+, legacy) + grep -qE '[[:space:]]on[[:space:]](/\.TimeMachine(/|[[:space:]])|/Volumes/com\.apple\.TimeMachine\.localsnapshots(/|[[:space:]])|/Volumes/MobileBackups(/|[[:space:]]))' <<< "$m" +} + # Local APFS snapshots (keep the most recent). clean_local_snapshots() { if ! command -v tmutil > /dev/null 2>&1; then return 0 fi + + local rc_running rc_mounted + rc_running=0 + tm_is_running || rc_running=$? + + rc_mounted=0 + tm_snapshots_mounted || rc_mounted=$? 
+ + if [[ $rc_running -eq 2 || $rc_mounted -eq 2 ]]; then + echo -e " ${YELLOW}!${NC} Could not determine Time Machine status; skipping snapshot cleanup" + return 0 + fi + + if [[ $rc_running -eq 0 || $rc_mounted -eq 0 ]]; then + echo -e " ${YELLOW}!${NC} Time Machine is active; skipping snapshot cleanup" + return 0 + fi + start_section_spinner "Checking local snapshots..." local snapshot_list snapshot_list=$(tmutil listlocalsnapshots / 2> /dev/null) diff --git a/lib/core/app_protection.sh b/lib/core/app_protection.sh index 2c617a4..5822c6a 100755 --- a/lib/core/app_protection.sh +++ b/lib/core/app_protection.sh @@ -85,8 +85,9 @@ readonly DATA_PROTECTED_BUNDLES=( "com.lastpass.*" # LastPass "com.dashlane.*" # Dashlane "com.bitwarden.*" # Bitwarden - "com.keepassx.*" # KeePassXC + "com.keepassx.*" # KeePassXC (Legacy) "org.keepassx.*" # KeePassX + "org.keepassxc.*" # KeePassXC "com.authy.*" # Authy "com.yubico.*" # YubiKey Manager diff --git a/lib/core/sudo.sh b/lib/core/sudo.sh index 9527d14..57b80b4 100644 --- a/lib/core/sudo.sh +++ b/lib/core/sudo.sh @@ -9,6 +9,13 @@ set -euo pipefail # ============================================================================ check_touchid_support() { + # Check sudo_local first (Sonoma+) + if [[ -f /etc/pam.d/sudo_local ]]; then + grep -q "pam_tid.so" /etc/pam.d/sudo_local 2> /dev/null + return $? + fi + + # Fallback to checking sudo directly if [[ -f /etc/pam.d/sudo ]]; then grep -q "pam_tid.so" /etc/pam.d/sudo 2> /dev/null return $? diff --git a/mole b/mole index 398bf72..b7e3bc6 100755 --- a/mole +++ b/mole @@ -82,20 +82,21 @@ check_for_updates() { ensure_user_file "$msg_cache" ( - local latest + ( + local latest - latest=$(get_latest_version_from_github) - if [[ -z "$latest" ]]; then - latest=$(get_latest_version) - fi + latest=$(get_latest_version_from_github) + if [[ -z "$latest" ]]; then + latest=$(get_latest_version) + fi - if [[ -n "$latest" && "$VERSION" != "$latest" && "$(printf '%s\n' "$VERSION" "$latest" | sort -V | head -1)" == "$VERSION" ]]; then - printf "\nUpdate available: %s → %s, run %smo update%s\n\n" "$VERSION" "$latest" "$GREEN" "$NC" > "$msg_cache" - else - echo -n > "$msg_cache" - fi - ) & - disown 2> /dev/null || true + if [[ -n "$latest" && "$VERSION" != "$latest" && "$(printf '%s\n' "$VERSION" "$latest" | sort -V | head -1)" == "$VERSION" ]]; then + printf "\nUpdate available: %s → %s, run %smo update%s\n\n" "$VERSION" "$latest" "$GREEN" "$NC" > "$msg_cache" + else + echo -n > "$msg_cache" + fi + ) > /dev/null 2>&1 < /dev/null & + ) } show_update_notification() { @@ -494,7 +495,7 @@ remove_mole() { exit 1 fi - log_admin "Attempting to uninstall Mole via Homebrew..." + log_info "Attempting to uninstall Mole via Homebrew..." local brew_uninstall_output if ! 
brew_uninstall_output=$("$brew_cmd" uninstall --force mole 2>&1); then has_error=true diff --git a/tests/clean_misc.bats b/tests/clean_misc.bats index c5e0d3c..31282bb 100644 --- a/tests/clean_misc.bats +++ b/tests/clean_misc.bats @@ -100,6 +100,9 @@ run_with_timeout() { return 1; } clean_ds_store_tree() { :; } start_section_spinner() { :; } stop_section_spinner() { :; } +is_path_whitelisted() { return 1; } +WHITELIST_PATTERNS=() +PROTECT_FINDER_METADATA="false" scan_external_volumes EOF diff --git a/tests/clean_system_maintenance.bats b/tests/clean_system_maintenance.bats index 6208864..a594b0d 100644 --- a/tests/clean_system_maintenance.bats +++ b/tests/clean_system_maintenance.bats @@ -125,6 +125,8 @@ tmutil() { } start_section_spinner(){ :; } stop_section_spinner(){ :; } +tm_is_running(){ return 1; } +tm_snapshots_mounted(){ return 1; } DRY_RUN="false" clean_local_snapshots @@ -154,6 +156,8 @@ tmutil() { start_section_spinner(){ :; } stop_section_spinner(){ :; } note_activity(){ :; } +tm_is_running(){ return 1; } +tm_snapshots_mounted(){ return 1; } DRY_RUN="true" clean_local_snapshots @@ -188,6 +192,8 @@ tmutil() { start_section_spinner(){ :; } stop_section_spinner(){ :; } note_activity(){ :; } +tm_is_running(){ return 1; } +tm_snapshots_mounted(){ return 1; } unset -f read_key diff --git a/tests/dev_extended.bats b/tests/dev_extended.bats new file mode 100644 index 0000000..313db2d --- /dev/null +++ b/tests/dev_extended.bats @@ -0,0 +1,117 @@ +#!/usr/bin/env bats + +setup_file() { + PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)" + export PROJECT_ROOT + + ORIGINAL_HOME="${HOME:-}" + export ORIGINAL_HOME + + HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-dev-extended.XXXXXX")" + export HOME + + mkdir -p "$HOME" +} + +teardown_file() { + rm -rf "$HOME" + if [[ -n "${ORIGINAL_HOME:-}" ]]; then + export HOME="$ORIGINAL_HOME" + fi +} + +@test "clean_dev_elixir cleans hex cache" { + mkdir -p "$HOME/.mix" "$HOME/.hex" + run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF' +set -euo pipefail +source "$PROJECT_ROOT/lib/core/common.sh" +source "$PROJECT_ROOT/lib/clean/dev.sh" +safe_clean() { echo "$2"; } +clean_dev_elixir +EOF + + [ "$status" -eq 0 ] + [[ "$output" == *"Hex cache"* ]] +} + +@test "clean_dev_elixir does not clean mix archives" { + mkdir -p "$HOME/.mix/archives" + touch "$HOME/.mix/archives/test_tool.ez" + + # Source and run the function + source "$PROJECT_ROOT/lib/core/common.sh" + source "$PROJECT_ROOT/bin/clean.sh" + clean_dev_elixir > /dev/null 2>&1 || true + + # Verify the file still exists + [ -f "$HOME/.mix/archives/test_tool.ez" ] +} + +@test "clean_dev_haskell cleans cabal install cache" { + mkdir -p "$HOME/.cabal" "$HOME/.stack" + run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF' +set -euo pipefail +source "$PROJECT_ROOT/lib/core/common.sh" +source "$PROJECT_ROOT/lib/clean/dev.sh" +safe_clean() { echo "$2"; } +clean_dev_haskell +EOF + + [ "$status" -eq 0 ] + [[ "$output" == *"Cabal install cache"* ]] +} + +@test "clean_dev_haskell does not clean stack programs" { + mkdir -p "$HOME/.stack/programs/x86_64-osx" + touch "$HOME/.stack/programs/x86_64-osx/ghc-9.2.8.tar.xz" + + # Source and run the function + source "$PROJECT_ROOT/lib/core/common.sh" + source "$PROJECT_ROOT/bin/clean.sh" + clean_dev_haskell > /dev/null 2>&1 || true + + # Verify the file still exists + [ -f "$HOME/.stack/programs/x86_64-osx/ghc-9.2.8.tar.xz" ] +} + +@test "clean_dev_ocaml cleans opam cache" { + mkdir -p "$HOME/.opam" + run env 
HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF' +set -euo pipefail +source "$PROJECT_ROOT/lib/core/common.sh" +source "$PROJECT_ROOT/lib/clean/dev.sh" +safe_clean() { echo "$2"; } +clean_dev_ocaml +EOF + + [ "$status" -eq 0 ] + [[ "$output" == *"Opam cache"* ]] +} + +@test "clean_dev_editors cleans VS Code and Zed caches" { + mkdir -p "$HOME/Library/Caches/com.microsoft.VSCode" "$HOME/Library/Application Support/Code" "$HOME/Library/Caches/Zed" + run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF' +set -euo pipefail +source "$PROJECT_ROOT/lib/core/common.sh" +source "$PROJECT_ROOT/lib/clean/dev.sh" +safe_clean() { echo "$2"; } +clean_dev_editors +EOF + + [ "$status" -eq 0 ] + [[ "$output" == *"VS Code cached data"* ]] + [[ "$output" == *"Zed cache"* ]] +} + +@test "clean_dev_editors does not clean VS Code workspace storage" { + mkdir -p "$HOME/Library/Application Support/Code/User/workspaceStorage/abc123" + touch "$HOME/Library/Application Support/Code/User/workspaceStorage/abc123/workspace.json" + + # Source and run the function + source "$PROJECT_ROOT/lib/core/common.sh" + source "$PROJECT_ROOT/bin/clean.sh" + clean_dev_editors > /dev/null 2>&1 || true + + # Verify the file still exists + [ -f "$HOME/Library/Application Support/Code/User/workspaceStorage/abc123/workspace.json" ] +}