From 694c55f73bdcd5fa08230025eecb432b00d4ddd7 Mon Sep 17 00:00:00 2001 From: Tw93 Date: Mon, 29 Dec 2025 14:27:47 +0800 Subject: [PATCH] fix: implement layered error tolerance and accurate cleanup reporting (#175 #176 #180) - Fix safe_remove set -e trap in command substitution - Fix has_full_disk_access false positives and unknown state handling - Use set +e in perform_cleanup for graceful degradation - Track removal failures and only count actually deleted items (#180) - Add "Skipped X items (permission denied or in use)" notification - Improve spinner reliability with cooperative stop mechanism (#175) --- bin/clean.sh | 81 +++++++++++++++++----- bin/optimize.sh | 4 +- install.sh | 2 +- lib/clean/app_caches.sh | 29 -------- lib/clean/apps.sh | 69 ++----------------- lib/clean/brew.sh | 28 +------- lib/clean/caches.sh | 48 ++----------- lib/clean/dev.sh | 61 +++-------------- lib/clean/project.sh | 103 ++-------------------------- lib/clean/system.sh | 70 ------------------- lib/clean/user.sh | 145 +++++++--------------------------------- lib/core/base.sh | 4 +- lib/core/file_ops.sh | 21 +++++- lib/core/log.sh | 2 +- lib/core/ui.sh | 107 ++++++++++++++++++++++++----- 15 files changed, 228 insertions(+), 546 deletions(-) diff --git a/bin/clean.sh b/bin/clean.sh index 40b0f62..eacc6b0 100755 --- a/bin/clean.sh +++ b/bin/clean.sh @@ -125,16 +125,12 @@ cleanup() { fi CLEANUP_DONE=true - # Stop all spinners and clear the line - if [[ -n "${INLINE_SPINNER_PID:-}" ]] && kill -0 "$INLINE_SPINNER_PID" 2> /dev/null; then - kill "$INLINE_SPINNER_PID" 2> /dev/null || true - wait "$INLINE_SPINNER_PID" 2> /dev/null || true - INLINE_SPINNER_PID="" - fi + # Stop any inline spinner + stop_inline_spinner 2> /dev/null || true # Clear any spinner output - spinner outputs to stderr if [[ -t 1 ]]; then - printf "\r\033[K" >&2 + printf "\r\033[K" >&2 || true fi # Clean up temporary files @@ -205,6 +201,8 @@ safe_clean() { local total_size_bytes=0 local total_count=0 local skipped_count=0 
+ local removal_failed_count=0 + local permission_start=${MOLE_PERMISSION_DENIED_COUNT:-0} local show_scan_feedback=false if [[ ${#targets[@]} -gt 20 && -t 1 ]]; then @@ -316,17 +314,25 @@ safe_clean() { if [[ -f "$result_file" ]]; then read -r size count < "$result_file" 2> /dev/null || true if [[ "$count" -gt 0 && "$size" -gt 0 ]]; then + local removed=1 if [[ "$DRY_RUN" != "true" ]]; then + removed=0 # Handle symbolic links separately (only remove the link, not the target) if [[ -L "$path" ]]; then - rm "$path" 2> /dev/null || true + rm "$path" 2> /dev/null && removed=1 else - safe_remove "$path" true || true + if safe_remove "$path" true; then + removed=1 + fi fi fi - ((total_size_bytes += size)) - ((total_count += 1)) - removed_any=1 + if [[ $removed -eq 1 ]]; then + ((total_size_bytes += size)) + ((total_count += 1)) + removed_any=1 + else + ((removal_failed_count++)) + fi fi fi ((idx++)) @@ -341,17 +347,25 @@ safe_clean() { # Optimization: Skip expensive file counting if [[ "$size_bytes" -gt 0 ]]; then + local removed=1 if [[ "$DRY_RUN" != "true" ]]; then + removed=0 # Handle symbolic links separately (only remove the link, not the target) if [[ -L "$path" ]]; then - rm "$path" 2> /dev/null || true + rm "$path" 2> /dev/null && removed=1 else - safe_remove "$path" true || true + if safe_remove "$path" true; then + removed=1 + fi fi fi - ((total_size_bytes += size_bytes)) - ((total_count += 1)) - removed_any=1 + if [[ $removed -eq 1 ]]; then + ((total_size_bytes += size_bytes)) + ((total_count += 1)) + removed_any=1 + else + ((removal_failed_count++)) + fi fi ((idx++)) done @@ -361,6 +375,16 @@ safe_clean() { stop_section_spinner fi + # Track permission failures reported by safe_remove + local permission_end=${MOLE_PERMISSION_DENIED_COUNT:-0} + if [[ $permission_end -gt $permission_start && $removed_any -eq 0 ]]; then + debug_log "Permission denied while cleaning: $description" + fi + if [[ $removal_failed_count -gt 0 && "$DRY_RUN" != "true" ]]; then + echo -e 
" ${YELLOW}${ICON_WARNING}${NC} Skipped $removal_failed_count items (permission denied or in use)" + note_activity + fi + if [[ $removed_any -eq 1 ]]; then local size_human=$(bytes_to_human "$((total_size_bytes * 1024))") @@ -562,10 +586,28 @@ perform_cleanup() { fi fi + # Hint about Full Disk Access for better results (only if not already granted) + if [[ -t 1 && "$DRY_RUN" != "true" ]]; then + local fda_status=0 + has_full_disk_access + fda_status=$? + if [[ $fda_status -eq 1 ]]; then + echo "" + echo -e "${YELLOW}${ICON_WARNING}${NC} ${GRAY}Tip: Grant Full Disk Access to your terminal in System Settings for best results${NC}" + fi + fi + total_items=0 files_cleaned=0 total_size_cleaned=0 + local had_errexit=0 + [[ $- == *e* ]] && had_errexit=1 + + # Allow cleanup functions to fail without exiting the script + # Individual operations use || true for granular error handling + set +e + # ===== 1. Deep system cleanup (if admin) - Do this first while sudo is fresh ===== if [[ "$SYSTEM_CLEAN" == "true" ]]; then start_section "Deep system" @@ -745,6 +787,11 @@ perform_cleanup() { summary_details+=("Free space now: $(get_free_space)") fi + # Restore strict error handling only if it was enabled + if [[ $had_errexit -eq 1 ]]; then + set -e + fi + print_summary_block "$summary_heading" "${summary_details[@]}" printf '\n' } diff --git a/bin/optimize.sh b/bin/optimize.sh index 260f4ac..31e0026 100755 --- a/bin/optimize.sh +++ b/bin/optimize.sh @@ -340,13 +340,13 @@ main() { fi print_header if ! command -v jq > /dev/null 2>&1; then - echo -e "${RED}${ICON_ERROR}${NC} Missing dependency: jq" + echo -e "${YELLOW}${ICON_ERROR}${NC} Missing dependency: jq" echo -e "${GRAY}Install with: ${GREEN}brew install jq${NC}" exit 1 fi if ! 
command -v bc > /dev/null 2>&1; then - echo -e "${RED}${ICON_ERROR}${NC} Missing dependency: bc" + echo -e "${YELLOW}${ICON_ERROR}${NC} Missing dependency: bc" echo -e "${GRAY}Install with: ${GREEN}brew install bc${NC}" exit 1 fi diff --git a/install.sh b/install.sh index c289511..977f1f8 100755 --- a/install.sh +++ b/install.sh @@ -50,7 +50,7 @@ ICON_ERROR="☻" log_info() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}$1${NC}"; } log_success() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${GREEN}${ICON_SUCCESS}${NC} $1"; } log_warning() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${YELLOW}$1${NC}"; } -log_error() { echo -e "${RED}${ICON_ERROR}${NC} $1"; } +log_error() { echo -e "${YELLOW}${ICON_ERROR}${NC} $1"; } log_admin() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}${ICON_ADMIN}${NC} $1"; } log_confirm() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}${ICON_CONFIRM}${NC} $1"; } diff --git a/lib/clean/app_caches.sh b/lib/clean/app_caches.sh index 07c51ab..06d5e71 100644 --- a/lib/clean/app_caches.sh +++ b/lib/clean/app_caches.sh @@ -1,9 +1,7 @@ #!/bin/bash # User GUI Applications Cleanup Module # Desktop applications, communication tools, media players, games, utilities - set -euo pipefail - # Clean Xcode and iOS development tools clean_xcode_tools() { # Check if Xcode is running for safer cleanup of critical resources @@ -11,7 +9,6 @@ clean_xcode_tools() { if pgrep -x "Xcode" > /dev/null 2>&1; then xcode_running=true fi - # Safe to clean regardless of Xcode state safe_clean ~/Library/Developer/CoreSimulator/Caches/* "Simulator cache" safe_clean ~/Library/Developer/CoreSimulator/Devices/*/data/tmp/* "Simulator temp files" @@ -19,7 +16,6 @@ clean_xcode_tools() { safe_clean ~/Library/Developer/Xcode/iOS\ Device\ Logs/* "iOS device logs" safe_clean ~/Library/Developer/Xcode/watchOS\ Device\ Logs/* "watchOS device logs" safe_clean ~/Library/Developer/Xcode/Products/* "Xcode build products" - # Clean build artifacts only if Xcode is not running if [[ "$xcode_running" == "false" ]]; then 
safe_clean ~/Library/Developer/Xcode/DerivedData/* "Xcode derived data" @@ -28,7 +24,6 @@ clean_xcode_tools() { echo -e " ${YELLOW}${ICON_WARNING}${NC} Xcode is running, skipping DerivedData and Archives cleanup" fi } - # Clean code editors (VS Code, Sublime, etc.) clean_code_editors() { safe_clean ~/Library/Application\ Support/Code/logs/* "VS Code logs" @@ -37,7 +32,6 @@ clean_code_editors() { safe_clean ~/Library/Application\ Support/Code/CachedData/* "VS Code data cache" safe_clean ~/Library/Caches/com.sublimetext.*/* "Sublime Text cache" } - # Clean communication apps (Slack, Discord, Zoom, etc.) clean_communication_apps() { safe_clean ~/Library/Application\ Support/discord/Cache/* "Discord cache" @@ -53,7 +47,6 @@ clean_communication_apps() { safe_clean ~/Library/Caches/com.tencent.WeWorkMac/* "WeCom cache" safe_clean ~/Library/Caches/com.feishu.*/* "Feishu cache" } - # Clean DingTalk clean_dingtalk() { safe_clean ~/Library/Caches/dd.work.exclusive4aliding/* "DingTalk iDingTalk cache" @@ -61,14 +54,12 @@ clean_dingtalk() { safe_clean ~/Library/Application\ Support/iDingTalk/log/* "DingTalk logs" safe_clean ~/Library/Application\ Support/iDingTalk/holmeslogs/* "DingTalk holmes logs" } - # Clean AI assistants clean_ai_apps() { safe_clean ~/Library/Caches/com.openai.chat/* "ChatGPT cache" safe_clean ~/Library/Caches/com.anthropic.claudefordesktop/* "Claude desktop cache" safe_clean ~/Library/Logs/Claude/* "Claude logs" } - # Clean design and creative tools clean_design_tools() { safe_clean ~/Library/Caches/com.bohemiancoding.sketch3/* "Sketch cache" @@ -78,7 +69,6 @@ clean_design_tools() { safe_clean ~/Library/Caches/com.figma.Desktop/* "Figma cache" # Note: Raycast cache is protected - contains clipboard history (including images) } - # Clean video editing tools clean_video_tools() { safe_clean ~/Library/Caches/net.telestream.screenflow10/* "ScreenFlow cache" @@ -86,7 +76,6 @@ clean_video_tools() { safe_clean 
~/Library/Caches/com.blackmagic-design.DaVinciResolve/* "DaVinci Resolve cache" safe_clean ~/Library/Caches/com.adobe.PremierePro.*/* "Premiere Pro cache" } - # Clean 3D and CAD tools clean_3d_tools() { safe_clean ~/Library/Caches/org.blenderfoundation.blender/* "Blender cache" @@ -94,7 +83,6 @@ clean_3d_tools() { safe_clean ~/Library/Caches/com.autodesk.*/* "Autodesk cache" safe_clean ~/Library/Caches/com.sketchup.*/* "SketchUp cache" } - # Clean productivity apps clean_productivity_apps() { safe_clean ~/Library/Caches/com.tw93.MiaoYan/* "MiaoYan cache" @@ -104,14 +92,12 @@ clean_productivity_apps() { safe_clean ~/Library/Caches/com.filo.client/* "Filo cache" safe_clean ~/Library/Caches/com.flomoapp.mac/* "Flomo cache" } - # Clean music and media players (protects Spotify offline music) clean_media_players() { # Spotify cache protection: check for offline music indicators local spotify_cache="$HOME/Library/Caches/com.spotify.client" local spotify_data="$HOME/Library/Application Support/Spotify" local has_offline_music=false - # Check for offline music database or large cache (>500MB) if [[ -f "$spotify_data/PersistentCache/Storage/offline.bnk" ]] || [[ -d "$spotify_data/PersistentCache/Storage" && -n "$(find "$spotify_data/PersistentCache/Storage" -type f -name "*.file" 2> /dev/null | head -1)" ]]; then @@ -124,7 +110,6 @@ clean_media_players() { has_offline_music=true fi fi - if [[ "$has_offline_music" == "true" ]]; then echo -e " ${YELLOW}${ICON_WARNING}${NC} Spotify cache protected · offline music detected" note_activity @@ -140,7 +125,6 @@ clean_media_players() { safe_clean ~/Library/Caches/com.kugou.mac/* "Kugou Music cache" safe_clean ~/Library/Caches/com.kuwo.mac/* "Kuwo Music cache" } - # Clean video players clean_video_players() { safe_clean ~/Library/Caches/com.colliderli.iina "IINA cache" @@ -152,7 +136,6 @@ clean_video_players() { safe_clean ~/Library/Caches/com.douyu.*/* "Douyu cache" safe_clean ~/Library/Caches/com.huya.*/* "Huya cache" } - # Clean 
download managers clean_download_managers() { safe_clean ~/Library/Caches/net.xmac.aria2gui "Aria2 cache" @@ -162,7 +145,6 @@ clean_download_managers() { safe_clean ~/Library/Caches/com.folx.*/* "Folx cache" safe_clean ~/Library/Caches/com.charlessoft.pacifist/* "Pacifist cache" } - # Clean gaming platforms clean_gaming_platforms() { safe_clean ~/Library/Caches/com.valvesoftware.steam/* "Steam cache" @@ -174,33 +156,28 @@ clean_gaming_platforms() { safe_clean ~/Library/Caches/com.gog.galaxy/* "GOG Galaxy cache" safe_clean ~/Library/Caches/com.riotgames.*/* "Riot Games cache" } - # Clean translation and dictionary apps clean_translation_apps() { safe_clean ~/Library/Caches/com.youdao.YoudaoDict "Youdao Dictionary cache" safe_clean ~/Library/Caches/com.eudic.* "Eudict cache" safe_clean ~/Library/Caches/com.bob-build.Bob "Bob Translation cache" } - # Clean screenshot and screen recording tools clean_screenshot_tools() { safe_clean ~/Library/Caches/com.cleanshot.* "CleanShot cache" safe_clean ~/Library/Caches/com.reincubate.camo "Camo cache" safe_clean ~/Library/Caches/com.xnipapp.xnip "Xnip cache" } - # Clean email clients clean_email_clients() { safe_clean ~/Library/Caches/com.readdle.smartemail-Mac "Spark cache" safe_clean ~/Library/Caches/com.airmail.* "Airmail cache" } - # Clean task management apps clean_task_apps() { safe_clean ~/Library/Caches/com.todoist.mac.Todoist "Todoist cache" safe_clean ~/Library/Caches/com.any.do.* "Any.do cache" } - # Clean shell and terminal utilities clean_shell_utils() { safe_clean ~/.zcompdump* "Zsh completion cache" @@ -208,13 +185,11 @@ clean_shell_utils() { safe_clean ~/.viminfo.tmp "Vim temporary files" safe_clean ~/.wget-hsts "wget HSTS cache" } - # Clean input method and system utilities clean_system_utils() { safe_clean ~/Library/Caches/com.runjuu.Input-Source-Pro/* "Input Source Pro cache" safe_clean ~/Library/Caches/macos-wakatime.WakaTime/* "WakaTime cache" } - # Clean note-taking apps clean_note_apps() { safe_clean 
~/Library/Caches/notion.id/* "Notion cache" @@ -224,13 +199,11 @@ clean_note_apps() { safe_clean ~/Library/Caches/com.evernote.*/* "Evernote cache" safe_clean ~/Library/Caches/com.yinxiang.*/* "Yinxiang Note cache" } - # Clean launcher and automation tools clean_launcher_apps() { safe_clean ~/Library/Caches/com.runningwithcrayons.Alfred/* "Alfred cache" safe_clean ~/Library/Caches/cx.c3.theunarchiver/* "The Unarchiver cache" } - # Clean remote desktop tools clean_remote_desktop() { safe_clean ~/Library/Caches/com.teamviewer.*/* "TeamViewer cache" @@ -238,11 +211,9 @@ clean_remote_desktop() { safe_clean ~/Library/Caches/com.todesk.*/* "ToDesk cache" safe_clean ~/Library/Caches/com.sunlogin.*/* "Sunlogin cache" } - # Main function to clean all user GUI applications clean_user_gui_applications() { stop_section_spinner - clean_xcode_tools clean_code_editors clean_communication_apps diff --git a/lib/clean/apps.sh b/lib/clean/apps.sh index 2bfa317..e722ecd 100644 --- a/lib/clean/apps.sh +++ b/lib/clean/apps.sh @@ -1,26 +1,20 @@ #!/bin/bash # Application Data Cleanup Module - set -euo pipefail - -# Clean .DS_Store (Finder metadata), home uses maxdepth 5, excludes slow paths, max 500 files # Args: $1=target_dir, $2=label +# Clean .DS_Store (Finder metadata), home uses maxdepth 5, excludes slow paths, max 500 files clean_ds_store_tree() { local target="$1" local label="$2" - [[ -d "$target" ]] || return 0 - local file_count=0 local total_bytes=0 local spinner_active="false" - if [[ -t 1 ]]; then MOLE_SPINNER_PREFIX=" " start_inline_spinner "Cleaning Finder metadata..." 
spinner_active="true" fi - # Build exclusion paths for find (skip common slow/large directories) local -a exclude_paths=( -path "*/Library/Application Support/MobileSync" -prune -o @@ -30,14 +24,12 @@ clean_ds_store_tree() { -path "*/.git" -prune -o -path "*/Library/Caches" -prune -o ) - # Build find command to avoid unbound array expansion with set -u local -a find_cmd=("command" "find" "$target") if [[ "$target" == "$HOME" ]]; then find_cmd+=("-maxdepth" "5") fi find_cmd+=("${exclude_paths[@]}" "-type" "f" "-name" ".DS_Store" "-print0") - # Find .DS_Store files with exclusions and depth limit while IFS= read -r -d '' ds_file; do local size @@ -47,16 +39,13 @@ clean_ds_store_tree() { if [[ "$DRY_RUN" != "true" ]]; then rm -f "$ds_file" 2> /dev/null || true fi - if [[ $file_count -ge $MOLE_MAX_DS_STORE_FILES ]]; then break fi done < <("${find_cmd[@]}" 2> /dev/null || true) - if [[ "$spinner_active" == "true" ]]; then stop_section_spinner fi - if [[ $file_count -gt 0 ]]; then local size_human size_human=$(bytes_to_human "$total_bytes") @@ -65,7 +54,6 @@ clean_ds_store_tree() { else echo -e " ${GREEN}${ICON_SUCCESS}${NC} $label ${GREEN}($file_count files, $size_human)${NC}" fi - local size_kb=$(((total_bytes + 1023) / 1024)) ((files_cleaned += file_count)) ((total_size_cleaned += size_kb)) @@ -73,24 +61,20 @@ clean_ds_store_tree() { note_activity fi } - # Clean data for uninstalled apps (caches/logs/states older than 60 days) # Protects system apps, major vendors, scans /Applications+running processes # Max 100 items/pattern, 2s du timeout. 
Env: ORPHAN_AGE_THRESHOLD, DRY_RUN -# Scan system for installed application bundle IDs # Usage: scan_installed_apps "output_file" +# Scan system for installed application bundle IDs scan_installed_apps() { local installed_bundles="$1" - # Performance optimization: cache results for 5 minutes local cache_file="$HOME/.cache/mole/installed_apps_cache" local cache_age_seconds=300 # 5 minutes - if [[ -f "$cache_file" ]]; then local cache_mtime=$(get_file_mtime "$cache_file") local current_time=$(date +%s) local age=$((current_time - cache_mtime)) - if [[ $age -lt $cache_age_seconds ]]; then debug_log "Using cached app list (age: ${age}s)" # Verify cache file is readable and not empty @@ -105,19 +89,15 @@ scan_installed_apps() { fi fi fi - debug_log "Scanning installed applications (cache expired or missing)" - # Scan all Applications directories local -a app_dirs=( "/Applications" "/System/Applications" "$HOME/Applications" ) - # Create a temp dir for parallel results to avoid write contention local scan_tmp_dir=$(create_temp_dir) - # Parallel scan for applications local pids=() local dir_idx=0 @@ -129,109 +109,86 @@ scan_installed_apps() { while IFS= read -r app_path; do [[ -n "$app_path" ]] && app_paths+=("$app_path") done < <(find "$app_dir" -name '*.app' -maxdepth 3 -type d 2> /dev/null) - # Read bundle IDs with PlistBuddy local count=0 for app_path in "${app_paths[@]:-}"; do local plist_path="$app_path/Contents/Info.plist" [[ ! -f "$plist_path" ]] && continue - local bundle_id=$(/usr/libexec/PlistBuddy -c "Print :CFBundleIdentifier" "$plist_path" 2> /dev/null || echo "") - if [[ -n "$bundle_id" ]]; then echo "$bundle_id" ((count++)) - fi done - ) > "$scan_tmp_dir/apps_${dir_idx}.txt" & pids+=($!) 
((dir_idx++)) done - # Get running applications and LaunchAgents in parallel ( local running_apps=$(run_with_timeout 5 osascript -e 'tell application "System Events" to get bundle identifier of every application process' 2> /dev/null || echo "") echo "$running_apps" | tr ',' '\n' | sed -e 's/^ *//;s/ *$//' -e '/^$/d' > "$scan_tmp_dir/running.txt" ) & pids+=($!) - ( run_with_timeout 5 find ~/Library/LaunchAgents /Library/LaunchAgents \ -name "*.plist" -type f 2> /dev/null | xargs -I {} basename {} .plist > "$scan_tmp_dir/agents.txt" 2> /dev/null || true ) & pids+=($!) - # Wait for all background scans to complete debug_log "Waiting for ${#pids[@]} background processes: ${pids[*]}" - for pid in "${pids[@]}"; do wait "$pid" 2> /dev/null || true done - debug_log "All background processes completed" - cat "$scan_tmp_dir"/*.txt >> "$installed_bundles" 2> /dev/null || true safe_remove "$scan_tmp_dir" true - sort -u "$installed_bundles" -o "$installed_bundles" - # Cache the results ensure_user_dir "$(dirname "$cache_file")" cp "$installed_bundles" "$cache_file" 2> /dev/null || true - local app_count=$(wc -l < "$installed_bundles" 2> /dev/null | tr -d ' ') debug_log "Scanned $app_count unique applications" } - -# Check if bundle is orphaned # Usage: is_bundle_orphaned "bundle_id" "directory_path" "installed_bundles_file" +# Check if bundle is orphaned is_bundle_orphaned() { local bundle_id="$1" local directory_path="$2" local installed_bundles="$3" - # Skip system-critical and protected apps if should_protect_data "$bundle_id"; then return 1 fi - # Check if app exists in our scan if grep -Fxq "$bundle_id" "$installed_bundles" 2> /dev/null; then return 1 fi - # Check against centralized protected patterns (app_protection.sh) if should_protect_data "$bundle_id"; then return 1 fi - # Extra check for specific system bundles not covered by patterns case "$bundle_id" in loginwindow | dock | systempreferences | systemsettings | settings | controlcenter | finder | safari) return 1 
;; esac - # Check file age - only clean if 60+ days inactive if [[ -e "$directory_path" ]]; then local last_modified_epoch=$(get_file_mtime "$directory_path") local current_epoch=$(date +%s) local days_since_modified=$(((current_epoch - last_modified_epoch) / 86400)) - if [[ $days_since_modified -lt ${ORPHAN_AGE_THRESHOLD:-60} ]]; then return 1 fi fi - return 0 } - # Clean data for uninstalled apps (caches/logs/states older than 60 days) -# Protects system apps, major vendors, scans /Applications+running processes # Max 100 items/pattern, 2s du timeout. Env: ORPHAN_AGE_THRESHOLD, DRY_RUN +# Protects system apps, major vendors, scans /Applications+running processes clean_orphaned_app_data() { # Quick permission check - if we can't access Library folders, skip if ! ls "$HOME/Library/Caches" > /dev/null 2>&1; then @@ -239,24 +196,19 @@ clean_orphaned_app_data() { echo -e " ${YELLOW}${ICON_WARNING}${NC} Skipped: No permission to access Library folders" return 0 fi - # Build list of installed/active apps start_section_spinner "Scanning installed apps..." local installed_bundles=$(create_temp_file) scan_installed_apps "$installed_bundles" stop_section_spinner - # Display scan results local app_count=$(wc -l < "$installed_bundles" 2> /dev/null | tr -d ' ') echo -e " ${GREEN}${ICON_SUCCESS}${NC} Found $app_count active/installed apps" - # Track statistics local orphaned_count=0 local total_orphaned_kb=0 - # Unified orphaned resource scanner (caches, logs, states, webkit, HTTP, cookies) start_section_spinner "Scanning orphaned app resources..." 
- # Define resource types to scan # CRITICAL: NEVER add LaunchAgents or LaunchDaemons (breaks login items/startup apps) local -a resource_types=( @@ -267,49 +219,39 @@ clean_orphaned_app_data() { "$HOME/Library/HTTPStorages|HTTP|com.*:org.*:net.*:io.*" "$HOME/Library/Cookies|Cookies|*.binarycookies" ) - orphaned_count=0 - for resource_type in "${resource_types[@]}"; do IFS='|' read -r base_path label patterns <<< "$resource_type" - # Check both existence and permission to avoid hanging if [[ ! -d "$base_path" ]]; then continue fi - # Quick permission check - if we can't ls the directory, skip it if ! ls "$base_path" > /dev/null 2>&1; then continue fi - # Build file pattern array local -a file_patterns=() IFS=':' read -ra pattern_arr <<< "$patterns" for pat in "${pattern_arr[@]}"; do file_patterns+=("$base_path/$pat") done - # Scan and clean orphaned items for item_path in "${file_patterns[@]}"; do # Use shell glob (no ls needed) # Limit iterations to prevent hanging on directories with too many files local iteration_count=0 - for match in $item_path; do [[ -e "$match" ]] || continue - # Safety: limit iterations to prevent infinite loops on massive directories ((iteration_count++)) if [[ $iteration_count -gt $MOLE_MAX_ORPHAN_ITERATIONS ]]; then break fi - # Extract bundle ID from filename local bundle_id=$(basename "$match") bundle_id="${bundle_id%.savedState}" bundle_id="${bundle_id%.binarycookies}" - if is_bundle_orphaned "$bundle_id" "$match" "$installed_bundles"; then # Use timeout to prevent du from hanging on network mounts or problematic paths local size_kb @@ -324,14 +266,11 @@ clean_orphaned_app_data() { done done done - stop_section_spinner - if [[ $orphaned_count -gt 0 ]]; then local orphaned_mb=$(echo "$total_orphaned_kb" | awk '{printf "%.1f", $1/1024}') echo " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $orphaned_count items (~${orphaned_mb}MB)" note_activity fi - rm -f "$installed_bundles" } diff --git a/lib/clean/brew.sh b/lib/clean/brew.sh index 
6df6cb2..33c8b43 100644 --- a/lib/clean/brew.sh +++ b/lib/clean/brew.sh @@ -1,23 +1,19 @@ #!/bin/bash - # Clean Homebrew caches and remove orphaned dependencies -# Skips if run within 7 days, runs cleanup/autoremove in parallel with 120s timeout # Env: MO_BREW_TIMEOUT, DRY_RUN +# Skips if run within 7 days, runs cleanup/autoremove in parallel with 120s timeout clean_homebrew() { command -v brew > /dev/null 2>&1 || return 0 - # Dry run mode - just indicate what would happen if [[ "${DRY_RUN:-false}" == "true" ]]; then echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Homebrew · would cleanup and autoremove" return 0 fi - # Smart caching: check if brew cleanup was run recently (within 7 days) # Extended from 2 days to 7 days to reduce cleanup frequency local brew_cache_file="${HOME}/.cache/mole/brew_last_cleanup" local cache_valid_days=7 local should_skip=false - if [[ -f "$brew_cache_file" ]]; then local last_cleanup last_cleanup=$(cat "$brew_cache_file" 2> /dev/null || echo "0") @@ -25,15 +21,12 @@ clean_homebrew() { current_time=$(date +%s) local time_diff=$((current_time - last_cleanup)) local days_diff=$((time_diff / 86400)) - if [[ $days_diff -lt $cache_valid_days ]]; then should_skip=true echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew · cleaned ${days_diff}d ago, skipped" fi fi - [[ "$should_skip" == "true" ]] && return 0 - # Quick pre-check: determine if cleanup is needed based on cache size (<50MB) # Use timeout to prevent slow du on very large caches # If timeout occurs, assume cache is large and run cleanup @@ -42,13 +35,11 @@ clean_homebrew() { if [[ -d ~/Library/Caches/Homebrew ]]; then brew_cache_size=$(run_with_timeout 3 du -sk ~/Library/Caches/Homebrew 2> /dev/null | awk '{print $1}') local du_exit=$? 
- # Skip cleanup (but still run autoremove) if cache is small if [[ $du_exit -eq 0 && -n "$brew_cache_size" && "$brew_cache_size" -lt 51200 ]]; then skip_cleanup=true fi fi - # Display appropriate spinner message if [[ -t 1 ]]; then if [[ "$skip_cleanup" == "true" ]]; then @@ -57,30 +48,23 @@ clean_homebrew() { MOLE_SPINNER_PREFIX=" " start_inline_spinner "Homebrew cleanup and autoremove..." fi fi - local timeout_seconds=${MO_BREW_TIMEOUT:-120} - # Run brew cleanup and/or autoremove based on cache size local brew_tmp_file autoremove_tmp_file local brew_pid autoremove_pid - if [[ "$skip_cleanup" == "false" ]]; then brew_tmp_file=$(create_temp_file) (brew cleanup > "$brew_tmp_file" 2>&1) & brew_pid=$! fi - autoremove_tmp_file=$(create_temp_file) (brew autoremove > "$autoremove_tmp_file" 2>&1) & autoremove_pid=$! - local elapsed=0 local brew_done=false local autoremove_done=false - # Mark cleanup as done if it was skipped [[ "$skip_cleanup" == "true" ]] && brew_done=true - # Wait for both to complete or timeout while [[ "$brew_done" == "false" ]] || [[ "$autoremove_done" == "false" ]]; do if [[ $elapsed -ge $timeout_seconds ]]; then @@ -88,14 +72,11 @@ clean_homebrew() { kill -TERM $autoremove_pid 2> /dev/null || true break fi - [[ -n "$brew_pid" ]] && { kill -0 $brew_pid 2> /dev/null || brew_done=true; } kill -0 $autoremove_pid 2> /dev/null || autoremove_done=true - sleep 1 ((elapsed++)) done - # Wait for processes to finish local brew_success=false if [[ "$skip_cleanup" == "false" && -n "$brew_pid" ]]; then @@ -103,14 +84,11 @@ clean_homebrew() { brew_success=true fi fi - local autoremove_success=false if wait $autoremove_pid 2> /dev/null; then autoremove_success=true fi - if [[ -t 1 ]]; then stop_inline_spinner; fi - # Process cleanup output and extract metrics if [[ "$skip_cleanup" == "true" ]]; then # Cleanup was skipped due to small cache size @@ -122,7 +100,6 @@ clean_homebrew() { local removed_count freed_space removed_count=$(printf '%s\n' "$brew_output" | 
grep -c "Removing:" 2> /dev/null || true) freed_space=$(printf '%s\n' "$brew_output" | grep -o "[0-9.]*[KMGT]B freed" 2> /dev/null | tail -1 || true) - if [[ $removed_count -gt 0 ]] || [[ -n "$freed_space" ]]; then if [[ -n "$freed_space" ]]; then echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew cleanup ${GREEN}($freed_space)${NC}" @@ -133,21 +110,18 @@ clean_homebrew() { elif [[ $elapsed -ge $timeout_seconds ]]; then echo -e " ${YELLOW}${ICON_WARNING}${NC} Homebrew cleanup timed out · run ${GRAY}brew cleanup${NC} manually" fi - # Process autoremove output - only show if packages were removed if [[ "$autoremove_success" == "true" && -f "$autoremove_tmp_file" ]]; then local autoremove_output autoremove_output=$(cat "$autoremove_tmp_file" 2> /dev/null || echo "") local removed_packages removed_packages=$(printf '%s\n' "$autoremove_output" | grep -c "^Uninstalling" 2> /dev/null || true) - if [[ $removed_packages -gt 0 ]]; then echo -e " ${GREEN}${ICON_SUCCESS}${NC} Removed orphaned dependencies (${removed_packages} packages)" fi elif [[ $elapsed -ge $timeout_seconds ]]; then echo -e " ${YELLOW}${ICON_WARNING}${NC} Autoremove timed out · run ${GRAY}brew autoremove${NC} manually" fi - # Update cache timestamp on successful completion or when cleanup was intelligently skipped # This prevents repeated cache size checks within the 7-day window if [[ "$skip_cleanup" == "true" ]] || [[ "$brew_success" == "true" ]] || [[ "$autoremove_success" == "true" ]]; then diff --git a/lib/clean/caches.sh b/lib/clean/caches.sh index 2297d55..9c211f7 100644 --- a/lib/clean/caches.sh +++ b/lib/clean/caches.sh @@ -1,19 +1,14 @@ #!/bin/bash # Cache Cleanup Module - set -euo pipefail - -# Trigger all TCC permission dialogs upfront to avoid random interruptions # Only runs once (uses ~/.cache/mole/permissions_granted flag) +# Trigger all TCC permission dialogs upfront to avoid random interruptions check_tcc_permissions() { # Only check in interactive mode [[ -t 1 ]] || return 0 - local 
permission_flag="$HOME/.cache/mole/permissions_granted" - # Skip if permissions were already granted [[ -f "$permission_flag" ]] && return 0 - # Key protected directories that require TCC approval local -a tcc_dirs=( "$HOME/Library/Caches" @@ -22,14 +17,12 @@ check_tcc_permissions() { "$HOME/Library/Containers" "$HOME/.cache" ) - # Quick permission test - if first directory is accessible, likely others are too # Use simple ls test instead of find to avoid triggering permission dialogs prematurely local needs_permission_check=false if ! ls "$HOME/Library/Caches" > /dev/null 2>&1; then needs_permission_check=true fi - if [[ "$needs_permission_check" == "true" ]]; then echo "" echo -e "${BLUE}First-time setup${NC}" @@ -38,44 +31,35 @@ check_tcc_permissions() { echo "" echo -ne "${PURPLE}${ICON_ARROW}${NC} Press ${GREEN}Enter${NC} to continue: " read -r - MOLE_SPINNER_PREFIX="" start_inline_spinner "Requesting permissions..." - # Trigger all TCC prompts upfront by accessing each directory # Using find -maxdepth 1 ensures we touch the directory without deep scanning for dir in "${tcc_dirs[@]}"; do [[ -d "$dir" ]] && command find "$dir" -maxdepth 1 -type d > /dev/null 2>&1 done - stop_inline_spinner echo "" fi - # Mark permissions as granted (won't prompt again) ensure_user_file "$permission_flag" + return 0 } - -# Clean browser Service Worker cache, protecting web editing tools (capcut, photopea, pixlr) # Args: $1=browser_name, $2=cache_path +# Clean browser Service Worker cache, protecting web editing tools (capcut, photopea, pixlr) clean_service_worker_cache() { local browser_name="$1" local cache_path="$2" - [[ ! -d "$cache_path" ]] && return 0 - local cleaned_size=0 local protected_count=0 - # Find all cache directories and calculate sizes with timeout protection while IFS= read -r cache_dir; do [[ ! 
-d "$cache_dir" ]] && continue - # Extract domain from path using regex # Pattern matches: letters/numbers, hyphens, then dot, then TLD # Example: "abc123_https_example.com_0" → "example.com" local domain=$(basename "$cache_dir" | grep -oE '[a-zA-Z0-9][-a-zA-Z0-9]*\.[a-zA-Z]{2,}' | head -1 || echo "") local size=$(run_with_timeout 5 get_path_size_kb "$cache_dir") - # Check if domain is protected local is_protected=false for protected_domain in "${PROTECTED_SW_DOMAINS[@]}"; do @@ -85,7 +69,6 @@ clean_service_worker_cache() { break fi done - # Clean if not protected if [[ "$is_protected" == "false" ]]; then if [[ "$DRY_RUN" != "true" ]]; then @@ -94,7 +77,6 @@ clean_service_worker_cache() { cleaned_size=$((cleaned_size + size)) fi done < <(run_with_timeout 10 sh -c "find '$cache_path' -type d -depth 2 2> /dev/null || true") - if [[ $cleaned_size -gt 0 ]]; then # Temporarily stop spinner for clean output local spinner_was_running=false @@ -102,7 +84,6 @@ clean_service_worker_cache() { stop_inline_spinner spinner_was_running=true fi - local cleaned_mb=$((cleaned_size / 1024)) if [[ "$DRY_RUN" != "true" ]]; then if [[ $protected_count -gt 0 ]]; then @@ -114,19 +95,16 @@ clean_service_worker_cache() { echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $browser_name Service Worker (would clean ${cleaned_mb}MB, ${protected_count} protected)" fi note_activity - # Restart spinner if it was running if [[ "$spinner_was_running" == "true" ]]; then MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning browser Service Worker caches..." 
fi fi } - -# Clean Next.js (.next/cache) and Python (__pycache__) build caches # Uses maxdepth 3, excludes Library/.Trash/node_modules, 10s timeout per scan +# Clean Next.js (.next/cache) and Python (__pycache__) build caches clean_project_caches() { stop_inline_spinner 2> /dev/null || true - # Quick check: skip if user likely doesn't have development projects local has_dev_projects=false local -a common_dev_dirs=( @@ -149,14 +127,12 @@ clean_project_caches() { "$HOME/dotnet" "$HOME/node" ) - for dir in "${common_dev_dirs[@]}"; do if [[ -d "$dir" ]]; then has_dev_projects=true break fi done - # If no common dev directories found, perform feature-based detection # Check for project markers in $HOME (node_modules, .git, target, etc.) if [[ "$has_dev_projects" == "false" ]]; then @@ -170,14 +146,12 @@ clean_project_caches() { "pom.xml" "build.gradle" ) - local spinner_active=false if [[ -t 1 ]]; then MOLE_SPINNER_PREFIX=" " start_inline_spinner "Detecting dev projects..." spinner_active=true fi - for marker in "${project_markers[@]}"; do # Quick check with maxdepth 2 and 3s timeout to avoid slow scans if run_with_timeout 3 sh -c "find '$HOME' -maxdepth 2 -name '$marker' -not -path '*/Library/*' -not -path '*/.Trash/*' 2>/dev/null | head -1" | grep -q .; then @@ -185,26 +159,21 @@ clean_project_caches() { break fi done - if [[ "$spinner_active" == "true" ]]; then stop_inline_spinner 2> /dev/null || true fi - # If still no dev projects found, skip scanning [[ "$has_dev_projects" == "false" ]] && return 0 fi - if [[ -t 1 ]]; then MOLE_SPINNER_PREFIX=" " start_inline_spinner "Searching project caches..." fi - local nextjs_tmp_file nextjs_tmp_file=$(create_temp_file) local pycache_tmp_file pycache_tmp_file=$(create_temp_file) local find_timeout=10 - # 1. Start Next.js search ( command find "$HOME" -P -mount -type d -name ".next" -maxdepth 3 \ @@ -215,7 +184,6 @@ clean_project_caches() { 2> /dev/null || true ) > "$nextjs_tmp_file" 2>&1 & local next_pid=$! - # 2. 
Start Python search ( command find "$HOME" -P -mount -type d -name "__pycache__" -maxdepth 3 \ @@ -226,7 +194,6 @@ clean_project_caches() { 2> /dev/null || true ) > "$pycache_tmp_file" 2>&1 & local py_pid=$! - # 3. Wait for both with timeout (using smaller intervals for better responsiveness) local elapsed=0 local check_interval=0.2 # Check every 200ms instead of 1s for smoother experience @@ -237,13 +204,11 @@ clean_project_caches() { sleep $check_interval elapsed=$(echo "$elapsed + $check_interval" | awk '{print $1 + $2}') done - # 4. Clean up any stuck processes for pid in $next_pid $py_pid; do if kill -0 "$pid" 2> /dev/null; then # Send TERM signal first kill -TERM "$pid" 2> /dev/null || true - # Wait up to 2 seconds for graceful termination local grace_period=0 while [[ $grace_period -lt 20 ]]; do @@ -253,28 +218,23 @@ clean_project_caches() { sleep 0.1 ((grace_period++)) done - # Force kill if still running if kill -0 "$pid" 2> /dev/null; then kill -KILL "$pid" 2> /dev/null || true fi - # Final wait (should be instant now) wait "$pid" 2> /dev/null || true else wait "$pid" 2> /dev/null || true fi done - if [[ -t 1 ]]; then stop_inline_spinner fi - # 5. Process Next.js results while IFS= read -r next_dir; do [[ -d "$next_dir/cache" ]] && safe_clean "$next_dir/cache"/* "Next.js build cache" || true done < "$nextjs_tmp_file" - # 6. 
Process Python results while IFS= read -r pycache; do [[ -d "$pycache" ]] && safe_clean "$pycache"/* "Python bytecode cache" || true diff --git a/lib/clean/dev.sh b/lib/clean/dev.sh index 32414f8..65e3797 100644 --- a/lib/clean/dev.sh +++ b/lib/clean/dev.sh @@ -1,13 +1,11 @@ #!/bin/bash # Developer Tools Cleanup Module - set -euo pipefail - # Helper function to clean tool caches using their built-in commands # Args: $1 - description, $@ - command to execute # Env: DRY_RUN -# Note: Try to estimate potential savings (many tool caches don't have a direct path, # so we just report the action if we can't easily find a path) +# Note: Try to estimate potential savings (many tool caches don't have a direct path, clean_tool_cache() { local description="$1" shift @@ -18,30 +16,27 @@ clean_tool_cache() { else echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $description · would clean" fi + return 0 } - # Clean npm cache (command + directories) -# npm cache clean clears official npm cache, safe_clean handles alternative package managers # Env: DRY_RUN +# npm cache clean clears official npm cache, safe_clean handles alternative package managers clean_dev_npm() { if command -v npm > /dev/null 2>&1; then # clean_tool_cache now calculates size before cleanup for better statistics clean_tool_cache "npm cache" npm cache clean --force note_activity fi - # Clean pnpm store cache local pnpm_default_store=~/Library/pnpm/store if command -v pnpm > /dev/null 2>&1; then # Use pnpm's built-in prune command clean_tool_cache "pnpm cache" pnpm store prune - # Get the actual store path to check if default is orphaned local pnpm_store_path start_section_spinner "Checking store path..." 
pnpm_store_path=$(run_with_timeout 2 pnpm store path 2> /dev/null) || pnpm_store_path="" stop_section_spinner - # If store path is different from default, clean the orphaned default if [[ -n "$pnpm_store_path" && "$pnpm_store_path" != "$pnpm_default_store" ]]; then safe_clean "$pnpm_default_store"/* "Orphaned pnpm store" @@ -51,24 +46,21 @@ clean_dev_npm() { safe_clean "$pnpm_default_store"/* "pnpm store" fi note_activity - # Clean alternative package manager caches safe_clean ~/.tnpm/_cacache/* "tnpm cache directory" safe_clean ~/.tnpm/_logs/* "tnpm logs" safe_clean ~/.yarn/cache/* "Yarn cache" safe_clean ~/.bun/install/cache/* "Bun cache" } - # Clean Python/pip cache (command + directories) -# pip cache purge clears official pip cache, safe_clean handles other Python tools # Env: DRY_RUN +# pip cache purge clears official pip cache, safe_clean handles other Python tools clean_dev_python() { if command -v pip3 > /dev/null 2>&1; then # clean_tool_cache now calculates size before cleanup for better statistics clean_tool_cache "pip cache" bash -c 'pip3 cache purge >/dev/null 2>&1 || true' note_activity fi - # Clean Python ecosystem caches safe_clean ~/.pyenv/cache/* "pyenv cache" safe_clean ~/.cache/poetry/* "Poetry cache" @@ -84,10 +76,9 @@ clean_dev_python() { safe_clean ~/anaconda3/pkgs/* "Anaconda packages cache" safe_clean ~/.cache/wandb/* "Weights & Biases cache" } - # Clean Go cache (command + directories) -# go clean handles build and module caches comprehensively # Env: DRY_RUN +# go clean handles build and module caches comprehensively clean_dev_go() { if command -v go > /dev/null 2>&1; then # clean_tool_cache now calculates size before cleanup for better statistics @@ -95,16 +86,14 @@ clean_dev_go() { note_activity fi } - # Clean Rust/cargo cache directories clean_dev_rust() { safe_clean ~/.cargo/registry/cache/* "Rust cargo cache" safe_clean ~/.cargo/git/* "Cargo git cache" safe_clean ~/.rustup/downloads/* "Rust downloads cache" } - -# Clean Docker cache 
(command + directories) # Env: DRY_RUN +# Clean Docker cache (command + directories) clean_dev_docker() { if command -v docker > /dev/null 2>&1; then if [[ "$DRY_RUN" != "true" ]]; then @@ -115,7 +104,6 @@ clean_dev_docker() { docker_running=true fi stop_section_spinner - if [[ "$docker_running" == "true" ]]; then clean_tool_cache "Docker build cache" docker builder prune -af else @@ -131,7 +119,6 @@ clean_dev_docker() { retry_success=true fi stop_section_spinner - if [[ "$retry_success" == "true" ]]; then clean_tool_cache "Docker build cache" docker builder prune -af else @@ -150,12 +137,10 @@ clean_dev_docker() { echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Docker build cache · would clean" fi fi - safe_clean ~/.docker/buildx/cache/* "Docker BuildX cache" } - -# Clean Nix package manager # Env: DRY_RUN +# Clean Nix package manager clean_dev_nix() { if command -v nix-collect-garbage > /dev/null 2>&1; then if [[ "$DRY_RUN" != "true" ]]; then @@ -166,7 +151,6 @@ clean_dev_nix() { note_activity fi } - # Clean cloud CLI tools cache clean_dev_cloud() { safe_clean ~/.kube/cache/* "Kubernetes cache" @@ -175,7 +159,6 @@ clean_dev_cloud() { safe_clean ~/.config/gcloud/logs/* "Google Cloud logs" safe_clean ~/.azure/logs/* "Azure CLI logs" } - # Clean frontend build tool caches clean_dev_frontend() { safe_clean ~/.cache/typescript/* "TypeScript cache" @@ -190,23 +173,20 @@ clean_dev_frontend() { safe_clean ~/.cache/eslint/* "ESLint cache" safe_clean ~/.cache/prettier/* "Prettier cache" } - # Clean mobile development tools # iOS simulator cleanup can free significant space (70GB+ in some cases) -# DeviceSupport files accumulate for each iOS version connected # Simulator runtime caches can grow large over time +# DeviceSupport files accumulate for each iOS version connected clean_dev_mobile() { # Clean Xcode unavailable simulators # Removes old and unused local iOS simulator data from old unused runtimes # Can free up significant space (70GB+ in some cases) if command -v xcrun > 
/dev/null 2>&1; then debug_log "Checking for unavailable Xcode simulators" - if [[ "$DRY_RUN" == "true" ]]; then clean_tool_cache "Xcode unavailable simulators" xcrun simctl delete unavailable else start_section_spinner "Checking unavailable simulators..." - # Run command manually to control UI output order if xcrun simctl delete unavailable > /dev/null 2>&1; then stop_section_spinner @@ -218,7 +198,6 @@ clean_dev_mobile() { fi note_activity fi - # Clean iOS DeviceSupport - more comprehensive cleanup # DeviceSupport directories store debug symbols for each iOS version # Safe to clean caches and logs, but preserve device support files themselves @@ -226,11 +205,9 @@ clean_dev_mobile() { safe_clean ~/Library/Developer/Xcode/iOS\ DeviceSupport/*.log "iOS device support logs" safe_clean ~/Library/Developer/Xcode/watchOS\ DeviceSupport/*/Symbols/System/Library/Caches/* "watchOS device symbol cache" safe_clean ~/Library/Developer/Xcode/tvOS\ DeviceSupport/*/Symbols/System/Library/Caches/* "tvOS device symbol cache" - # Clean simulator runtime caches # RuntimeRoot caches can accumulate system library caches safe_clean ~/Library/Developer/CoreSimulator/Profiles/Runtimes/*/Contents/Resources/RuntimeRoot/System/Library/Caches/* "Simulator runtime cache" - safe_clean ~/Library/Caches/Google/AndroidStudio*/* "Android Studio cache" safe_clean ~/Library/Caches/CocoaPods/* "CocoaPods cache" safe_clean ~/.cache/flutter/* "Flutter cache" @@ -239,7 +216,6 @@ clean_dev_mobile() { safe_clean ~/Library/Developer/Xcode/UserData/IB\ Support/* "Xcode Interface Builder cache" safe_clean ~/.cache/swift-package-manager/* "Swift package manager cache" } - # Clean JVM ecosystem tools clean_dev_jvm() { safe_clean ~/.gradle/caches/* "Gradle caches" @@ -247,7 +223,6 @@ clean_dev_jvm() { safe_clean ~/.sbt/* "SBT cache" safe_clean ~/.ivy2/cache/* "Ivy cache" } - # Clean other language tools clean_dev_other_langs() { safe_clean ~/.bundle/cache/* "Ruby Bundler cache" @@ -258,7 +233,6 @@ 
clean_dev_other_langs() { safe_clean ~/.cache/zig/* "Zig cache" safe_clean ~/Library/Caches/deno/* "Deno cache" } - # Clean CI/CD and DevOps tools clean_dev_cicd() { safe_clean ~/.cache/terraform/* "Terraform cache" @@ -270,7 +244,6 @@ clean_dev_cicd() { safe_clean ~/.circleci/cache/* "CircleCI cache" safe_clean ~/.sonar/* "SonarQube cache" } - # Clean database tools clean_dev_database() { safe_clean ~/Library/Caches/com.sequel-ace.sequel-ace/* "Sequel Ace cache" @@ -280,7 +253,6 @@ clean_dev_database() { safe_clean ~/Library/Caches/com.dbeaver.* "DBeaver cache" safe_clean ~/Library/Caches/com.redis.RedisInsight "Redis Insight cache" } - # Clean API/network debugging tools clean_dev_api_tools() { safe_clean ~/Library/Caches/com.postmanlabs.mac/* "Postman cache" @@ -290,7 +262,6 @@ clean_dev_api_tools() { safe_clean ~/Library/Caches/com.charlesproxy.charles/* "Charles Proxy cache" safe_clean ~/Library/Caches/com.proxyman.NSProxy/* "Proxyman cache" } - # Clean misc dev tools clean_dev_misc() { safe_clean ~/Library/Caches/com.unity3d.*/* "Unity cache" @@ -301,7 +272,6 @@ clean_dev_misc() { safe_clean ~/Library/Caches/KSCrash/* "KSCrash reports" safe_clean ~/Library/Caches/com.crashlytics.data/* "Crashlytics data" } - # Clean shell and version control clean_dev_shell() { safe_clean ~/.gitconfig.lock "Git config lock" @@ -312,7 +282,6 @@ clean_dev_shell() { safe_clean ~/.zsh_history.bak* "Zsh history backup" safe_clean ~/.cache/pre-commit/* "pre-commit cache" } - # Clean network utilities clean_dev_network() { safe_clean ~/.cache/curl/* "curl cache" @@ -320,26 +289,22 @@ clean_dev_network() { safe_clean ~/Library/Caches/curl/* "macOS curl cache" safe_clean ~/Library/Caches/wget/* "macOS wget cache" } - # Clean orphaned SQLite temporary files (-shm and -wal files) # Strategy: Only clean truly orphaned temp files where base database is missing -# This is fast and safe - skip complex checks for files with existing base DB # Env: DRY_RUN +# This is fast and safe - skip 
complex checks for files with existing base DB clean_sqlite_temp_files() { # Skip this cleanup due to low ROI (收益比低,经常没东西可清理) # Find scan is still slow even optimized, and orphaned files are rare return 0 } - # Main developer tools cleanup function -# Calls all specialized cleanup functions # Env: DRY_RUN +# Calls all specialized cleanup functions clean_developer_tools() { stop_section_spinner - # Clean SQLite temporary files first clean_sqlite_temp_files - clean_dev_npm clean_dev_python clean_dev_go @@ -349,10 +314,8 @@ clean_developer_tools() { clean_dev_nix clean_dev_shell clean_dev_frontend - # Project build caches (delegated to clean_caches module) clean_project_caches - clean_dev_mobile clean_dev_jvm clean_dev_other_langs @@ -361,16 +324,13 @@ clean_developer_tools() { clean_dev_api_tools clean_dev_network clean_dev_misc - # Homebrew caches and cleanup (delegated to clean_brew module) safe_clean ~/Library/Caches/Homebrew/* "Homebrew cache" - # Clean Homebrew locks intelligently (avoid repeated sudo prompts) local brew_lock_dirs=( "/opt/homebrew/var/homebrew/locks" "/usr/local/var/homebrew/locks" ) - for lock_dir in "${brew_lock_dirs[@]}"; do if [[ -d "$lock_dir" && -w "$lock_dir" ]]; then # User can write, safe to clean @@ -384,6 +344,5 @@ clean_developer_tools() { fi fi done - clean_homebrew } diff --git a/lib/clean/project.sh b/lib/clean/project.sh index 5763c2c..115e76d 100644 --- a/lib/clean/project.sh +++ b/lib/clean/project.sh @@ -1,9 +1,7 @@ #!/bin/bash # Project Purge Module (mo purge) # Removes heavy project build artifacts and dependencies - set -euo pipefail - # Targets to look for (heavy build artifacts) readonly PURGE_TARGETS=( "node_modules" @@ -23,14 +21,11 @@ readonly PURGE_TARGETS=( ".parcel-cache" # Parcel bundler ".dart_tool" # Flutter/Dart build cache ) - # Minimum age in days before considering for cleanup readonly MIN_AGE_DAYS=7 - # Scan depth defaults (relative to search root) readonly PURGE_MIN_DEPTH_DEFAULT=2 readonly 
PURGE_MAX_DEPTH_DEFAULT=8 - # Search paths (only project directories) readonly PURGE_SEARCH_PATHS=( "$HOME/www" @@ -42,43 +37,36 @@ readonly PURGE_SEARCH_PATHS=( "$HOME/Repos" "$HOME/Development" ) - -# Check if path is safe to clean (must be inside a project directory) # Args: $1 - path to check +# Check if path is safe to clean (must be inside a project directory) is_safe_project_artifact() { local path="$1" local search_path="$2" - # Path must be absolute if [[ "$path" != /* ]]; then return 1 fi - # Must not be a direct child of HOME directory # e.g., ~/.gradle is NOT safe, but ~/Projects/foo/.gradle IS safe local relative_path="${path#"$search_path"/}" local depth=$(echo "$relative_path" | tr -cd '/' | wc -c) - # Require at least 1 level deep (inside a project folder) # e.g., ~/www/weekly/node_modules is OK (depth >= 1) # but ~/www/node_modules is NOT OK (depth < 1) if [[ $depth -lt 1 ]]; then return 1 fi - return 0 } - # Fast scan using fd or optimized find # Args: $1 - search path, $2 - output file -# Scan for purge targets using strict project boundary checks # Args: $1 - search path, $2 - output file +# Scan for purge targets using strict project boundary checks scan_purge_targets() { local search_path="$1" local output_file="$2" local min_depth="${MOLE_PURGE_MIN_DEPTH:-$PURGE_MIN_DEPTH_DEFAULT}" local max_depth="${MOLE_PURGE_MAX_DEPTH:-$PURGE_MAX_DEPTH_DEFAULT}" - if [[ ! "$min_depth" =~ ^[0-9]+$ ]]; then min_depth="$PURGE_MIN_DEPTH_DEFAULT" fi @@ -88,11 +76,9 @@ scan_purge_targets() { if [[ "$max_depth" -lt "$min_depth" ]]; then max_depth="$min_depth" fi - if [[ ! -d "$search_path" ]]; then return fi - # Use fd for fast parallel search if available if command -v fd > /dev/null 2>&1; then local fd_args=( @@ -108,11 +94,9 @@ scan_purge_targets() { "--exclude" ".Trash" "--exclude" "Applications" ) - for target in "${PURGE_TARGETS[@]}"; do fd_args+=("-g" "$target") done - # Run fd command fd "${fd_args[@]}" . 
"$search_path" 2> /dev/null | while IFS= read -r item; do if is_safe_project_artifact "$item" "$search_path"; then @@ -123,68 +107,55 @@ scan_purge_targets() { # Fallback to optimized find with pruning # This prevents descending into heavily nested dirs like node_modules once found, # providing a massive speedup (O(project_dirs) vs O(files)). - local prune_args=() - # 1. Directories to prune (ignore completely) local prune_dirs=(".git" "Library" ".Trash" "Applications") for dir in "${prune_dirs[@]}"; do # -name "DIR" -prune -o prune_args+=("-name" "$dir" "-prune" "-o") done - # 2. Targets to find (print AND prune) # If we find node_modules, we print it and STOP looking inside it for target in "${PURGE_TARGETS[@]}"; do # -name "TARGET" -print -prune -o prune_args+=("-name" "$target" "-print" "-prune" "-o") done - # Run find command # Logic: ( prune_pattern -prune -o target_pattern -print -prune ) # Note: We rely on implicit recursion for directories that don't match any pattern. # -print is only called explicitly on targets. - # Removing the trailing -o from loop construction if necessary? # Actually my loop adds -o at the end. I need to handle that. # Let's verify the array construction. - # Re-building args cleanly: local find_expr=() - # Excludes for dir in "${prune_dirs[@]}"; do find_expr+=("-name" "$dir" "-prune" "-o") done - # Targets local i=0 for target in "${PURGE_TARGETS[@]}"; do find_expr+=("-name" "$target" "-print" "-prune") - # Add -o unless it's the very last item of targets if [[ $i -lt $((${#PURGE_TARGETS[@]} - 1)) ]]; then find_expr+=("-o") fi ((i++)) done - command find "$search_path" -mindepth "$min_depth" -maxdepth "$max_depth" -type d \ \( "${find_expr[@]}" \) 2> /dev/null | while IFS= read -r item; do - if is_safe_project_artifact "$item" "$search_path"; then echo "$item" fi done | filter_nested_artifacts > "$output_file" fi } - # Filter out nested artifacts (e.g. 
node_modules inside node_modules) filter_nested_artifacts() { while IFS= read -r item; do local parent_dir=$(dirname "$item") local is_nested=false - for target in "${PURGE_TARGETS[@]}"; do # Check if parent directory IS a target or IS INSIDE a target # e.g. .../node_modules/foo/node_modules -> parent has node_modules @@ -194,39 +165,33 @@ filter_nested_artifacts() { break fi done - if [[ "$is_nested" == "false" ]]; then echo "$item" fi done } - -# Check if a path was modified recently (safety check) # Args: $1 - path +# Check if a path was modified recently (safety check) is_recently_modified() { local path="$1" local age_days=$MIN_AGE_DAYS - if [[ ! -e "$path" ]]; then return 1 fi - # Get modification time using base.sh helper (handles GNU vs BSD stat) local mod_time mod_time=$(get_file_mtime "$path") local current_time=$(date +%s) local age_seconds=$((current_time - mod_time)) local age_in_days=$((age_seconds / 86400)) - if [[ $age_in_days -lt $age_days ]]; then return 0 # Recently modified else return 1 # Old enough to clean fi } - -# Get human-readable size of directory # Args: $1 - path +# Get human-readable size of directory get_dir_size_kb() { local path="$1" if [[ -d "$path" ]]; then @@ -235,20 +200,17 @@ get_dir_size_kb() { echo "0" fi } - # Simple category selector (for purge only) # Args: category names and metadata as arrays (passed via global vars) -# Returns: selected indices in PURGE_SELECTION_RESULT (comma-separated) # Uses PURGE_RECENT_CATEGORIES to mark categories with recent items (default unselected) +# Returns: selected indices in PURGE_SELECTION_RESULT (comma-separated) select_purge_categories() { local -a categories=("$@") local total_items=${#categories[@]} local clear_line=$'\r\033[2K' - if [[ $total_items -eq 0 ]]; then return 1 fi - # Initialize selection (all selected by default, except recent ones) local -a selected=() IFS=',' read -r -a recent_flags <<< "${PURGE_RECENT_CATEGORIES:-}" @@ -260,13 +222,11 @@ select_purge_categories() { 
selected[i]=true fi done - local cursor_pos=0 local original_stty="" if [[ -t 0 ]] && command -v stty > /dev/null 2>&1; then original_stty=$(stty -g 2> /dev/null || echo "") fi - # Terminal control functions restore_terminal() { trap - EXIT INT TERM @@ -275,13 +235,11 @@ select_purge_categories() { stty "${original_stty}" 2> /dev/null || stty sane 2> /dev/null || true fi } - # shellcheck disable=SC2329 handle_interrupt() { restore_terminal exit 130 } - draw_menu() { printf "\033[H" # Calculate total size of selected items for header @@ -296,48 +254,37 @@ select_purge_categories() { done local selected_gb selected_gb=$(echo "scale=1; $selected_size/1024/1024" | bc) - printf "%s\n" "$clear_line" printf "%s${PURPLE_BOLD}Select Categories to Clean${NC} ${GRAY}- ${selected_gb}GB ($selected_count selected)${NC}\n" "$clear_line" printf "%s\n" "$clear_line" - IFS=',' read -r -a recent_flags <<< "${PURGE_RECENT_CATEGORIES:-}" for ((i = 0; i < total_items; i++)); do local checkbox="$ICON_EMPTY" [[ ${selected[i]} == true ]] && checkbox="$ICON_SOLID" - local recent_marker="" [[ ${recent_flags[i]:-false} == "true" ]] && recent_marker=" ${GRAY}| Recent${NC}" - if [[ $i -eq $cursor_pos ]]; then printf "%s${CYAN}${ICON_ARROW} %s %s%s${NC}\n" "$clear_line" "$checkbox" "${categories[i]}" "$recent_marker" else printf "%s %s %s%s\n" "$clear_line" "$checkbox" "${categories[i]}" "$recent_marker" fi done - printf "%s\n" "$clear_line" printf "%s${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN} | Space Select | Enter Confirm | A All | I Invert | Q Quit${NC}\n" "$clear_line" } - trap restore_terminal EXIT trap handle_interrupt INT TERM - # Preserve interrupt character for Ctrl-C stty -echo -icanon intr ^C 2> /dev/null || true hide_cursor - if [[ -t 1 ]]; then clear_screen fi - # Main loop while true; do draw_menu - # Read key IFS= read -r -s -n1 key || key="" - case "$key" in $'\x1b') # Arrow keys or ESC @@ -393,20 +340,17 @@ select_purge_categories() { PURGE_SELECTION_RESULT+="$i" fi done - 
restore_terminal return 0 ;; esac done } - # Main cleanup function - scans and prompts user to select artifacts to clean clean_project_artifacts() { local -a all_found_items=() local -a safe_to_clean=() local -a recently_modified=() - # Set up cleanup on interrupt # Note: Declared without 'local' so cleanup_scan trap can access them scan_pids=() @@ -428,35 +372,29 @@ clean_project_artifacts() { exit 130 } trap cleanup_scan INT TERM - # Start parallel scanning of all paths at once if [[ -t 1 ]]; then start_inline_spinner "Scanning projects..." fi - # Launch all scans in parallel for path in "${PURGE_SEARCH_PATHS[@]}"; do if [[ -d "$path" ]]; then local scan_output scan_output=$(mktemp) scan_temps+=("$scan_output") - # Launch scan in background for true parallelism scan_purge_targets "$path" "$scan_output" & local scan_pid=$! scan_pids+=("$scan_pid") fi done - # Wait for all scans to complete for pid in "${scan_pids[@]+"${scan_pids[@]}"}"; do wait "$pid" 2> /dev/null || true done - if [[ -t 1 ]]; then stop_inline_spinner fi - # Collect all results for scan_output in "${scan_temps[@]+"${scan_temps[@]}"}"; do if [[ -f "$scan_output" ]]; then @@ -468,17 +406,14 @@ clean_project_artifacts() { rm -f "$scan_output" fi done - # Clean up trap trap - INT TERM - if [[ ${#all_found_items[@]} -eq 0 ]]; then echo "" echo -e "${GREEN}${ICON_SUCCESS}${NC} Great! No old project artifacts to clean" printf '\n' return 2 # Special code: nothing to clean fi - # Mark recently modified items (for default selection state) for item in "${all_found_items[@]}"; do if is_recently_modified "$item"; then @@ -487,23 +422,19 @@ clean_project_artifacts() { # Add all items to safe_to_clean, let user choose safe_to_clean+=("$item") done - # Build menu options - one per artifact if [[ -t 1 ]]; then start_inline_spinner "Calculating sizes..." 
fi - local -a menu_options=() local -a item_paths=() local -a item_sizes=() local -a item_recent_flags=() - # Helper to get project name from path # For ~/www/pake/src-tauri/target -> returns "pake" # For ~/www/project/node_modules/xxx/node_modules -> returns "project" get_project_name() { local path="$1" - # Find the project root by looking for direct child of search paths local search_roots=() if [[ ${#PURGE_SEARCH_PATHS[@]} -gt 0 ]]; then @@ -511,7 +442,6 @@ clean_project_artifacts() { else search_roots=("$HOME/www" "$HOME/dev" "$HOME/Projects") fi - for root in "${search_roots[@]}"; do # Normalize trailing slash for consistent matching root="${root%/}" @@ -523,44 +453,36 @@ clean_project_artifacts() { return 0 fi done - # Fallback: use grandparent directory dirname "$(dirname "$path")" | xargs basename } - # Format display with alignment (like app_selector) format_purge_display() { local project_name="$1" local artifact_type="$2" local size_str="$3" - # Terminal width for alignment local terminal_width=$(tput cols 2> /dev/null || echo 80) local fixed_width=28 # Reserve for type and size local available_width=$((terminal_width - fixed_width)) - # Bounds: 24-35 chars for project name [[ $available_width -lt 24 ]] && available_width=24 [[ $available_width -gt 35 ]] && available_width=35 - # Truncate project name if needed local truncated_name=$(truncate_by_display_width "$project_name" "$available_width") local current_width=$(get_display_width "$truncated_name") local char_count=${#truncated_name} local padding=$((available_width - current_width)) local printf_width=$((char_count + padding)) - # Format: "project_name size | artifact_type" printf "%-*s %9s | %-13s" "$printf_width" "$truncated_name" "$size_str" "$artifact_type" } - # Build menu options - one line per artifact for item in "${safe_to_clean[@]}"; do local project_name=$(get_project_name "$item") local artifact_type=$(basename "$item") local size_kb=$(get_dir_size_kb "$item") local 
size_human=$(bytes_to_human "$((size_kb * 1024))") - # Check if recent local is_recent=false for recent_item in "${recently_modified[@]+"${recently_modified[@]}"}"; do @@ -569,17 +491,14 @@ clean_project_artifacts() { break fi done - menu_options+=("$(format_purge_display "$project_name" "$artifact_type" "$size_human")") item_paths+=("$item") item_sizes+=("$size_kb") item_recent_flags+=("$is_recent") done - if [[ -t 1 ]]; then stop_inline_spinner fi - # Set global vars for selector export PURGE_CATEGORY_SIZES=$( IFS=, @@ -589,7 +508,6 @@ clean_project_artifacts() { IFS=, echo "${item_recent_flags[*]}" ) - # Interactive selection (only if terminal is available) PURGE_SELECTION_RESULT="" if [[ -t 0 ]]; then @@ -606,7 +524,6 @@ clean_project_artifacts() { fi done fi - if [[ -z "$PURGE_SELECTION_RESULT" ]]; then echo "" echo -e "${GRAY}No items selected${NC}" @@ -614,48 +531,38 @@ clean_project_artifacts() { unset PURGE_CATEGORY_SIZES PURGE_RECENT_CATEGORIES PURGE_SELECTION_RESULT return 0 fi - # Clean selected items echo "" IFS=',' read -r -a selected_indices <<< "$PURGE_SELECTION_RESULT" - local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole" local cleaned_count=0 - for idx in "${selected_indices[@]}"; do local item_path="${item_paths[idx]}" local artifact_type=$(basename "$item_path") local project_name=$(get_project_name "$item_path") local size_kb="${item_sizes[idx]}" local size_human=$(bytes_to_human "$((size_kb * 1024))") - # Safety checks if [[ -z "$item_path" || "$item_path" == "/" || "$item_path" == "$HOME" || "$item_path" != "$HOME/"* ]]; then continue fi - if [[ -t 1 ]]; then start_inline_spinner "Cleaning $project_name/$artifact_type..." fi - if [[ -e "$item_path" ]]; then safe_remove "$item_path" true - if [[ ! 
-e "$item_path" ]]; then local current_total=$(cat "$stats_dir/purge_stats" 2> /dev/null || echo "0") echo "$((current_total + size_kb))" > "$stats_dir/purge_stats" ((cleaned_count++)) fi fi - if [[ -t 1 ]]; then stop_inline_spinner echo -e "${GREEN}${ICON_SUCCESS}${NC} $project_name - $artifact_type ${GREEN}($size_human)${NC}" fi done - # Update count echo "$cleaned_count" > "$stats_dir/purge_count" - unset PURGE_CATEGORY_SIZES PURGE_RECENT_CATEGORIES PURGE_SELECTION_RESULT } diff --git a/lib/clean/system.sh b/lib/clean/system.sh index 1311436..4d90dfc 100644 --- a/lib/clean/system.sh +++ b/lib/clean/system.sh @@ -1,35 +1,28 @@ #!/bin/bash # System-Level Cleanup Module # Deep system cleanup (requires sudo) and Time Machine failed backups - set -euo pipefail - # Deep system cleanup (requires sudo) clean_deep_system() { stop_section_spinner - # Clean old system caches local cache_cleaned=0 safe_sudo_find_delete "/Library/Caches" "*.cache" "$MOLE_TEMP_FILE_AGE_DAYS" "f" && cache_cleaned=1 || true safe_sudo_find_delete "/Library/Caches" "*.tmp" "$MOLE_TEMP_FILE_AGE_DAYS" "f" && cache_cleaned=1 || true safe_sudo_find_delete "/Library/Caches" "*.log" "$MOLE_LOG_AGE_DAYS" "f" && cache_cleaned=1 || true [[ $cache_cleaned -eq 1 ]] && log_success "System caches" - # Clean temporary files (macOS /tmp is a symlink to /private/tmp) local tmp_cleaned=0 safe_sudo_find_delete "/private/tmp" "*" "${MOLE_TEMP_FILE_AGE_DAYS}" "f" && tmp_cleaned=1 || true safe_sudo_find_delete "/private/var/tmp" "*" "${MOLE_TEMP_FILE_AGE_DAYS}" "f" && tmp_cleaned=1 || true [[ $tmp_cleaned -eq 1 ]] && log_success "System temp files" - # Clean crash reports safe_sudo_find_delete "/Library/Logs/DiagnosticReports" "*" "$MOLE_CRASH_REPORT_AGE_DAYS" "f" || true log_success "System crash reports" - # Clean system logs (macOS /var is a symlink to /private/var) safe_sudo_find_delete "/private/var/log" "*.log" "$MOLE_LOG_AGE_DAYS" "f" || true safe_sudo_find_delete "/private/var/log" "*.gz" "$MOLE_LOG_AGE_DAYS" 
"f" || true log_success "System logs" - # Clean Library Updates safely (skip if SIP is enabled) if [[ -d "/Library/Updates" && ! -L "/Library/Updates" ]]; then if ! is_sip_enabled; then @@ -41,14 +34,12 @@ clean_deep_system() { debug_log "Skipping malformed path: $item" continue fi - # Skip system-protected files (restricted flag) local item_flags item_flags=$($STAT_BSD -f%Sf "$item" 2> /dev/null || echo "") if [[ "$item_flags" == *"restricted"* ]]; then continue fi - if safe_sudo_remove "$item"; then ((updates_cleaned++)) fi @@ -56,20 +47,16 @@ clean_deep_system() { [[ $updates_cleaned -gt 0 ]] && log_success "System library updates" fi fi - # Clean macOS Install Data (legacy upgrade leftovers) if [[ -d "/macOS Install Data" ]]; then local mtime=$(get_file_mtime "/macOS Install Data") local age_days=$((($(date +%s) - mtime) / 86400)) - debug_log "Found macOS Install Data (age: ${age_days} days)" - if [[ $age_days -ge 30 ]]; then local size_kb=$(get_path_size_kb "/macOS Install Data") if [[ -n "$size_kb" && "$size_kb" -gt 0 ]]; then local size_human=$(bytes_to_human "$((size_kb * 1024))") debug_log "Cleaning macOS Install Data: $size_human (${age_days} days old)" - if safe_sudo_remove "/macOS Install Data"; then log_success "macOS Install Data ($size_human)" fi @@ -78,21 +65,18 @@ clean_deep_system() { debug_log "Keeping macOS Install Data (only ${age_days} days old, needs 30+)" fi fi - # Clean browser code signature caches start_section_spinner "Scanning system caches..." 
local code_sign_cleaned=0 local found_count=0 local last_update_time=$(date +%s) local update_interval=2 # Update spinner every 2 seconds instead of every 50 files - # Efficient stream processing for large directories while IFS= read -r -d '' cache_dir; do if safe_remove "$cache_dir" true; then ((code_sign_cleaned++)) fi ((found_count++)) - # Update progress spinner periodically based on time, not count local current_time=$(date +%s) if [[ $((current_time - last_update_time)) -ge $update_interval ]]; then @@ -100,26 +84,20 @@ clean_deep_system() { last_update_time=$current_time fi done < <(run_with_timeout 5 command find /private/var/folders -type d -name "*.code_sign_clone" -path "*/X/*" -print0 2> /dev/null || true) - stop_section_spinner - [[ $code_sign_cleaned -gt 0 ]] && log_success "Browser code signature caches ($code_sign_cleaned items)" - # Clean system diagnostics logs safe_sudo_find_delete "/private/var/db/diagnostics/Special" "*" "$MOLE_LOG_AGE_DAYS" "f" || true safe_sudo_find_delete "/private/var/db/diagnostics/Persist" "*" "$MOLE_LOG_AGE_DAYS" "f" || true safe_sudo_find_delete "/private/var/db/DiagnosticPipeline" "*" "$MOLE_LOG_AGE_DAYS" "f" || true log_success "System diagnostic logs" - # Clean power logs safe_sudo_find_delete "/private/var/db/powerlog" "*" "$MOLE_LOG_AGE_DAYS" "f" || true log_success "Power logs" - # Clean memory exception reports (can accumulate to 1-2GB, thousands of files) # These track app memory limit violations, safe to clean old ones safe_sudo_find_delete "/private/var/db/reportmemoryexception/MemoryLimitViolations" "*" "30" "f" || true log_success "Memory exception reports" - # Clean system diagnostic tracev3 logs (can be 1-2GB) # System generates these continuously, safe to clean old ones start_section_spinner "Cleaning diagnostic trace logs..." 
@@ -128,7 +106,6 @@ clean_deep_system() { safe_sudo_find_delete "/private/var/db/diagnostics/Special" "*.tracev3" "30" "f" && diag_logs_cleaned=1 || true stop_section_spinner [[ $diag_logs_cleaned -eq 1 ]] && log_success "System diagnostic trace logs" - # Clean core symbolication cache (can be 3-5GB, mostly for crash report debugging) # Will regenerate when needed for crash analysis # Use faster du with timeout instead of get_path_size_kb to avoid hanging @@ -138,11 +115,9 @@ clean_deep_system() { # Quick size check with timeout (max 5 seconds) local symbolication_size_mb="" symbolication_size_mb=$(run_with_timeout 5 du -sm "/System/Library/Caches/com.apple.coresymbolicationd/data" 2> /dev/null | awk '{print $1}') - # Validate that we got a valid size (non-empty and numeric) if [[ -n "$symbolication_size_mb" && "$symbolication_size_mb" =~ ^[0-9]+$ ]]; then debug_log "Symbolication cache size: ${symbolication_size_mb}MB" - # Only clean if larger than 1GB (1024MB) if [[ $symbolication_size_mb -gt 1024 ]]; then debug_log "Cleaning symbolication cache (size > 1GB)..." @@ -156,21 +131,17 @@ clean_deep_system() { fi debug_log "Core symbolication cache section completed" } - # Clean incomplete Time Machine backups clean_time_machine_failed_backups() { local tm_cleaned=0 - # Check if tmutil is available if ! command -v tmutil > /dev/null 2>&1; then echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" return 0 fi - # Start spinner early (before potentially slow tmutil command) start_section_spinner "Checking Time Machine configuration..." local spinner_active=true - # Check if Time Machine is configured (with short timeout for faster response) local tm_info tm_info=$(run_with_timeout 2 tmutil destinationinfo 2>&1 || echo "failed") @@ -181,7 +152,6 @@ clean_time_machine_failed_backups() { echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" return 0 fi - if [[ ! 
-d "/Volumes" ]]; then if [[ "$spinner_active" == "true" ]]; then stop_section_spinner @@ -189,7 +159,6 @@ clean_time_machine_failed_backups() { echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" return 0 fi - # Skip if backup is running (check actual Running status, not just daemon existence) if tmutil status 2> /dev/null | grep -q "Running = 1"; then if [[ "$spinner_active" == "true" ]]; then @@ -198,25 +167,21 @@ clean_time_machine_failed_backups() { echo -e " ${YELLOW}!${NC} Time Machine backup in progress, skipping cleanup" return 0 fi - # Update spinner message for volume scanning if [[ "$spinner_active" == "true" ]]; then start_section_spinner "Checking backup volumes..." fi - # Fast pre-scan: check which volumes have Backups.backupdb (avoid expensive tmutil checks) local -a backup_volumes=() for volume in /Volumes/*; do [[ -d "$volume" ]] || continue [[ "$volume" == "/Volumes/MacintoshHD" || "$volume" == "/" ]] && continue [[ -L "$volume" ]] && continue - # Quick check: does this volume have backup directories? if [[ -d "$volume/Backups.backupdb" ]] || [[ -d "$volume/.MobileBackups" ]]; then backup_volumes+=("$volume") fi done - # If no backup volumes found, stop spinner and return if [[ ${#backup_volumes[@]} -eq 0 ]]; then if [[ "$spinner_active" == "true" ]]; then @@ -225,7 +190,6 @@ clean_time_machine_failed_backups() { echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" return 0 fi - # Update spinner message: we have potential backup volumes, now scan them if [[ "$spinner_active" == "true" ]]; then start_section_spinner "Scanning backup volumes..." 
@@ -237,47 +201,38 @@ clean_time_machine_failed_backups() { case "$fs_type" in nfs | smbfs | afpfs | cifs | webdav | unknown) continue ;; esac - # HFS+ style backups (Backups.backupdb) local backupdb_dir="$volume/Backups.backupdb" if [[ -d "$backupdb_dir" ]]; then while IFS= read -r inprogress_file; do [[ -d "$inprogress_file" ]] || continue - # Only delete old incomplete backups (safety window) local file_mtime=$(get_file_mtime "$inprogress_file") local current_time=$(date +%s) local hours_old=$(((current_time - file_mtime) / 3600)) - if [[ $hours_old -lt $MOLE_TM_BACKUP_SAFE_HOURS ]]; then continue fi - local size_kb=$(get_path_size_kb "$inprogress_file") [[ "$size_kb" -le 0 ]] && continue - # Stop spinner before first output if [[ "$spinner_active" == "true" ]]; then stop_section_spinner spinner_active=false fi - local backup_name=$(basename "$inprogress_file") local size_human=$(bytes_to_human "$((size_kb * 1024))") - if [[ "$DRY_RUN" == "true" ]]; then echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Incomplete backup: $backup_name ${YELLOW}($size_human dry)${NC}" ((tm_cleaned++)) note_activity continue fi - # Real deletion if ! 
command -v tmutil > /dev/null 2>&1; then echo -e " ${YELLOW}!${NC} tmutil not available, skipping: $backup_name" continue fi - if tmutil delete "$inprogress_file" 2> /dev/null; then echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete backup: $backup_name ${GREEN}($size_human)${NC}" ((tm_cleaned++)) @@ -290,53 +245,42 @@ clean_time_machine_failed_backups() { fi done < <(run_with_timeout 15 find "$backupdb_dir" -maxdepth 3 -type d \( -name "*.inProgress" -o -name "*.inprogress" \) 2> /dev/null || true) fi - # APFS style backups (.backupbundle or .sparsebundle) for bundle in "$volume"/*.backupbundle "$volume"/*.sparsebundle; do [[ -e "$bundle" ]] || continue [[ -d "$bundle" ]] || continue - # Check if bundle is mounted local bundle_name=$(basename "$bundle") local mounted_path=$(hdiutil info 2> /dev/null | grep -A 5 "image-path.*$bundle_name" | grep "/Volumes/" | awk '{print $1}' | head -1 || echo "") - if [[ -n "$mounted_path" && -d "$mounted_path" ]]; then while IFS= read -r inprogress_file; do [[ -d "$inprogress_file" ]] || continue - # Only delete old incomplete backups (safety window) local file_mtime=$(get_file_mtime "$inprogress_file") local current_time=$(date +%s) local hours_old=$(((current_time - file_mtime) / 3600)) - if [[ $hours_old -lt $MOLE_TM_BACKUP_SAFE_HOURS ]]; then continue fi - local size_kb=$(get_path_size_kb "$inprogress_file") [[ "$size_kb" -le 0 ]] && continue - # Stop spinner before first output if [[ "$spinner_active" == "true" ]]; then stop_section_spinner spinner_active=false fi - local backup_name=$(basename "$inprogress_file") local size_human=$(bytes_to_human "$((size_kb * 1024))") - if [[ "$DRY_RUN" == "true" ]]; then echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Incomplete APFS backup in $bundle_name: $backup_name ${YELLOW}($size_human dry)${NC}" ((tm_cleaned++)) note_activity continue fi - # Real deletion if ! 
command -v tmutil > /dev/null 2>&1; then continue fi - if tmutil delete "$inprogress_file" 2> /dev/null; then echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete APFS backup in $bundle_name: $backup_name ${GREEN}($size_human)${NC}" ((tm_cleaned++)) @@ -351,55 +295,42 @@ clean_time_machine_failed_backups() { fi done done - # Stop spinner if still active (no backups found) if [[ "$spinner_active" == "true" ]]; then stop_section_spinner fi - if [[ $tm_cleaned -eq 0 ]]; then echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" fi } - # Clean local APFS snapshots (older than 24 hours) clean_local_snapshots() { # Check if tmutil is available if ! command -v tmutil > /dev/null 2>&1; then return 0 fi - start_section_spinner "Checking local snapshots..." - # Check for local snapshots local snapshot_list snapshot_list=$(tmutil listlocalsnapshots / 2> /dev/null) - stop_section_spinner - [[ -z "$snapshot_list" ]] && return 0 - # Parse and clean snapshots local cleaned_count=0 local total_cleaned_size=0 # Estimation not possible without thin - # Get current time local current_ts=$(date +%s) local one_day_ago=$((current_ts - 86400)) - while IFS= read -r line; do # Format: com.apple.TimeMachine.2023-10-25-120000 if [[ "$line" =~ com\.apple\.TimeMachine\.([0-9]{4})-([0-9]{2})-([0-9]{2})-([0-9]{6}) ]]; then local date_str="${BASH_REMATCH[1]}-${BASH_REMATCH[2]}-${BASH_REMATCH[3]} ${BASH_REMATCH[4]:0:2}:${BASH_REMATCH[4]:2:2}:${BASH_REMATCH[4]:4:2}" local snap_ts=$(date -j -f "%Y-%m-%d %H:%M:%S" "$date_str" "+%s" 2> /dev/null || echo "0") - # Skip if parsing failed [[ "$snap_ts" == "0" ]] && continue - # If snapshot is older than 24 hours if [[ $snap_ts -lt $one_day_ago ]]; then local snap_name="${BASH_REMATCH[0]}" - if [[ "$DRY_RUN" == "true" ]]; then echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Old local snapshot: $snap_name ${YELLOW}(dry)${NC}" ((cleaned_count++)) @@ -417,7 +348,6 @@ clean_local_snapshots() { fi fi done <<< "$snapshot_list" - if [[ $cleaned_count -gt 0 && 
"$DRY_RUN" != "true" ]]; then log_success "Cleaned $cleaned_count old local snapshots" fi diff --git a/lib/clean/user.sh b/lib/clean/user.sh index cd0a3ce..3646d74 100644 --- a/lib/clean/user.sh +++ b/lib/clean/user.sh @@ -1,18 +1,12 @@ #!/bin/bash # User Data Cleanup Module - set -euo pipefail - # Clean user essentials (caches, logs, trash) clean_user_essentials() { start_section_spinner "Scanning caches..." - safe_clean ~/Library/Caches/* "User app cache" - stop_section_spinner - safe_clean ~/Library/Logs/* "User app logs" - # Check if Trash directory is whitelisted if is_path_whitelisted "$HOME/.Trash"; then note_activity @@ -21,33 +15,27 @@ clean_user_essentials() { safe_clean ~/.Trash/* "Trash" fi } - # Helper: Scan external volumes for cleanup (Trash & DS_Store) scan_external_volumes() { [[ -d "/Volumes" ]] || return 0 - # Fast pre-check: collect non-system external volumes and detect network volumes local -a candidate_volumes=() local -a network_volumes=() for volume in /Volumes/*; do # Basic checks (directory, writable, not a symlink) [[ -d "$volume" && -w "$volume" && ! 
-L "$volume" ]] || continue - # Skip system root if it appears in /Volumes [[ "$volume" == "/" || "$volume" == "/Volumes/Macintosh HD" ]] && continue - # Use diskutil to intelligently detect network volumes (SMB/NFS/AFP) # Timeout protection: 1s per volume to avoid slow network responses local protocol="" protocol=$(run_with_timeout 1 command diskutil info "$volume" 2> /dev/null | grep -i "Protocol:" | awk '{print $2}' || echo "") - case "$protocol" in SMB | NFS | AFP | CIFS | WebDAV) network_volumes+=("$volume") continue ;; esac - # Fallback: Check filesystem type via df if diskutil didn't identify protocol local fs_type="" fs_type=$(run_with_timeout 1 command df -T "$volume" 2> /dev/null | tail -1 | awk '{print $2}' || echo "") @@ -57,14 +45,11 @@ scan_external_volumes() { continue ;; esac - candidate_volumes+=("$volume") done - # If no external volumes found, return immediately (zero overhead) local volume_count=${#candidate_volumes[@]} local network_count=${#network_volumes[@]} - if [[ $volume_count -eq 0 ]]; then # Show info if network volumes were skipped if [[ $network_count -gt 0 ]]; then @@ -73,18 +58,14 @@ scan_external_volumes() { fi return 0 fi - # We have local external volumes, now perform full scan start_section_spinner "Scanning $volume_count external volume(s)..." - for volume in "${candidate_volumes[@]}"; do # Re-verify volume is still accessible (may have been unmounted since initial scan) # Use simple directory check instead of slow mount command for better performance [[ -d "$volume" && -r "$volume" ]] || continue - # 1. Clean Trash on volume local volume_trash="$volume/.Trashes" - # Check if external volume Trash is whitelisted if [[ -d "$volume_trash" && "$DRY_RUN" != "true" ]] && ! is_path_whitelisted "$volume_trash"; then # Safely iterate and remove each item @@ -92,71 +73,56 @@ scan_external_volumes() { safe_remove "$item" true || true done < <(command find "$volume_trash" -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true) fi - # 2. 
Clean .DS_Store if [[ "$PROTECT_FINDER_METADATA" != "true" ]]; then clean_ds_store_tree "$volume" "$(basename "$volume") volume (.DS_Store)" fi done - stop_section_spinner } - # Clean Finder metadata (.DS_Store files) clean_finder_metadata() { stop_section_spinner - if [[ "$PROTECT_FINDER_METADATA" == "true" ]]; then note_activity echo -e " ${GREEN}${ICON_EMPTY}${NC} Finder metadata · whitelist protected" return fi - clean_ds_store_tree "$HOME" "Home directory (.DS_Store)" } - # Clean macOS system caches clean_macos_system_caches() { stop_section_spinner - # Clean saved application states with protection for System Settings # Note: safe_clean already calls should_protect_path for each file - safe_clean ~/Library/Saved\ Application\ State/* "Saved application states" - + safe_clean ~/Library/Saved\ Application\ State/* "Saved application states" || true # REMOVED: Spotlight cache cleanup can cause system UI issues # Spotlight indexes should be managed by macOS automatically # safe_clean ~/Library/Caches/com.apple.spotlight "Spotlight cache" - - safe_clean ~/Library/Caches/com.apple.photoanalysisd "Photo analysis cache" - safe_clean ~/Library/Caches/com.apple.akd "Apple ID cache" - safe_clean ~/Library/Caches/com.apple.WebKit.Networking/* "WebKit network cache" - + safe_clean ~/Library/Caches/com.apple.photoanalysisd "Photo analysis cache" || true + safe_clean ~/Library/Caches/com.apple.akd "Apple ID cache" || true + safe_clean ~/Library/Caches/com.apple.WebKit.Networking/* "WebKit network cache" || true # Extra user items - safe_clean ~/Library/DiagnosticReports/* "Diagnostic reports" - safe_clean ~/Library/Caches/com.apple.QuickLook.thumbnailcache "QuickLook thumbnails" - safe_clean ~/Library/Caches/Quick\ Look/* "QuickLook cache" - safe_clean ~/Library/Caches/com.apple.iconservices* "Icon services cache" - safe_clean ~/Library/Caches/CloudKit/* "CloudKit cache" - + safe_clean ~/Library/DiagnosticReports/* "Diagnostic reports" || true + safe_clean 
~/Library/Caches/com.apple.QuickLook.thumbnailcache "QuickLook thumbnails" || true + safe_clean ~/Library/Caches/Quick\ Look/* "QuickLook cache" || true + safe_clean ~/Library/Caches/com.apple.iconservices* "Icon services cache" || true + safe_clean ~/Library/Caches/CloudKit/* "CloudKit cache" || true # Clean incomplete downloads - safe_clean ~/Downloads/*.download "Safari incomplete downloads" - safe_clean ~/Downloads/*.crdownload "Chrome incomplete downloads" - safe_clean ~/Downloads/*.part "Partial incomplete downloads" - + safe_clean ~/Downloads/*.download "Safari incomplete downloads" || true + safe_clean ~/Downloads/*.crdownload "Chrome incomplete downloads" || true + safe_clean ~/Downloads/*.part "Partial incomplete downloads" || true # Additional user-level caches - safe_clean ~/Library/Autosave\ Information/* "Autosave information" - safe_clean ~/Library/IdentityCaches/* "Identity caches" - safe_clean ~/Library/Suggestions/* "Siri suggestions cache" - safe_clean ~/Library/Calendars/Calendar\ Cache "Calendar cache" - safe_clean ~/Library/Application\ Support/AddressBook/Sources/*/Photos.cache "Address Book photo cache" + safe_clean ~/Library/Autosave\ Information/* "Autosave information" || true + safe_clean ~/Library/IdentityCaches/* "Identity caches" || true + safe_clean ~/Library/Suggestions/* "Siri suggestions cache" || true + safe_clean ~/Library/Calendars/Calendar\ Cache "Calendar cache" || true + safe_clean ~/Library/Application\ Support/AddressBook/Sources/*/Photos.cache "Address Book photo cache" || true } - # Clean recent items lists clean_recent_items() { stop_section_spinner - local shared_dir="$HOME/Library/Application Support/com.apple.sharedfilelist" - # Target only the global recent item lists to avoid touching per-app/System Settings SFL files local -a recent_lists=( "$shared_dir/com.apple.LSSharedFileList.RecentApplications.sfl2" @@ -168,53 +134,40 @@ clean_recent_items() { "$shared_dir/com.apple.LSSharedFileList.RecentServers.sfl" 
"$shared_dir/com.apple.LSSharedFileList.RecentHosts.sfl" ) - if [[ -d "$shared_dir" ]]; then for sfl_file in "${recent_lists[@]}"; do - [[ -e "$sfl_file" ]] && safe_clean "$sfl_file" "Recent items list" + [[ -e "$sfl_file" ]] && safe_clean "$sfl_file" "Recent items list" || true done fi - # Clean recent items preferences - safe_clean ~/Library/Preferences/com.apple.recentitems.plist "Recent items preferences" + safe_clean ~/Library/Preferences/com.apple.recentitems.plist "Recent items preferences" || true } - # Clean old mail downloads clean_mail_downloads() { stop_section_spinner - local mail_age_days=${MOLE_MAIL_AGE_DAYS:-30} if ! [[ "$mail_age_days" =~ ^[0-9]+$ ]]; then mail_age_days=30 fi - local -a mail_dirs=( "$HOME/Library/Mail Downloads" "$HOME/Library/Containers/com.apple.mail/Data/Library/Mail Downloads" ) - local count=0 local cleaned_kb=0 - for target_path in "${mail_dirs[@]}"; do if [[ -d "$target_path" ]]; then # Check directory size threshold local dir_size_kb=0 - if command -v du > /dev/null 2>&1; then - dir_size_kb=$(du -sk "$target_path" 2> /dev/null | awk 'NR==1{print $1}') - dir_size_kb=${dir_size_kb:-0} - fi - + dir_size_kb=$(get_path_size_kb "$target_path") # Skip if below threshold if [[ $dir_size_kb -lt ${MOLE_MAIL_DOWNLOADS_MIN_KB:-5120} ]]; then continue fi - # Find and remove files older than specified days while IFS= read -r -d '' file_path; do if [[ -f "$file_path" ]]; then - local file_size_kb=$(du -sk "$file_path" 2> /dev/null | awk 'NR==1{print $1}') - file_size_kb=${file_size_kb:-0} + local file_size_kb=$(get_path_size_kb "$file_path") if safe_remove "$file_path" true; then ((count++)) ((cleaned_kb += file_size_kb)) @@ -223,47 +176,36 @@ clean_mail_downloads() { done < <(command find "$target_path" -type f -mtime +"$mail_age_days" -print0 2> /dev/null || true) fi done - if [[ $count -gt 0 ]]; then - local cleaned_mb=$(echo "$cleaned_kb" | awk '{printf "%.1f", $1/1024}') + local cleaned_mb=$(echo "$cleaned_kb" | awk '{printf "%.1f", 
$1/1024}' || echo "0.0") echo " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $count mail attachments (~${cleaned_mb}MB)" note_activity fi } - # Clean sandboxed app caches clean_sandboxed_app_caches() { stop_section_spinner - safe_clean ~/Library/Containers/com.apple.wallpaper.agent/Data/Library/Caches/* "Wallpaper agent cache" safe_clean ~/Library/Containers/com.apple.mediaanalysisd/Data/Library/Caches/* "Media analysis cache" safe_clean ~/Library/Containers/com.apple.AppStore/Data/Library/Caches/* "App Store cache" safe_clean ~/Library/Containers/com.apple.configurator.xpc.InternetService/Data/tmp/* "Apple Configurator temp files" - # Clean sandboxed app caches - iterate quietly to avoid UI flashing local containers_dir="$HOME/Library/Containers" [[ ! -d "$containers_dir" ]] && return 0 - start_section_spinner "Scanning sandboxed apps..." - local total_size=0 local cleaned_count=0 local found_any=false - # Enable nullglob for safe globbing; restore afterwards local _ng_state _ng_state=$(shopt -p nullglob || true) shopt -s nullglob - for container_dir in "$containers_dir"/*; do process_container_cache "$container_dir" done - # Restore nullglob to previous state eval "$_ng_state" - stop_section_spinner - if [[ "$found_any" == "true" ]]; then local size_human=$(bytes_to_human "$((total_size * 1024))") if [[ "$DRY_RUN" == "true" ]]; then @@ -277,12 +219,10 @@ clean_sandboxed_app_caches() { note_activity fi } - # Process a single container cache directory (reduces nesting) process_container_cache() { local container_dir="$1" [[ -d "$container_dir" ]] || return 0 - # Extract bundle ID and check protection status early local bundle_id=$(basename "$container_dir") if is_critical_system_component "$bundle_id"; then @@ -291,11 +231,9 @@ process_container_cache() { if should_protect_data "$bundle_id" || should_protect_data "$(echo "$bundle_id" | tr '[:upper:]' '[:lower:]')"; then return 0 fi - local cache_dir="$container_dir/Data/Library/Caches" # Check if dir exists and has content 
[[ -d "$cache_dir" ]] || return 0 - # Fast check if empty using find (more efficient than ls) if find "$cache_dir" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then # Use global variables from caller for tracking @@ -303,35 +241,28 @@ process_container_cache() { ((total_size += size)) found_any=true ((cleaned_count++)) - if [[ "$DRY_RUN" != "true" ]]; then # Clean contents safely with local nullglob management local _ng_state _ng_state=$(shopt -p nullglob || true) shopt -s nullglob - for item in "$cache_dir"/*; do [[ -e "$item" ]] || continue safe_remove "$item" true || true done - eval "$_ng_state" fi fi } - # Clean browser caches (Safari, Chrome, Edge, Firefox, etc.) clean_browsers() { stop_section_spinner - safe_clean ~/Library/Caches/com.apple.Safari/* "Safari cache" - # Chrome/Chromium safe_clean ~/Library/Caches/Google/Chrome/* "Chrome cache" safe_clean ~/Library/Application\ Support/Google/Chrome/*/Application\ Cache/* "Chrome app cache" safe_clean ~/Library/Application\ Support/Google/Chrome/*/GPUCache/* "Chrome GPU cache" safe_clean ~/Library/Caches/Chromium/* "Chromium cache" - safe_clean ~/Library/Caches/com.microsoft.edgemac/* "Edge cache" safe_clean ~/Library/Caches/company.thebrowser.Browser/* "Arc cache" safe_clean ~/Library/Caches/company.thebrowser.dia/* "Dia cache" @@ -344,11 +275,9 @@ clean_browsers() { safe_clean ~/Library/Caches/zen/* "Zen cache" safe_clean ~/Library/Application\ Support/Firefox/Profiles/*/cache2/* "Firefox profile cache" } - # Clean cloud storage app caches clean_cloud_storage() { stop_section_spinner - safe_clean ~/Library/Caches/com.dropbox.* "Dropbox cache" safe_clean ~/Library/Caches/com.getdropbox.dropbox "Dropbox cache" safe_clean ~/Library/Caches/com.google.GoogleDrive "Google Drive cache" @@ -357,11 +286,9 @@ clean_cloud_storage() { safe_clean ~/Library/Caches/com.box.desktop "Box cache" safe_clean ~/Library/Caches/com.microsoft.OneDrive "OneDrive cache" } - # Clean office application caches 
clean_office_applications() { stop_section_spinner - safe_clean ~/Library/Caches/com.microsoft.Word "Microsoft Word cache" safe_clean ~/Library/Caches/com.microsoft.Excel "Microsoft Excel cache" safe_clean ~/Library/Caches/com.microsoft.Powerpoint "Microsoft PowerPoint cache" @@ -371,62 +298,48 @@ clean_office_applications() { safe_clean ~/Library/Caches/org.mozilla.thunderbird/* "Thunderbird cache" safe_clean ~/Library/Caches/com.apple.mail/* "Apple Mail cache" } - # Clean virtualization tools clean_virtualization_tools() { stop_section_spinner - safe_clean ~/Library/Caches/com.vmware.fusion "VMware Fusion cache" safe_clean ~/Library/Caches/com.parallels.* "Parallels cache" safe_clean ~/VirtualBox\ VMs/.cache "VirtualBox cache" safe_clean ~/.vagrant.d/tmp/* "Vagrant temporary files" } - # Clean Application Support logs and caches clean_application_support_logs() { stop_section_spinner - if [[ ! -d "$HOME/Library/Application Support" ]] || ! ls "$HOME/Library/Application Support" > /dev/null 2>&1; then note_activity echo -e " ${YELLOW}${ICON_WARNING}${NC} Skipped: No permission to access Application Support" return 0 fi - start_section_spinner "Scanning Application Support..." 
- local total_size=0 local cleaned_count=0 local found_any=false - # Enable nullglob for safe globbing local _ng_state _ng_state=$(shopt -p nullglob || true) shopt -s nullglob - # Clean log directories and cache patterns for app_dir in ~/Library/Application\ Support/*; do [[ -d "$app_dir" ]] || continue - local app_name=$(basename "$app_dir") local app_name_lower=$(echo "$app_name" | tr '[:upper:]' '[:lower:]') local is_protected=false - if should_protect_data "$app_name"; then is_protected=true elif should_protect_data "$app_name_lower"; then is_protected=true fi - if [[ "$is_protected" == "true" ]]; then continue fi - if is_critical_system_component "$app_name"; then continue fi - local -a start_candidates=("$app_dir/log" "$app_dir/logs" "$app_dir/activitylog" "$app_dir/Cache/Cache_Data" "$app_dir/Crashpad/completed") - for candidate in "${start_candidates[@]}"; do if [[ -d "$candidate" ]]; then if find "$candidate" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then @@ -434,7 +347,6 @@ clean_application_support_logs() { ((total_size += size)) ((cleaned_count++)) found_any=true - if [[ "$DRY_RUN" != "true" ]]; then for item in "$candidate"/*; do [[ -e "$item" ]] || continue @@ -445,16 +357,13 @@ clean_application_support_logs() { fi done done - # Clean Group Containers logs local known_group_containers=( "group.com.apple.contentdelivery" ) - for container in "${known_group_containers[@]}"; do local container_path="$HOME/Library/Group Containers/$container" local -a gc_candidates=("$container_path/Logs" "$container_path/Library/Logs") - for candidate in "${gc_candidates[@]}"; do if [[ -d "$candidate" ]]; then if find "$candidate" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then @@ -462,7 +371,6 @@ clean_application_support_logs() { ((total_size += size)) ((cleaned_count++)) found_any=true - if [[ "$DRY_RUN" != "true" ]]; then for item in "$candidate"/*; do [[ -e "$item" ]] || continue @@ -473,12 +381,9 @@ 
clean_application_support_logs() { fi done done - # Restore nullglob to previous state eval "$_ng_state" - stop_section_spinner - if [[ "$found_any" == "true" ]]; then local size_human=$(bytes_to_human "$((total_size * 1024))") if [[ "$DRY_RUN" == "true" ]]; then @@ -492,7 +397,6 @@ clean_application_support_logs() { note_activity fi } - # Check and show iOS device backup info check_ios_device_backups() { local backup_dir="$HOME/Library/Application Support/MobileSync/Backup" @@ -508,15 +412,14 @@ check_ios_device_backups() { fi fi fi + return 0 } - -# Clean Apple Silicon specific caches # Env: IS_M_SERIES +# Clean Apple Silicon specific caches clean_apple_silicon_caches() { if [[ "${IS_M_SERIES:-false}" != "true" ]]; then return 0 fi - start_section "Apple Silicon updates" safe_clean /Library/Apple/usr/share/rosetta/rosetta_update_bundle "Rosetta 2 cache" safe_clean ~/Library/Caches/com.apple.rosetta.update "Rosetta 2 user cache" diff --git a/lib/core/base.sh b/lib/core/base.sh index 0f1f2bd..50aeae9 100644 --- a/lib/core/base.sh +++ b/lib/core/base.sh @@ -30,8 +30,8 @@ readonly NC="${ESC}[0m" readonly ICON_CONFIRM="◎" readonly ICON_ADMIN="⚙" readonly ICON_SUCCESS="✓" -readonly ICON_ERROR="☹︎" -readonly ICON_WARNING="☺︎" +readonly ICON_ERROR="☻" +readonly ICON_WARNING="●" readonly ICON_EMPTY="○" readonly ICON_SOLID="●" readonly ICON_LIST="•" diff --git a/lib/core/file_ops.sh b/lib/core/file_ops.sh index 3299ca6..f3bf74a 100644 --- a/lib/core/file_ops.sh +++ b/lib/core/file_ops.sh @@ -97,10 +97,23 @@ safe_remove() { debug_log "Removing: $path" # Perform the deletion - if rm -rf "$path" 2> /dev/null; then # SAFE: safe_remove implementation + # Use || to capture the exit code so set -e won't abort on rm failures + local error_msg + local rm_exit=0 + error_msg=$(rm -rf "$path" 2>&1) || rm_exit=$? 
+ + if [[ $rm_exit -eq 0 ]]; then return 0 else - [[ "$silent" != "true" ]] && log_error "Failed to remove: $path" + # Check if it's a permission error + if [[ "$error_msg" == *"Permission denied"* ]] || [[ "$error_msg" == *"Operation not permitted"* ]]; then + MOLE_PERMISSION_DENIED_COUNT=${MOLE_PERMISSION_DENIED_COUNT:-0} + MOLE_PERMISSION_DENIED_COUNT=$((MOLE_PERMISSION_DENIED_COUNT + 1)) + export MOLE_PERMISSION_DENIED_COUNT + debug_log "Permission denied: $path (may need Full Disk Access)" + else + [[ "$silent" != "true" ]] && log_error "Failed to remove: $path" + fi return 1 fi } @@ -241,8 +254,10 @@ get_path_size_kb() { return } # Direct execution without timeout overhead - critical for performance in loops + # Use || echo 0 to ensure failure in du (e.g. permission error) doesn't exit script under set -e + # With pipefail, the pipeline would fail if du fails, but the || fallback catches it. local size - size=$(command du -sk "$path" 2> /dev/null | awk '{print $1}') + size=$(command du -sk "$path" 2> /dev/null | awk '{print $1}' || echo "0") echo "${size:-0}" } diff --git a/lib/core/log.sh b/lib/core/log.sh index 001a24a..b1bdb59 100644 --- a/lib/core/log.sh +++ b/lib/core/log.sh @@ -81,7 +81,7 @@ log_warning() { # Log error message log_error() { - echo -e "${RED}${ICON_ERROR}${NC} $1" >&2 + echo -e "${YELLOW}${ICON_ERROR}${NC} $1" >&2 local timestamp=$(date '+%Y-%m-%d %H:%M:%S') echo "[$timestamp] ERROR: $1" >> "$LOG_FILE" 2> /dev/null || true if [[ "${MO_DEBUG:-}" == "1" ]]; then diff --git a/lib/core/ui.sh b/lib/core/ui.sh index c0a6d00..951233b 100755 --- a/lib/core/ui.sh +++ b/lib/core/ui.sh @@ -249,28 +249,35 @@ show_menu_option() { # Background spinner implementation INLINE_SPINNER_PID="" +INLINE_SPINNER_STOP_FILE="" + start_inline_spinner() { stop_inline_spinner 2> /dev/null || true local message="$1" if [[ -t 1 ]]; then - ( - # Clean exit handler for spinner subprocess (invoked by trap) - # shellcheck disable=SC2329 - cleanup_spinner() { exit
0; } - trap cleanup_spinner TERM INT EXIT + # Create unique stop flag file for this spinner instance + INLINE_SPINNER_STOP_FILE="${TMPDIR:-/tmp}/mole_spinner_$$_$RANDOM.stop" + ( + local stop_file="$INLINE_SPINNER_STOP_FILE" local chars chars="$(mo_spinner_chars)" [[ -z "$chars" ]] && chars="|/-\\" local i=0 - while true; do + + # Cooperative exit: check for stop file instead of relying on signals + while [[ ! -f "$stop_file" ]]; do local c="${chars:$((i % ${#chars})):1}" # Output to stderr to avoid interfering with stdout - printf "\r${MOLE_SPINNER_PREFIX:-}${BLUE}%s${NC} %s" "$c" "$message" >&2 || exit 0 + printf "\r${MOLE_SPINNER_PREFIX:-}${BLUE}%s${NC} %s" "$c" "$message" >&2 || break ((i++)) sleep 0.1 done + + # Clean up stop file before exiting + rm -f "$stop_file" 2> /dev/null || true + exit 0 ) & INLINE_SPINNER_PID=$! disown 2> /dev/null || true @@ -281,17 +288,30 @@ start_inline_spinner() { stop_inline_spinner() { if [[ -n "$INLINE_SPINNER_PID" ]]; then - # Try graceful TERM first, then force KILL if needed - if kill -0 "$INLINE_SPINNER_PID" 2> /dev/null; then - kill -TERM "$INLINE_SPINNER_PID" 2> /dev/null || true - sleep 0.1 2> /dev/null || true - # Force kill if still running - if kill -0 "$INLINE_SPINNER_PID" 2> /dev/null; then - kill -KILL "$INLINE_SPINNER_PID" 2> /dev/null || true - fi + # Cooperative stop: create stop file to signal spinner to exit + if [[ -n "$INLINE_SPINNER_STOP_FILE" ]]; then + touch "$INLINE_SPINNER_STOP_FILE" 2> /dev/null || true fi + + # Wait briefly for cooperative exit + local wait_count=0 + while kill -0 "$INLINE_SPINNER_PID" 2> /dev/null && [[ $wait_count -lt 5 ]]; do + sleep 0.05 2> /dev/null || true + ((wait_count++)) + done + + # Only use SIGKILL as last resort if process is stuck + if kill -0 "$INLINE_SPINNER_PID" 2> /dev/null; then + kill -KILL "$INLINE_SPINNER_PID" 2> /dev/null || true + fi + wait "$INLINE_SPINNER_PID" 2> /dev/null || true + + # Cleanup + rm -f "$INLINE_SPINNER_STOP_FILE" 2> /dev/null || true 
INLINE_SPINNER_PID="" + INLINE_SPINNER_STOP_FILE="" + # Clear the line - use \033[2K to clear entire line, not just to end [[ -t 1 ]] && printf "\r\033[2K" >&2 || true fi @@ -355,3 +375,60 @@ format_last_used_summary() { fi echo "$value" } + +# Check if terminal has Full Disk Access +# Returns 0 if FDA is granted, 1 if denied, 2 if unknown +has_full_disk_access() { + # Cache the result to avoid repeated checks + if [[ -n "${MOLE_HAS_FDA:-}" ]]; then + if [[ "$MOLE_HAS_FDA" == "1" ]]; then + return 0 + elif [[ "$MOLE_HAS_FDA" == "unknown" ]]; then + return 2 + else + return 1 + fi + fi + + # Test access to protected directories that require FDA + # Strategy: Try to access directories that are commonly protected + # If ANY of them are accessible, we likely have FDA + # If ALL fail, we definitely don't have FDA + local -a protected_dirs=( + "$HOME/Library/Safari/LocalStorage" + "$HOME/Library/Mail/V10" + "$HOME/Library/Messages/chat.db" + ) + + local accessible_count=0 + local tested_count=0 + + for test_path in "${protected_dirs[@]}"; do + # Only test when the protected path exists + if [[ -e "$test_path" ]]; then + tested_count=$((tested_count + 1)) + # Try to stat the ACTUAL protected path - this requires FDA + if stat "$test_path" > /dev/null 2>&1; then + accessible_count=$((accessible_count + 1)) + fi + fi + done + + # Three possible outcomes: + # 1. tested_count = 0: Can't determine (test paths don't exist) → unknown + # 2. tested_count > 0 && accessible_count > 0: Has FDA → yes + # 3. tested_count > 0 && accessible_count = 0: No FDA → no + if [[ $tested_count -eq 0 ]]; then + # Can't determine - test paths don't exist, treat as unknown + export MOLE_HAS_FDA="unknown" + return 2 + elif [[ $accessible_count -gt 0 ]]; then + # At least one path is accessible → has FDA + export MOLE_HAS_FDA=1 + return 0 + else + # Tested paths exist but not accessible → no FDA + export MOLE_HAS_FDA=0 + return 1 + fi +}