1
0
mirror of https://github.com/tw93/Mole.git synced 2026-02-04 15:04:42 +00:00

fix: implement layered error tolerance and accurate cleanup reporting (#175 #176 #180)

- Fix safe_remove set -e trap in command substitution
  - Fix has_full_disk_access false positives and unknown state handling
  - Use set +e in perform_cleanup for graceful degradation
  - Track removal failures and only count actually deleted items (#180)
  - Add "Skipped X items (permission denied or in use)" notification
  - Improve spinner reliability with cooperative stop mechanism (#175)
This commit is contained in:
Tw93
2025-12-29 14:27:47 +08:00
parent 16de9d13a8
commit 694c55f73b
15 changed files with 228 additions and 546 deletions

View File

@@ -125,16 +125,12 @@ cleanup() {
fi fi
CLEANUP_DONE=true CLEANUP_DONE=true
# Stop all spinners and clear the line # Stop any inline spinner
if [[ -n "${INLINE_SPINNER_PID:-}" ]] && kill -0 "$INLINE_SPINNER_PID" 2> /dev/null; then stop_inline_spinner 2> /dev/null || true
kill "$INLINE_SPINNER_PID" 2> /dev/null || true
wait "$INLINE_SPINNER_PID" 2> /dev/null || true
INLINE_SPINNER_PID=""
fi
# Clear any spinner output - spinner outputs to stderr # Clear any spinner output - spinner outputs to stderr
if [[ -t 1 ]]; then if [[ -t 1 ]]; then
printf "\r\033[K" >&2 printf "\r\033[K" >&2 || true
fi fi
# Clean up temporary files # Clean up temporary files
@@ -205,6 +201,8 @@ safe_clean() {
local total_size_bytes=0 local total_size_bytes=0
local total_count=0 local total_count=0
local skipped_count=0 local skipped_count=0
local removal_failed_count=0
local permission_start=${MOLE_PERMISSION_DENIED_COUNT:-0}
local show_scan_feedback=false local show_scan_feedback=false
if [[ ${#targets[@]} -gt 20 && -t 1 ]]; then if [[ ${#targets[@]} -gt 20 && -t 1 ]]; then
@@ -316,17 +314,25 @@ safe_clean() {
if [[ -f "$result_file" ]]; then if [[ -f "$result_file" ]]; then
read -r size count < "$result_file" 2> /dev/null || true read -r size count < "$result_file" 2> /dev/null || true
if [[ "$count" -gt 0 && "$size" -gt 0 ]]; then if [[ "$count" -gt 0 && "$size" -gt 0 ]]; then
local removed=1
if [[ "$DRY_RUN" != "true" ]]; then if [[ "$DRY_RUN" != "true" ]]; then
removed=0
# Handle symbolic links separately (only remove the link, not the target) # Handle symbolic links separately (only remove the link, not the target)
if [[ -L "$path" ]]; then if [[ -L "$path" ]]; then
rm "$path" 2> /dev/null || true rm "$path" 2> /dev/null && removed=1
else else
safe_remove "$path" true || true if safe_remove "$path" true; then
removed=1
fi
fi fi
fi fi
((total_size_bytes += size)) if [[ $removed -eq 1 ]]; then
((total_count += 1)) ((total_size_bytes += size))
removed_any=1 ((total_count += 1))
removed_any=1
else
((removal_failed_count++))
fi
fi fi
fi fi
((idx++)) ((idx++))
@@ -341,17 +347,25 @@ safe_clean() {
# Optimization: Skip expensive file counting # Optimization: Skip expensive file counting
if [[ "$size_bytes" -gt 0 ]]; then if [[ "$size_bytes" -gt 0 ]]; then
local removed=1
if [[ "$DRY_RUN" != "true" ]]; then if [[ "$DRY_RUN" != "true" ]]; then
removed=0
# Handle symbolic links separately (only remove the link, not the target) # Handle symbolic links separately (only remove the link, not the target)
if [[ -L "$path" ]]; then if [[ -L "$path" ]]; then
rm "$path" 2> /dev/null || true rm "$path" 2> /dev/null && removed=1
else else
safe_remove "$path" true || true if safe_remove "$path" true; then
removed=1
fi
fi fi
fi fi
((total_size_bytes += size_bytes)) if [[ $removed -eq 1 ]]; then
((total_count += 1)) ((total_size_bytes += size_bytes))
removed_any=1 ((total_count += 1))
removed_any=1
else
((removal_failed_count++))
fi
fi fi
((idx++)) ((idx++))
done done
@@ -361,6 +375,16 @@ safe_clean() {
stop_section_spinner stop_section_spinner
fi fi
# Track permission failures reported by safe_remove
local permission_end=${MOLE_PERMISSION_DENIED_COUNT:-0}
if [[ $permission_end -gt $permission_start && $removed_any -eq 0 ]]; then
debug_log "Permission denied while cleaning: $description"
fi
if [[ $removal_failed_count -gt 0 && "$DRY_RUN" != "true" ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Skipped $removal_failed_count items (permission denied or in use)"
note_activity
fi
if [[ $removed_any -eq 1 ]]; then if [[ $removed_any -eq 1 ]]; then
local size_human=$(bytes_to_human "$((total_size_bytes * 1024))") local size_human=$(bytes_to_human "$((total_size_bytes * 1024))")
@@ -562,10 +586,28 @@ perform_cleanup() {
fi fi
fi fi
# Hint about Full Disk Access for better results (only if not already granted)
if [[ -t 1 && "$DRY_RUN" != "true" ]]; then
local fda_status=0
has_full_disk_access
fda_status=$?
if [[ $fda_status -eq 1 ]]; then
echo ""
echo -e "${YELLOW}${ICON_WARNING}${NC} ${GRAY}Tip: Grant Full Disk Access to your terminal in System Settings for best results${NC}"
fi
fi
total_items=0 total_items=0
files_cleaned=0 files_cleaned=0
total_size_cleaned=0 total_size_cleaned=0
local had_errexit=0
[[ $- == *e* ]] && had_errexit=1
# Allow cleanup functions to fail without exiting the script
# Individual operations use || true for granular error handling
set +e
# ===== 1. Deep system cleanup (if admin) - Do this first while sudo is fresh ===== # ===== 1. Deep system cleanup (if admin) - Do this first while sudo is fresh =====
if [[ "$SYSTEM_CLEAN" == "true" ]]; then if [[ "$SYSTEM_CLEAN" == "true" ]]; then
start_section "Deep system" start_section "Deep system"
@@ -745,6 +787,11 @@ perform_cleanup() {
summary_details+=("Free space now: $(get_free_space)") summary_details+=("Free space now: $(get_free_space)")
fi fi
# Restore strict error handling only if it was enabled
if [[ $had_errexit -eq 1 ]]; then
set -e
fi
print_summary_block "$summary_heading" "${summary_details[@]}" print_summary_block "$summary_heading" "${summary_details[@]}"
printf '\n' printf '\n'
} }

View File

@@ -340,13 +340,13 @@ main() {
fi fi
print_header print_header
if ! command -v jq > /dev/null 2>&1; then if ! command -v jq > /dev/null 2>&1; then
echo -e "${RED}${ICON_ERROR}${NC} Missing dependency: jq" echo -e "${YELLOW}${ICON_ERROR}${NC} Missing dependency: jq"
echo -e "${GRAY}Install with: ${GREEN}brew install jq${NC}" echo -e "${GRAY}Install with: ${GREEN}brew install jq${NC}"
exit 1 exit 1
fi fi
if ! command -v bc > /dev/null 2>&1; then if ! command -v bc > /dev/null 2>&1; then
echo -e "${RED}${ICON_ERROR}${NC} Missing dependency: bc" echo -e "${YELLOW}${ICON_ERROR}${NC} Missing dependency: bc"
echo -e "${GRAY}Install with: ${GREEN}brew install bc${NC}" echo -e "${GRAY}Install with: ${GREEN}brew install bc${NC}"
exit 1 exit 1
fi fi

View File

@@ -50,7 +50,7 @@ ICON_ERROR="☻"
log_info() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}$1${NC}"; } log_info() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}$1${NC}"; }
log_success() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${GREEN}${ICON_SUCCESS}${NC} $1"; } log_success() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${GREEN}${ICON_SUCCESS}${NC} $1"; }
log_warning() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${YELLOW}$1${NC}"; } log_warning() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${YELLOW}$1${NC}"; }
log_error() { echo -e "${RED}${ICON_ERROR}${NC} $1"; } log_error() { echo -e "${YELLOW}${ICON_ERROR}${NC} $1"; }
log_admin() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}${ICON_ADMIN}${NC} $1"; } log_admin() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}${ICON_ADMIN}${NC} $1"; }
log_confirm() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}${ICON_CONFIRM}${NC} $1"; } log_confirm() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}${ICON_CONFIRM}${NC} $1"; }

View File

@@ -1,9 +1,7 @@
#!/bin/bash #!/bin/bash
# User GUI Applications Cleanup Module # User GUI Applications Cleanup Module
# Desktop applications, communication tools, media players, games, utilities # Desktop applications, communication tools, media players, games, utilities
set -euo pipefail set -euo pipefail
# Clean Xcode and iOS development tools # Clean Xcode and iOS development tools
clean_xcode_tools() { clean_xcode_tools() {
# Check if Xcode is running for safer cleanup of critical resources # Check if Xcode is running for safer cleanup of critical resources
@@ -11,7 +9,6 @@ clean_xcode_tools() {
if pgrep -x "Xcode" > /dev/null 2>&1; then if pgrep -x "Xcode" > /dev/null 2>&1; then
xcode_running=true xcode_running=true
fi fi
# Safe to clean regardless of Xcode state # Safe to clean regardless of Xcode state
safe_clean ~/Library/Developer/CoreSimulator/Caches/* "Simulator cache" safe_clean ~/Library/Developer/CoreSimulator/Caches/* "Simulator cache"
safe_clean ~/Library/Developer/CoreSimulator/Devices/*/data/tmp/* "Simulator temp files" safe_clean ~/Library/Developer/CoreSimulator/Devices/*/data/tmp/* "Simulator temp files"
@@ -19,7 +16,6 @@ clean_xcode_tools() {
safe_clean ~/Library/Developer/Xcode/iOS\ Device\ Logs/* "iOS device logs" safe_clean ~/Library/Developer/Xcode/iOS\ Device\ Logs/* "iOS device logs"
safe_clean ~/Library/Developer/Xcode/watchOS\ Device\ Logs/* "watchOS device logs" safe_clean ~/Library/Developer/Xcode/watchOS\ Device\ Logs/* "watchOS device logs"
safe_clean ~/Library/Developer/Xcode/Products/* "Xcode build products" safe_clean ~/Library/Developer/Xcode/Products/* "Xcode build products"
# Clean build artifacts only if Xcode is not running # Clean build artifacts only if Xcode is not running
if [[ "$xcode_running" == "false" ]]; then if [[ "$xcode_running" == "false" ]]; then
safe_clean ~/Library/Developer/Xcode/DerivedData/* "Xcode derived data" safe_clean ~/Library/Developer/Xcode/DerivedData/* "Xcode derived data"
@@ -28,7 +24,6 @@ clean_xcode_tools() {
echo -e " ${YELLOW}${ICON_WARNING}${NC} Xcode is running, skipping DerivedData and Archives cleanup" echo -e " ${YELLOW}${ICON_WARNING}${NC} Xcode is running, skipping DerivedData and Archives cleanup"
fi fi
} }
# Clean code editors (VS Code, Sublime, etc.) # Clean code editors (VS Code, Sublime, etc.)
clean_code_editors() { clean_code_editors() {
safe_clean ~/Library/Application\ Support/Code/logs/* "VS Code logs" safe_clean ~/Library/Application\ Support/Code/logs/* "VS Code logs"
@@ -37,7 +32,6 @@ clean_code_editors() {
safe_clean ~/Library/Application\ Support/Code/CachedData/* "VS Code data cache" safe_clean ~/Library/Application\ Support/Code/CachedData/* "VS Code data cache"
safe_clean ~/Library/Caches/com.sublimetext.*/* "Sublime Text cache" safe_clean ~/Library/Caches/com.sublimetext.*/* "Sublime Text cache"
} }
# Clean communication apps (Slack, Discord, Zoom, etc.) # Clean communication apps (Slack, Discord, Zoom, etc.)
clean_communication_apps() { clean_communication_apps() {
safe_clean ~/Library/Application\ Support/discord/Cache/* "Discord cache" safe_clean ~/Library/Application\ Support/discord/Cache/* "Discord cache"
@@ -53,7 +47,6 @@ clean_communication_apps() {
safe_clean ~/Library/Caches/com.tencent.WeWorkMac/* "WeCom cache" safe_clean ~/Library/Caches/com.tencent.WeWorkMac/* "WeCom cache"
safe_clean ~/Library/Caches/com.feishu.*/* "Feishu cache" safe_clean ~/Library/Caches/com.feishu.*/* "Feishu cache"
} }
# Clean DingTalk # Clean DingTalk
clean_dingtalk() { clean_dingtalk() {
safe_clean ~/Library/Caches/dd.work.exclusive4aliding/* "DingTalk iDingTalk cache" safe_clean ~/Library/Caches/dd.work.exclusive4aliding/* "DingTalk iDingTalk cache"
@@ -61,14 +54,12 @@ clean_dingtalk() {
safe_clean ~/Library/Application\ Support/iDingTalk/log/* "DingTalk logs" safe_clean ~/Library/Application\ Support/iDingTalk/log/* "DingTalk logs"
safe_clean ~/Library/Application\ Support/iDingTalk/holmeslogs/* "DingTalk holmes logs" safe_clean ~/Library/Application\ Support/iDingTalk/holmeslogs/* "DingTalk holmes logs"
} }
# Clean AI assistants # Clean AI assistants
clean_ai_apps() { clean_ai_apps() {
safe_clean ~/Library/Caches/com.openai.chat/* "ChatGPT cache" safe_clean ~/Library/Caches/com.openai.chat/* "ChatGPT cache"
safe_clean ~/Library/Caches/com.anthropic.claudefordesktop/* "Claude desktop cache" safe_clean ~/Library/Caches/com.anthropic.claudefordesktop/* "Claude desktop cache"
safe_clean ~/Library/Logs/Claude/* "Claude logs" safe_clean ~/Library/Logs/Claude/* "Claude logs"
} }
# Clean design and creative tools # Clean design and creative tools
clean_design_tools() { clean_design_tools() {
safe_clean ~/Library/Caches/com.bohemiancoding.sketch3/* "Sketch cache" safe_clean ~/Library/Caches/com.bohemiancoding.sketch3/* "Sketch cache"
@@ -78,7 +69,6 @@ clean_design_tools() {
safe_clean ~/Library/Caches/com.figma.Desktop/* "Figma cache" safe_clean ~/Library/Caches/com.figma.Desktop/* "Figma cache"
# Note: Raycast cache is protected - contains clipboard history (including images) # Note: Raycast cache is protected - contains clipboard history (including images)
} }
# Clean video editing tools # Clean video editing tools
clean_video_tools() { clean_video_tools() {
safe_clean ~/Library/Caches/net.telestream.screenflow10/* "ScreenFlow cache" safe_clean ~/Library/Caches/net.telestream.screenflow10/* "ScreenFlow cache"
@@ -86,7 +76,6 @@ clean_video_tools() {
safe_clean ~/Library/Caches/com.blackmagic-design.DaVinciResolve/* "DaVinci Resolve cache" safe_clean ~/Library/Caches/com.blackmagic-design.DaVinciResolve/* "DaVinci Resolve cache"
safe_clean ~/Library/Caches/com.adobe.PremierePro.*/* "Premiere Pro cache" safe_clean ~/Library/Caches/com.adobe.PremierePro.*/* "Premiere Pro cache"
} }
# Clean 3D and CAD tools # Clean 3D and CAD tools
clean_3d_tools() { clean_3d_tools() {
safe_clean ~/Library/Caches/org.blenderfoundation.blender/* "Blender cache" safe_clean ~/Library/Caches/org.blenderfoundation.blender/* "Blender cache"
@@ -94,7 +83,6 @@ clean_3d_tools() {
safe_clean ~/Library/Caches/com.autodesk.*/* "Autodesk cache" safe_clean ~/Library/Caches/com.autodesk.*/* "Autodesk cache"
safe_clean ~/Library/Caches/com.sketchup.*/* "SketchUp cache" safe_clean ~/Library/Caches/com.sketchup.*/* "SketchUp cache"
} }
# Clean productivity apps # Clean productivity apps
clean_productivity_apps() { clean_productivity_apps() {
safe_clean ~/Library/Caches/com.tw93.MiaoYan/* "MiaoYan cache" safe_clean ~/Library/Caches/com.tw93.MiaoYan/* "MiaoYan cache"
@@ -104,14 +92,12 @@ clean_productivity_apps() {
safe_clean ~/Library/Caches/com.filo.client/* "Filo cache" safe_clean ~/Library/Caches/com.filo.client/* "Filo cache"
safe_clean ~/Library/Caches/com.flomoapp.mac/* "Flomo cache" safe_clean ~/Library/Caches/com.flomoapp.mac/* "Flomo cache"
} }
# Clean music and media players (protects Spotify offline music) # Clean music and media players (protects Spotify offline music)
clean_media_players() { clean_media_players() {
# Spotify cache protection: check for offline music indicators # Spotify cache protection: check for offline music indicators
local spotify_cache="$HOME/Library/Caches/com.spotify.client" local spotify_cache="$HOME/Library/Caches/com.spotify.client"
local spotify_data="$HOME/Library/Application Support/Spotify" local spotify_data="$HOME/Library/Application Support/Spotify"
local has_offline_music=false local has_offline_music=false
# Check for offline music database or large cache (>500MB) # Check for offline music database or large cache (>500MB)
if [[ -f "$spotify_data/PersistentCache/Storage/offline.bnk" ]] || if [[ -f "$spotify_data/PersistentCache/Storage/offline.bnk" ]] ||
[[ -d "$spotify_data/PersistentCache/Storage" && -n "$(find "$spotify_data/PersistentCache/Storage" -type f -name "*.file" 2> /dev/null | head -1)" ]]; then [[ -d "$spotify_data/PersistentCache/Storage" && -n "$(find "$spotify_data/PersistentCache/Storage" -type f -name "*.file" 2> /dev/null | head -1)" ]]; then
@@ -124,7 +110,6 @@ clean_media_players() {
has_offline_music=true has_offline_music=true
fi fi
fi fi
if [[ "$has_offline_music" == "true" ]]; then if [[ "$has_offline_music" == "true" ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Spotify cache protected · offline music detected" echo -e " ${YELLOW}${ICON_WARNING}${NC} Spotify cache protected · offline music detected"
note_activity note_activity
@@ -140,7 +125,6 @@ clean_media_players() {
safe_clean ~/Library/Caches/com.kugou.mac/* "Kugou Music cache" safe_clean ~/Library/Caches/com.kugou.mac/* "Kugou Music cache"
safe_clean ~/Library/Caches/com.kuwo.mac/* "Kuwo Music cache" safe_clean ~/Library/Caches/com.kuwo.mac/* "Kuwo Music cache"
} }
# Clean video players # Clean video players
clean_video_players() { clean_video_players() {
safe_clean ~/Library/Caches/com.colliderli.iina "IINA cache" safe_clean ~/Library/Caches/com.colliderli.iina "IINA cache"
@@ -152,7 +136,6 @@ clean_video_players() {
safe_clean ~/Library/Caches/com.douyu.*/* "Douyu cache" safe_clean ~/Library/Caches/com.douyu.*/* "Douyu cache"
safe_clean ~/Library/Caches/com.huya.*/* "Huya cache" safe_clean ~/Library/Caches/com.huya.*/* "Huya cache"
} }
# Clean download managers # Clean download managers
clean_download_managers() { clean_download_managers() {
safe_clean ~/Library/Caches/net.xmac.aria2gui "Aria2 cache" safe_clean ~/Library/Caches/net.xmac.aria2gui "Aria2 cache"
@@ -162,7 +145,6 @@ clean_download_managers() {
safe_clean ~/Library/Caches/com.folx.*/* "Folx cache" safe_clean ~/Library/Caches/com.folx.*/* "Folx cache"
safe_clean ~/Library/Caches/com.charlessoft.pacifist/* "Pacifist cache" safe_clean ~/Library/Caches/com.charlessoft.pacifist/* "Pacifist cache"
} }
# Clean gaming platforms # Clean gaming platforms
clean_gaming_platforms() { clean_gaming_platforms() {
safe_clean ~/Library/Caches/com.valvesoftware.steam/* "Steam cache" safe_clean ~/Library/Caches/com.valvesoftware.steam/* "Steam cache"
@@ -174,33 +156,28 @@ clean_gaming_platforms() {
safe_clean ~/Library/Caches/com.gog.galaxy/* "GOG Galaxy cache" safe_clean ~/Library/Caches/com.gog.galaxy/* "GOG Galaxy cache"
safe_clean ~/Library/Caches/com.riotgames.*/* "Riot Games cache" safe_clean ~/Library/Caches/com.riotgames.*/* "Riot Games cache"
} }
# Clean translation and dictionary apps # Clean translation and dictionary apps
clean_translation_apps() { clean_translation_apps() {
safe_clean ~/Library/Caches/com.youdao.YoudaoDict "Youdao Dictionary cache" safe_clean ~/Library/Caches/com.youdao.YoudaoDict "Youdao Dictionary cache"
safe_clean ~/Library/Caches/com.eudic.* "Eudict cache" safe_clean ~/Library/Caches/com.eudic.* "Eudict cache"
safe_clean ~/Library/Caches/com.bob-build.Bob "Bob Translation cache" safe_clean ~/Library/Caches/com.bob-build.Bob "Bob Translation cache"
} }
# Clean screenshot and screen recording tools # Clean screenshot and screen recording tools
clean_screenshot_tools() { clean_screenshot_tools() {
safe_clean ~/Library/Caches/com.cleanshot.* "CleanShot cache" safe_clean ~/Library/Caches/com.cleanshot.* "CleanShot cache"
safe_clean ~/Library/Caches/com.reincubate.camo "Camo cache" safe_clean ~/Library/Caches/com.reincubate.camo "Camo cache"
safe_clean ~/Library/Caches/com.xnipapp.xnip "Xnip cache" safe_clean ~/Library/Caches/com.xnipapp.xnip "Xnip cache"
} }
# Clean email clients # Clean email clients
clean_email_clients() { clean_email_clients() {
safe_clean ~/Library/Caches/com.readdle.smartemail-Mac "Spark cache" safe_clean ~/Library/Caches/com.readdle.smartemail-Mac "Spark cache"
safe_clean ~/Library/Caches/com.airmail.* "Airmail cache" safe_clean ~/Library/Caches/com.airmail.* "Airmail cache"
} }
# Clean task management apps # Clean task management apps
clean_task_apps() { clean_task_apps() {
safe_clean ~/Library/Caches/com.todoist.mac.Todoist "Todoist cache" safe_clean ~/Library/Caches/com.todoist.mac.Todoist "Todoist cache"
safe_clean ~/Library/Caches/com.any.do.* "Any.do cache" safe_clean ~/Library/Caches/com.any.do.* "Any.do cache"
} }
# Clean shell and terminal utilities # Clean shell and terminal utilities
clean_shell_utils() { clean_shell_utils() {
safe_clean ~/.zcompdump* "Zsh completion cache" safe_clean ~/.zcompdump* "Zsh completion cache"
@@ -208,13 +185,11 @@ clean_shell_utils() {
safe_clean ~/.viminfo.tmp "Vim temporary files" safe_clean ~/.viminfo.tmp "Vim temporary files"
safe_clean ~/.wget-hsts "wget HSTS cache" safe_clean ~/.wget-hsts "wget HSTS cache"
} }
# Clean input method and system utilities # Clean input method and system utilities
clean_system_utils() { clean_system_utils() {
safe_clean ~/Library/Caches/com.runjuu.Input-Source-Pro/* "Input Source Pro cache" safe_clean ~/Library/Caches/com.runjuu.Input-Source-Pro/* "Input Source Pro cache"
safe_clean ~/Library/Caches/macos-wakatime.WakaTime/* "WakaTime cache" safe_clean ~/Library/Caches/macos-wakatime.WakaTime/* "WakaTime cache"
} }
# Clean note-taking apps # Clean note-taking apps
clean_note_apps() { clean_note_apps() {
safe_clean ~/Library/Caches/notion.id/* "Notion cache" safe_clean ~/Library/Caches/notion.id/* "Notion cache"
@@ -224,13 +199,11 @@ clean_note_apps() {
safe_clean ~/Library/Caches/com.evernote.*/* "Evernote cache" safe_clean ~/Library/Caches/com.evernote.*/* "Evernote cache"
safe_clean ~/Library/Caches/com.yinxiang.*/* "Yinxiang Note cache" safe_clean ~/Library/Caches/com.yinxiang.*/* "Yinxiang Note cache"
} }
# Clean launcher and automation tools # Clean launcher and automation tools
clean_launcher_apps() { clean_launcher_apps() {
safe_clean ~/Library/Caches/com.runningwithcrayons.Alfred/* "Alfred cache" safe_clean ~/Library/Caches/com.runningwithcrayons.Alfred/* "Alfred cache"
safe_clean ~/Library/Caches/cx.c3.theunarchiver/* "The Unarchiver cache" safe_clean ~/Library/Caches/cx.c3.theunarchiver/* "The Unarchiver cache"
} }
# Clean remote desktop tools # Clean remote desktop tools
clean_remote_desktop() { clean_remote_desktop() {
safe_clean ~/Library/Caches/com.teamviewer.*/* "TeamViewer cache" safe_clean ~/Library/Caches/com.teamviewer.*/* "TeamViewer cache"
@@ -238,11 +211,9 @@ clean_remote_desktop() {
safe_clean ~/Library/Caches/com.todesk.*/* "ToDesk cache" safe_clean ~/Library/Caches/com.todesk.*/* "ToDesk cache"
safe_clean ~/Library/Caches/com.sunlogin.*/* "Sunlogin cache" safe_clean ~/Library/Caches/com.sunlogin.*/* "Sunlogin cache"
} }
# Main function to clean all user GUI applications # Main function to clean all user GUI applications
clean_user_gui_applications() { clean_user_gui_applications() {
stop_section_spinner stop_section_spinner
clean_xcode_tools clean_xcode_tools
clean_code_editors clean_code_editors
clean_communication_apps clean_communication_apps

View File

@@ -1,26 +1,20 @@
#!/bin/bash #!/bin/bash
# Application Data Cleanup Module # Application Data Cleanup Module
set -euo pipefail set -euo pipefail
# Clean .DS_Store (Finder metadata), home uses maxdepth 5, excludes slow paths, max 500 files
# Args: $1=target_dir, $2=label # Args: $1=target_dir, $2=label
# Clean .DS_Store (Finder metadata), home uses maxdepth 5, excludes slow paths, max 500 files
clean_ds_store_tree() { clean_ds_store_tree() {
local target="$1" local target="$1"
local label="$2" local label="$2"
[[ -d "$target" ]] || return 0 [[ -d "$target" ]] || return 0
local file_count=0 local file_count=0
local total_bytes=0 local total_bytes=0
local spinner_active="false" local spinner_active="false"
if [[ -t 1 ]]; then if [[ -t 1 ]]; then
MOLE_SPINNER_PREFIX=" " MOLE_SPINNER_PREFIX=" "
start_inline_spinner "Cleaning Finder metadata..." start_inline_spinner "Cleaning Finder metadata..."
spinner_active="true" spinner_active="true"
fi fi
# Build exclusion paths for find (skip common slow/large directories) # Build exclusion paths for find (skip common slow/large directories)
local -a exclude_paths=( local -a exclude_paths=(
-path "*/Library/Application Support/MobileSync" -prune -o -path "*/Library/Application Support/MobileSync" -prune -o
@@ -30,14 +24,12 @@ clean_ds_store_tree() {
-path "*/.git" -prune -o -path "*/.git" -prune -o
-path "*/Library/Caches" -prune -o -path "*/Library/Caches" -prune -o
) )
# Build find command to avoid unbound array expansion with set -u # Build find command to avoid unbound array expansion with set -u
local -a find_cmd=("command" "find" "$target") local -a find_cmd=("command" "find" "$target")
if [[ "$target" == "$HOME" ]]; then if [[ "$target" == "$HOME" ]]; then
find_cmd+=("-maxdepth" "5") find_cmd+=("-maxdepth" "5")
fi fi
find_cmd+=("${exclude_paths[@]}" "-type" "f" "-name" ".DS_Store" "-print0") find_cmd+=("${exclude_paths[@]}" "-type" "f" "-name" ".DS_Store" "-print0")
# Find .DS_Store files with exclusions and depth limit # Find .DS_Store files with exclusions and depth limit
while IFS= read -r -d '' ds_file; do while IFS= read -r -d '' ds_file; do
local size local size
@@ -47,16 +39,13 @@ clean_ds_store_tree() {
if [[ "$DRY_RUN" != "true" ]]; then if [[ "$DRY_RUN" != "true" ]]; then
rm -f "$ds_file" 2> /dev/null || true rm -f "$ds_file" 2> /dev/null || true
fi fi
if [[ $file_count -ge $MOLE_MAX_DS_STORE_FILES ]]; then if [[ $file_count -ge $MOLE_MAX_DS_STORE_FILES ]]; then
break break
fi fi
done < <("${find_cmd[@]}" 2> /dev/null || true) done < <("${find_cmd[@]}" 2> /dev/null || true)
if [[ "$spinner_active" == "true" ]]; then if [[ "$spinner_active" == "true" ]]; then
stop_section_spinner stop_section_spinner
fi fi
if [[ $file_count -gt 0 ]]; then if [[ $file_count -gt 0 ]]; then
local size_human local size_human
size_human=$(bytes_to_human "$total_bytes") size_human=$(bytes_to_human "$total_bytes")
@@ -65,7 +54,6 @@ clean_ds_store_tree() {
else else
echo -e " ${GREEN}${ICON_SUCCESS}${NC} $label ${GREEN}($file_count files, $size_human)${NC}" echo -e " ${GREEN}${ICON_SUCCESS}${NC} $label ${GREEN}($file_count files, $size_human)${NC}"
fi fi
local size_kb=$(((total_bytes + 1023) / 1024)) local size_kb=$(((total_bytes + 1023) / 1024))
((files_cleaned += file_count)) ((files_cleaned += file_count))
((total_size_cleaned += size_kb)) ((total_size_cleaned += size_kb))
@@ -73,24 +61,20 @@ clean_ds_store_tree() {
note_activity note_activity
fi fi
} }
# Clean data for uninstalled apps (caches/logs/states older than 60 days) # Clean data for uninstalled apps (caches/logs/states older than 60 days)
# Protects system apps, major vendors, scans /Applications+running processes # Protects system apps, major vendors, scans /Applications+running processes
# Max 100 items/pattern, 2s du timeout. Env: ORPHAN_AGE_THRESHOLD, DRY_RUN # Max 100 items/pattern, 2s du timeout. Env: ORPHAN_AGE_THRESHOLD, DRY_RUN
# Scan system for installed application bundle IDs
# Usage: scan_installed_apps "output_file" # Usage: scan_installed_apps "output_file"
# Scan system for installed application bundle IDs
scan_installed_apps() { scan_installed_apps() {
local installed_bundles="$1" local installed_bundles="$1"
# Performance optimization: cache results for 5 minutes # Performance optimization: cache results for 5 minutes
local cache_file="$HOME/.cache/mole/installed_apps_cache" local cache_file="$HOME/.cache/mole/installed_apps_cache"
local cache_age_seconds=300 # 5 minutes local cache_age_seconds=300 # 5 minutes
if [[ -f "$cache_file" ]]; then if [[ -f "$cache_file" ]]; then
local cache_mtime=$(get_file_mtime "$cache_file") local cache_mtime=$(get_file_mtime "$cache_file")
local current_time=$(date +%s) local current_time=$(date +%s)
local age=$((current_time - cache_mtime)) local age=$((current_time - cache_mtime))
if [[ $age -lt $cache_age_seconds ]]; then if [[ $age -lt $cache_age_seconds ]]; then
debug_log "Using cached app list (age: ${age}s)" debug_log "Using cached app list (age: ${age}s)"
# Verify cache file is readable and not empty # Verify cache file is readable and not empty
@@ -105,19 +89,15 @@ scan_installed_apps() {
fi fi
fi fi
fi fi
debug_log "Scanning installed applications (cache expired or missing)" debug_log "Scanning installed applications (cache expired or missing)"
# Scan all Applications directories # Scan all Applications directories
local -a app_dirs=( local -a app_dirs=(
"/Applications" "/Applications"
"/System/Applications" "/System/Applications"
"$HOME/Applications" "$HOME/Applications"
) )
# Create a temp dir for parallel results to avoid write contention # Create a temp dir for parallel results to avoid write contention
local scan_tmp_dir=$(create_temp_dir) local scan_tmp_dir=$(create_temp_dir)
# Parallel scan for applications # Parallel scan for applications
local pids=() local pids=()
local dir_idx=0 local dir_idx=0
@@ -129,109 +109,86 @@ scan_installed_apps() {
while IFS= read -r app_path; do while IFS= read -r app_path; do
[[ -n "$app_path" ]] && app_paths+=("$app_path") [[ -n "$app_path" ]] && app_paths+=("$app_path")
done < <(find "$app_dir" -name '*.app' -maxdepth 3 -type d 2> /dev/null) done < <(find "$app_dir" -name '*.app' -maxdepth 3 -type d 2> /dev/null)
# Read bundle IDs with PlistBuddy # Read bundle IDs with PlistBuddy
local count=0 local count=0
for app_path in "${app_paths[@]:-}"; do for app_path in "${app_paths[@]:-}"; do
local plist_path="$app_path/Contents/Info.plist" local plist_path="$app_path/Contents/Info.plist"
[[ ! -f "$plist_path" ]] && continue [[ ! -f "$plist_path" ]] && continue
local bundle_id=$(/usr/libexec/PlistBuddy -c "Print :CFBundleIdentifier" "$plist_path" 2> /dev/null || echo "") local bundle_id=$(/usr/libexec/PlistBuddy -c "Print :CFBundleIdentifier" "$plist_path" 2> /dev/null || echo "")
if [[ -n "$bundle_id" ]]; then if [[ -n "$bundle_id" ]]; then
echo "$bundle_id" echo "$bundle_id"
((count++)) ((count++))
fi fi
done done
) > "$scan_tmp_dir/apps_${dir_idx}.txt" & ) > "$scan_tmp_dir/apps_${dir_idx}.txt" &
pids+=($!) pids+=($!)
((dir_idx++)) ((dir_idx++))
done done
# Get running applications and LaunchAgents in parallel # Get running applications and LaunchAgents in parallel
( (
local running_apps=$(run_with_timeout 5 osascript -e 'tell application "System Events" to get bundle identifier of every application process' 2> /dev/null || echo "") local running_apps=$(run_with_timeout 5 osascript -e 'tell application "System Events" to get bundle identifier of every application process' 2> /dev/null || echo "")
echo "$running_apps" | tr ',' '\n' | sed -e 's/^ *//;s/ *$//' -e '/^$/d' > "$scan_tmp_dir/running.txt" echo "$running_apps" | tr ',' '\n' | sed -e 's/^ *//;s/ *$//' -e '/^$/d' > "$scan_tmp_dir/running.txt"
) & ) &
pids+=($!) pids+=($!)
( (
run_with_timeout 5 find ~/Library/LaunchAgents /Library/LaunchAgents \ run_with_timeout 5 find ~/Library/LaunchAgents /Library/LaunchAgents \
-name "*.plist" -type f 2> /dev/null | -name "*.plist" -type f 2> /dev/null |
xargs -I {} basename {} .plist > "$scan_tmp_dir/agents.txt" 2> /dev/null || true xargs -I {} basename {} .plist > "$scan_tmp_dir/agents.txt" 2> /dev/null || true
) & ) &
pids+=($!) pids+=($!)
# Wait for all background scans to complete # Wait for all background scans to complete
debug_log "Waiting for ${#pids[@]} background processes: ${pids[*]}" debug_log "Waiting for ${#pids[@]} background processes: ${pids[*]}"
for pid in "${pids[@]}"; do for pid in "${pids[@]}"; do
wait "$pid" 2> /dev/null || true wait "$pid" 2> /dev/null || true
done done
debug_log "All background processes completed" debug_log "All background processes completed"
cat "$scan_tmp_dir"/*.txt >> "$installed_bundles" 2> /dev/null || true cat "$scan_tmp_dir"/*.txt >> "$installed_bundles" 2> /dev/null || true
safe_remove "$scan_tmp_dir" true safe_remove "$scan_tmp_dir" true
sort -u "$installed_bundles" -o "$installed_bundles" sort -u "$installed_bundles" -o "$installed_bundles"
# Cache the results # Cache the results
ensure_user_dir "$(dirname "$cache_file")" ensure_user_dir "$(dirname "$cache_file")"
cp "$installed_bundles" "$cache_file" 2> /dev/null || true cp "$installed_bundles" "$cache_file" 2> /dev/null || true
local app_count=$(wc -l < "$installed_bundles" 2> /dev/null | tr -d ' ') local app_count=$(wc -l < "$installed_bundles" 2> /dev/null | tr -d ' ')
debug_log "Scanned $app_count unique applications" debug_log "Scanned $app_count unique applications"
} }
# Check if bundle is orphaned
# Usage: is_bundle_orphaned "bundle_id" "directory_path" "installed_bundles_file" # Usage: is_bundle_orphaned "bundle_id" "directory_path" "installed_bundles_file"
# Check if bundle is orphaned
is_bundle_orphaned() { is_bundle_orphaned() {
local bundle_id="$1" local bundle_id="$1"
local directory_path="$2" local directory_path="$2"
local installed_bundles="$3" local installed_bundles="$3"
# Skip system-critical and protected apps # Skip system-critical and protected apps
if should_protect_data "$bundle_id"; then if should_protect_data "$bundle_id"; then
return 1 return 1
fi fi
# Check if app exists in our scan # Check if app exists in our scan
if grep -Fxq "$bundle_id" "$installed_bundles" 2> /dev/null; then if grep -Fxq "$bundle_id" "$installed_bundles" 2> /dev/null; then
return 1 return 1
fi fi
# Check against centralized protected patterns (app_protection.sh) # Check against centralized protected patterns (app_protection.sh)
if should_protect_data "$bundle_id"; then if should_protect_data "$bundle_id"; then
return 1 return 1
fi fi
# Extra check for specific system bundles not covered by patterns # Extra check for specific system bundles not covered by patterns
case "$bundle_id" in case "$bundle_id" in
loginwindow | dock | systempreferences | systemsettings | settings | controlcenter | finder | safari) loginwindow | dock | systempreferences | systemsettings | settings | controlcenter | finder | safari)
return 1 return 1
;; ;;
esac esac
# Check file age - only clean if 60+ days inactive # Check file age - only clean if 60+ days inactive
if [[ -e "$directory_path" ]]; then if [[ -e "$directory_path" ]]; then
local last_modified_epoch=$(get_file_mtime "$directory_path") local last_modified_epoch=$(get_file_mtime "$directory_path")
local current_epoch=$(date +%s) local current_epoch=$(date +%s)
local days_since_modified=$(((current_epoch - last_modified_epoch) / 86400)) local days_since_modified=$(((current_epoch - last_modified_epoch) / 86400))
if [[ $days_since_modified -lt ${ORPHAN_AGE_THRESHOLD:-60} ]]; then if [[ $days_since_modified -lt ${ORPHAN_AGE_THRESHOLD:-60} ]]; then
return 1 return 1
fi fi
fi fi
return 0 return 0
} }
# Clean data for uninstalled apps (caches/logs/states older than 60 days) # Clean data for uninstalled apps (caches/logs/states older than 60 days)
# Protects system apps, major vendors, scans /Applications+running processes
# Max 100 items/pattern, 2s du timeout. Env: ORPHAN_AGE_THRESHOLD, DRY_RUN # Max 100 items/pattern, 2s du timeout. Env: ORPHAN_AGE_THRESHOLD, DRY_RUN
# Protects system apps, major vendors, scans /Applications+running processes
clean_orphaned_app_data() { clean_orphaned_app_data() {
# Quick permission check - if we can't access Library folders, skip # Quick permission check - if we can't access Library folders, skip
if ! ls "$HOME/Library/Caches" > /dev/null 2>&1; then if ! ls "$HOME/Library/Caches" > /dev/null 2>&1; then
@@ -239,24 +196,19 @@ clean_orphaned_app_data() {
echo -e " ${YELLOW}${ICON_WARNING}${NC} Skipped: No permission to access Library folders" echo -e " ${YELLOW}${ICON_WARNING}${NC} Skipped: No permission to access Library folders"
return 0 return 0
fi fi
# Build list of installed/active apps # Build list of installed/active apps
start_section_spinner "Scanning installed apps..." start_section_spinner "Scanning installed apps..."
local installed_bundles=$(create_temp_file) local installed_bundles=$(create_temp_file)
scan_installed_apps "$installed_bundles" scan_installed_apps "$installed_bundles"
stop_section_spinner stop_section_spinner
# Display scan results # Display scan results
local app_count=$(wc -l < "$installed_bundles" 2> /dev/null | tr -d ' ') local app_count=$(wc -l < "$installed_bundles" 2> /dev/null | tr -d ' ')
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Found $app_count active/installed apps" echo -e " ${GREEN}${ICON_SUCCESS}${NC} Found $app_count active/installed apps"
# Track statistics # Track statistics
local orphaned_count=0 local orphaned_count=0
local total_orphaned_kb=0 local total_orphaned_kb=0
# Unified orphaned resource scanner (caches, logs, states, webkit, HTTP, cookies) # Unified orphaned resource scanner (caches, logs, states, webkit, HTTP, cookies)
start_section_spinner "Scanning orphaned app resources..." start_section_spinner "Scanning orphaned app resources..."
# Define resource types to scan # Define resource types to scan
# CRITICAL: NEVER add LaunchAgents or LaunchDaemons (breaks login items/startup apps) # CRITICAL: NEVER add LaunchAgents or LaunchDaemons (breaks login items/startup apps)
local -a resource_types=( local -a resource_types=(
@@ -267,49 +219,39 @@ clean_orphaned_app_data() {
"$HOME/Library/HTTPStorages|HTTP|com.*:org.*:net.*:io.*" "$HOME/Library/HTTPStorages|HTTP|com.*:org.*:net.*:io.*"
"$HOME/Library/Cookies|Cookies|*.binarycookies" "$HOME/Library/Cookies|Cookies|*.binarycookies"
) )
orphaned_count=0 orphaned_count=0
for resource_type in "${resource_types[@]}"; do for resource_type in "${resource_types[@]}"; do
IFS='|' read -r base_path label patterns <<< "$resource_type" IFS='|' read -r base_path label patterns <<< "$resource_type"
# Check both existence and permission to avoid hanging # Check both existence and permission to avoid hanging
if [[ ! -d "$base_path" ]]; then if [[ ! -d "$base_path" ]]; then
continue continue
fi fi
# Quick permission check - if we can't ls the directory, skip it # Quick permission check - if we can't ls the directory, skip it
if ! ls "$base_path" > /dev/null 2>&1; then if ! ls "$base_path" > /dev/null 2>&1; then
continue continue
fi fi
# Build file pattern array # Build file pattern array
local -a file_patterns=() local -a file_patterns=()
IFS=':' read -ra pattern_arr <<< "$patterns" IFS=':' read -ra pattern_arr <<< "$patterns"
for pat in "${pattern_arr[@]}"; do for pat in "${pattern_arr[@]}"; do
file_patterns+=("$base_path/$pat") file_patterns+=("$base_path/$pat")
done done
# Scan and clean orphaned items # Scan and clean orphaned items
for item_path in "${file_patterns[@]}"; do for item_path in "${file_patterns[@]}"; do
# Use shell glob (no ls needed) # Use shell glob (no ls needed)
# Limit iterations to prevent hanging on directories with too many files # Limit iterations to prevent hanging on directories with too many files
local iteration_count=0 local iteration_count=0
for match in $item_path; do for match in $item_path; do
[[ -e "$match" ]] || continue [[ -e "$match" ]] || continue
# Safety: limit iterations to prevent infinite loops on massive directories # Safety: limit iterations to prevent infinite loops on massive directories
((iteration_count++)) ((iteration_count++))
if [[ $iteration_count -gt $MOLE_MAX_ORPHAN_ITERATIONS ]]; then if [[ $iteration_count -gt $MOLE_MAX_ORPHAN_ITERATIONS ]]; then
break break
fi fi
# Extract bundle ID from filename # Extract bundle ID from filename
local bundle_id=$(basename "$match") local bundle_id=$(basename "$match")
bundle_id="${bundle_id%.savedState}" bundle_id="${bundle_id%.savedState}"
bundle_id="${bundle_id%.binarycookies}" bundle_id="${bundle_id%.binarycookies}"
if is_bundle_orphaned "$bundle_id" "$match" "$installed_bundles"; then if is_bundle_orphaned "$bundle_id" "$match" "$installed_bundles"; then
# Use timeout to prevent du from hanging on network mounts or problematic paths # Use timeout to prevent du from hanging on network mounts or problematic paths
local size_kb local size_kb
@@ -324,14 +266,11 @@ clean_orphaned_app_data() {
done done
done done
done done
stop_section_spinner stop_section_spinner
if [[ $orphaned_count -gt 0 ]]; then if [[ $orphaned_count -gt 0 ]]; then
local orphaned_mb=$(echo "$total_orphaned_kb" | awk '{printf "%.1f", $1/1024}') local orphaned_mb=$(echo "$total_orphaned_kb" | awk '{printf "%.1f", $1/1024}')
echo " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $orphaned_count items (~${orphaned_mb}MB)" echo " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $orphaned_count items (~${orphaned_mb}MB)"
note_activity note_activity
fi fi
rm -f "$installed_bundles" rm -f "$installed_bundles"
} }

View File

@@ -1,23 +1,19 @@
#!/bin/bash #!/bin/bash
# Clean Homebrew caches and remove orphaned dependencies # Clean Homebrew caches and remove orphaned dependencies
# Skips if run within 7 days, runs cleanup/autoremove in parallel with 120s timeout
# Env: MO_BREW_TIMEOUT, DRY_RUN # Env: MO_BREW_TIMEOUT, DRY_RUN
# Skips if run within 7 days, runs cleanup/autoremove in parallel with 120s timeout
clean_homebrew() { clean_homebrew() {
command -v brew > /dev/null 2>&1 || return 0 command -v brew > /dev/null 2>&1 || return 0
# Dry run mode - just indicate what would happen # Dry run mode - just indicate what would happen
if [[ "${DRY_RUN:-false}" == "true" ]]; then if [[ "${DRY_RUN:-false}" == "true" ]]; then
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Homebrew · would cleanup and autoremove" echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Homebrew · would cleanup and autoremove"
return 0 return 0
fi fi
# Smart caching: check if brew cleanup was run recently (within 7 days) # Smart caching: check if brew cleanup was run recently (within 7 days)
# Extended from 2 days to 7 days to reduce cleanup frequency # Extended from 2 days to 7 days to reduce cleanup frequency
local brew_cache_file="${HOME}/.cache/mole/brew_last_cleanup" local brew_cache_file="${HOME}/.cache/mole/brew_last_cleanup"
local cache_valid_days=7 local cache_valid_days=7
local should_skip=false local should_skip=false
if [[ -f "$brew_cache_file" ]]; then if [[ -f "$brew_cache_file" ]]; then
local last_cleanup local last_cleanup
last_cleanup=$(cat "$brew_cache_file" 2> /dev/null || echo "0") last_cleanup=$(cat "$brew_cache_file" 2> /dev/null || echo "0")
@@ -25,15 +21,12 @@ clean_homebrew() {
current_time=$(date +%s) current_time=$(date +%s)
local time_diff=$((current_time - last_cleanup)) local time_diff=$((current_time - last_cleanup))
local days_diff=$((time_diff / 86400)) local days_diff=$((time_diff / 86400))
if [[ $days_diff -lt $cache_valid_days ]]; then if [[ $days_diff -lt $cache_valid_days ]]; then
should_skip=true should_skip=true
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew · cleaned ${days_diff}d ago, skipped" echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew · cleaned ${days_diff}d ago, skipped"
fi fi
fi fi
[[ "$should_skip" == "true" ]] && return 0 [[ "$should_skip" == "true" ]] && return 0
# Quick pre-check: determine if cleanup is needed based on cache size (<50MB) # Quick pre-check: determine if cleanup is needed based on cache size (<50MB)
# Use timeout to prevent slow du on very large caches # Use timeout to prevent slow du on very large caches
# If timeout occurs, assume cache is large and run cleanup # If timeout occurs, assume cache is large and run cleanup
@@ -42,13 +35,11 @@ clean_homebrew() {
if [[ -d ~/Library/Caches/Homebrew ]]; then if [[ -d ~/Library/Caches/Homebrew ]]; then
brew_cache_size=$(run_with_timeout 3 du -sk ~/Library/Caches/Homebrew 2> /dev/null | awk '{print $1}') brew_cache_size=$(run_with_timeout 3 du -sk ~/Library/Caches/Homebrew 2> /dev/null | awk '{print $1}')
local du_exit=$? local du_exit=$?
# Skip cleanup (but still run autoremove) if cache is small # Skip cleanup (but still run autoremove) if cache is small
if [[ $du_exit -eq 0 && -n "$brew_cache_size" && "$brew_cache_size" -lt 51200 ]]; then if [[ $du_exit -eq 0 && -n "$brew_cache_size" && "$brew_cache_size" -lt 51200 ]]; then
skip_cleanup=true skip_cleanup=true
fi fi
fi fi
# Display appropriate spinner message # Display appropriate spinner message
if [[ -t 1 ]]; then if [[ -t 1 ]]; then
if [[ "$skip_cleanup" == "true" ]]; then if [[ "$skip_cleanup" == "true" ]]; then
@@ -57,30 +48,23 @@ clean_homebrew() {
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Homebrew cleanup and autoremove..." MOLE_SPINNER_PREFIX=" " start_inline_spinner "Homebrew cleanup and autoremove..."
fi fi
fi fi
local timeout_seconds=${MO_BREW_TIMEOUT:-120} local timeout_seconds=${MO_BREW_TIMEOUT:-120}
# Run brew cleanup and/or autoremove based on cache size # Run brew cleanup and/or autoremove based on cache size
local brew_tmp_file autoremove_tmp_file local brew_tmp_file autoremove_tmp_file
local brew_pid autoremove_pid local brew_pid autoremove_pid
if [[ "$skip_cleanup" == "false" ]]; then if [[ "$skip_cleanup" == "false" ]]; then
brew_tmp_file=$(create_temp_file) brew_tmp_file=$(create_temp_file)
(brew cleanup > "$brew_tmp_file" 2>&1) & (brew cleanup > "$brew_tmp_file" 2>&1) &
brew_pid=$! brew_pid=$!
fi fi
autoremove_tmp_file=$(create_temp_file) autoremove_tmp_file=$(create_temp_file)
(brew autoremove > "$autoremove_tmp_file" 2>&1) & (brew autoremove > "$autoremove_tmp_file" 2>&1) &
autoremove_pid=$! autoremove_pid=$!
local elapsed=0 local elapsed=0
local brew_done=false local brew_done=false
local autoremove_done=false local autoremove_done=false
# Mark cleanup as done if it was skipped # Mark cleanup as done if it was skipped
[[ "$skip_cleanup" == "true" ]] && brew_done=true [[ "$skip_cleanup" == "true" ]] && brew_done=true
# Wait for both to complete or timeout # Wait for both to complete or timeout
while [[ "$brew_done" == "false" ]] || [[ "$autoremove_done" == "false" ]]; do while [[ "$brew_done" == "false" ]] || [[ "$autoremove_done" == "false" ]]; do
if [[ $elapsed -ge $timeout_seconds ]]; then if [[ $elapsed -ge $timeout_seconds ]]; then
@@ -88,14 +72,11 @@ clean_homebrew() {
kill -TERM $autoremove_pid 2> /dev/null || true kill -TERM $autoremove_pid 2> /dev/null || true
break break
fi fi
[[ -n "$brew_pid" ]] && { kill -0 $brew_pid 2> /dev/null || brew_done=true; } [[ -n "$brew_pid" ]] && { kill -0 $brew_pid 2> /dev/null || brew_done=true; }
kill -0 $autoremove_pid 2> /dev/null || autoremove_done=true kill -0 $autoremove_pid 2> /dev/null || autoremove_done=true
sleep 1 sleep 1
((elapsed++)) ((elapsed++))
done done
# Wait for processes to finish # Wait for processes to finish
local brew_success=false local brew_success=false
if [[ "$skip_cleanup" == "false" && -n "$brew_pid" ]]; then if [[ "$skip_cleanup" == "false" && -n "$brew_pid" ]]; then
@@ -103,14 +84,11 @@ clean_homebrew() {
brew_success=true brew_success=true
fi fi
fi fi
local autoremove_success=false local autoremove_success=false
if wait $autoremove_pid 2> /dev/null; then if wait $autoremove_pid 2> /dev/null; then
autoremove_success=true autoremove_success=true
fi fi
if [[ -t 1 ]]; then stop_inline_spinner; fi if [[ -t 1 ]]; then stop_inline_spinner; fi
# Process cleanup output and extract metrics # Process cleanup output and extract metrics
if [[ "$skip_cleanup" == "true" ]]; then if [[ "$skip_cleanup" == "true" ]]; then
# Cleanup was skipped due to small cache size # Cleanup was skipped due to small cache size
@@ -122,7 +100,6 @@ clean_homebrew() {
local removed_count freed_space local removed_count freed_space
removed_count=$(printf '%s\n' "$brew_output" | grep -c "Removing:" 2> /dev/null || true) removed_count=$(printf '%s\n' "$brew_output" | grep -c "Removing:" 2> /dev/null || true)
freed_space=$(printf '%s\n' "$brew_output" | grep -o "[0-9.]*[KMGT]B freed" 2> /dev/null | tail -1 || true) freed_space=$(printf '%s\n' "$brew_output" | grep -o "[0-9.]*[KMGT]B freed" 2> /dev/null | tail -1 || true)
if [[ $removed_count -gt 0 ]] || [[ -n "$freed_space" ]]; then if [[ $removed_count -gt 0 ]] || [[ -n "$freed_space" ]]; then
if [[ -n "$freed_space" ]]; then if [[ -n "$freed_space" ]]; then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew cleanup ${GREEN}($freed_space)${NC}" echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew cleanup ${GREEN}($freed_space)${NC}"
@@ -133,21 +110,18 @@ clean_homebrew() {
elif [[ $elapsed -ge $timeout_seconds ]]; then elif [[ $elapsed -ge $timeout_seconds ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Homebrew cleanup timed out · run ${GRAY}brew cleanup${NC} manually" echo -e " ${YELLOW}${ICON_WARNING}${NC} Homebrew cleanup timed out · run ${GRAY}brew cleanup${NC} manually"
fi fi
# Process autoremove output - only show if packages were removed # Process autoremove output - only show if packages were removed
if [[ "$autoremove_success" == "true" && -f "$autoremove_tmp_file" ]]; then if [[ "$autoremove_success" == "true" && -f "$autoremove_tmp_file" ]]; then
local autoremove_output local autoremove_output
autoremove_output=$(cat "$autoremove_tmp_file" 2> /dev/null || echo "") autoremove_output=$(cat "$autoremove_tmp_file" 2> /dev/null || echo "")
local removed_packages local removed_packages
removed_packages=$(printf '%s\n' "$autoremove_output" | grep -c "^Uninstalling" 2> /dev/null || true) removed_packages=$(printf '%s\n' "$autoremove_output" | grep -c "^Uninstalling" 2> /dev/null || true)
if [[ $removed_packages -gt 0 ]]; then if [[ $removed_packages -gt 0 ]]; then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Removed orphaned dependencies (${removed_packages} packages)" echo -e " ${GREEN}${ICON_SUCCESS}${NC} Removed orphaned dependencies (${removed_packages} packages)"
fi fi
elif [[ $elapsed -ge $timeout_seconds ]]; then elif [[ $elapsed -ge $timeout_seconds ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Autoremove timed out · run ${GRAY}brew autoremove${NC} manually" echo -e " ${YELLOW}${ICON_WARNING}${NC} Autoremove timed out · run ${GRAY}brew autoremove${NC} manually"
fi fi
# Update cache timestamp on successful completion or when cleanup was intelligently skipped # Update cache timestamp on successful completion or when cleanup was intelligently skipped
# This prevents repeated cache size checks within the 7-day window # This prevents repeated cache size checks within the 7-day window
if [[ "$skip_cleanup" == "true" ]] || [[ "$brew_success" == "true" ]] || [[ "$autoremove_success" == "true" ]]; then if [[ "$skip_cleanup" == "true" ]] || [[ "$brew_success" == "true" ]] || [[ "$autoremove_success" == "true" ]]; then

View File

@@ -1,19 +1,14 @@
#!/bin/bash #!/bin/bash
# Cache Cleanup Module # Cache Cleanup Module
set -euo pipefail set -euo pipefail
# Trigger all TCC permission dialogs upfront to avoid random interruptions
# Only runs once (uses ~/.cache/mole/permissions_granted flag) # Only runs once (uses ~/.cache/mole/permissions_granted flag)
# Trigger all TCC permission dialogs upfront to avoid random interruptions
check_tcc_permissions() { check_tcc_permissions() {
# Only check in interactive mode # Only check in interactive mode
[[ -t 1 ]] || return 0 [[ -t 1 ]] || return 0
local permission_flag="$HOME/.cache/mole/permissions_granted" local permission_flag="$HOME/.cache/mole/permissions_granted"
# Skip if permissions were already granted # Skip if permissions were already granted
[[ -f "$permission_flag" ]] && return 0 [[ -f "$permission_flag" ]] && return 0
# Key protected directories that require TCC approval # Key protected directories that require TCC approval
local -a tcc_dirs=( local -a tcc_dirs=(
"$HOME/Library/Caches" "$HOME/Library/Caches"
@@ -22,14 +17,12 @@ check_tcc_permissions() {
"$HOME/Library/Containers" "$HOME/Library/Containers"
"$HOME/.cache" "$HOME/.cache"
) )
# Quick permission test - if first directory is accessible, likely others are too # Quick permission test - if first directory is accessible, likely others are too
# Use simple ls test instead of find to avoid triggering permission dialogs prematurely # Use simple ls test instead of find to avoid triggering permission dialogs prematurely
local needs_permission_check=false local needs_permission_check=false
if ! ls "$HOME/Library/Caches" > /dev/null 2>&1; then if ! ls "$HOME/Library/Caches" > /dev/null 2>&1; then
needs_permission_check=true needs_permission_check=true
fi fi
if [[ "$needs_permission_check" == "true" ]]; then if [[ "$needs_permission_check" == "true" ]]; then
echo "" echo ""
echo -e "${BLUE}First-time setup${NC}" echo -e "${BLUE}First-time setup${NC}"
@@ -38,44 +31,35 @@ check_tcc_permissions() {
echo "" echo ""
echo -ne "${PURPLE}${ICON_ARROW}${NC} Press ${GREEN}Enter${NC} to continue: " echo -ne "${PURPLE}${ICON_ARROW}${NC} Press ${GREEN}Enter${NC} to continue: "
read -r read -r
MOLE_SPINNER_PREFIX="" start_inline_spinner "Requesting permissions..." MOLE_SPINNER_PREFIX="" start_inline_spinner "Requesting permissions..."
# Trigger all TCC prompts upfront by accessing each directory # Trigger all TCC prompts upfront by accessing each directory
# Using find -maxdepth 1 ensures we touch the directory without deep scanning # Using find -maxdepth 1 ensures we touch the directory without deep scanning
for dir in "${tcc_dirs[@]}"; do for dir in "${tcc_dirs[@]}"; do
[[ -d "$dir" ]] && command find "$dir" -maxdepth 1 -type d > /dev/null 2>&1 [[ -d "$dir" ]] && command find "$dir" -maxdepth 1 -type d > /dev/null 2>&1
done done
stop_inline_spinner stop_inline_spinner
echo "" echo ""
fi fi
# Mark permissions as granted (won't prompt again) # Mark permissions as granted (won't prompt again)
ensure_user_file "$permission_flag" ensure_user_file "$permission_flag"
return 0
} }
# Clean browser Service Worker cache, protecting web editing tools (capcut, photopea, pixlr)
# Args: $1=browser_name, $2=cache_path # Args: $1=browser_name, $2=cache_path
# Clean browser Service Worker cache, protecting web editing tools (capcut, photopea, pixlr)
clean_service_worker_cache() { clean_service_worker_cache() {
local browser_name="$1" local browser_name="$1"
local cache_path="$2" local cache_path="$2"
[[ ! -d "$cache_path" ]] && return 0 [[ ! -d "$cache_path" ]] && return 0
local cleaned_size=0 local cleaned_size=0
local protected_count=0 local protected_count=0
# Find all cache directories and calculate sizes with timeout protection # Find all cache directories and calculate sizes with timeout protection
while IFS= read -r cache_dir; do while IFS= read -r cache_dir; do
[[ ! -d "$cache_dir" ]] && continue [[ ! -d "$cache_dir" ]] && continue
# Extract domain from path using regex # Extract domain from path using regex
# Pattern matches: letters/numbers, hyphens, then dot, then TLD # Pattern matches: letters/numbers, hyphens, then dot, then TLD
# Example: "abc123_https_example.com_0" → "example.com" # Example: "abc123_https_example.com_0" → "example.com"
local domain=$(basename "$cache_dir" | grep -oE '[a-zA-Z0-9][-a-zA-Z0-9]*\.[a-zA-Z]{2,}' | head -1 || echo "") local domain=$(basename "$cache_dir" | grep -oE '[a-zA-Z0-9][-a-zA-Z0-9]*\.[a-zA-Z]{2,}' | head -1 || echo "")
local size=$(run_with_timeout 5 get_path_size_kb "$cache_dir") local size=$(run_with_timeout 5 get_path_size_kb "$cache_dir")
# Check if domain is protected # Check if domain is protected
local is_protected=false local is_protected=false
for protected_domain in "${PROTECTED_SW_DOMAINS[@]}"; do for protected_domain in "${PROTECTED_SW_DOMAINS[@]}"; do
@@ -85,7 +69,6 @@ clean_service_worker_cache() {
break break
fi fi
done done
# Clean if not protected # Clean if not protected
if [[ "$is_protected" == "false" ]]; then if [[ "$is_protected" == "false" ]]; then
if [[ "$DRY_RUN" != "true" ]]; then if [[ "$DRY_RUN" != "true" ]]; then
@@ -94,7 +77,6 @@ clean_service_worker_cache() {
cleaned_size=$((cleaned_size + size)) cleaned_size=$((cleaned_size + size))
fi fi
done < <(run_with_timeout 10 sh -c "find '$cache_path' -type d -depth 2 2> /dev/null || true") done < <(run_with_timeout 10 sh -c "find '$cache_path' -type d -depth 2 2> /dev/null || true")
if [[ $cleaned_size -gt 0 ]]; then if [[ $cleaned_size -gt 0 ]]; then
# Temporarily stop spinner for clean output # Temporarily stop spinner for clean output
local spinner_was_running=false local spinner_was_running=false
@@ -102,7 +84,6 @@ clean_service_worker_cache() {
stop_inline_spinner stop_inline_spinner
spinner_was_running=true spinner_was_running=true
fi fi
local cleaned_mb=$((cleaned_size / 1024)) local cleaned_mb=$((cleaned_size / 1024))
if [[ "$DRY_RUN" != "true" ]]; then if [[ "$DRY_RUN" != "true" ]]; then
if [[ $protected_count -gt 0 ]]; then if [[ $protected_count -gt 0 ]]; then
@@ -114,19 +95,16 @@ clean_service_worker_cache() {
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $browser_name Service Worker (would clean ${cleaned_mb}MB, ${protected_count} protected)" echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $browser_name Service Worker (would clean ${cleaned_mb}MB, ${protected_count} protected)"
fi fi
note_activity note_activity
# Restart spinner if it was running # Restart spinner if it was running
if [[ "$spinner_was_running" == "true" ]]; then if [[ "$spinner_was_running" == "true" ]]; then
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning browser Service Worker caches..." MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning browser Service Worker caches..."
fi fi
fi fi
} }
# Clean Next.js (.next/cache) and Python (__pycache__) build caches
# Uses maxdepth 3, excludes Library/.Trash/node_modules, 10s timeout per scan # Uses maxdepth 3, excludes Library/.Trash/node_modules, 10s timeout per scan
# Clean Next.js (.next/cache) and Python (__pycache__) build caches
clean_project_caches() { clean_project_caches() {
stop_inline_spinner 2> /dev/null || true stop_inline_spinner 2> /dev/null || true
# Quick check: skip if user likely doesn't have development projects # Quick check: skip if user likely doesn't have development projects
local has_dev_projects=false local has_dev_projects=false
local -a common_dev_dirs=( local -a common_dev_dirs=(
@@ -149,14 +127,12 @@ clean_project_caches() {
"$HOME/dotnet" "$HOME/dotnet"
"$HOME/node" "$HOME/node"
) )
for dir in "${common_dev_dirs[@]}"; do for dir in "${common_dev_dirs[@]}"; do
if [[ -d "$dir" ]]; then if [[ -d "$dir" ]]; then
has_dev_projects=true has_dev_projects=true
break break
fi fi
done done
# If no common dev directories found, perform feature-based detection # If no common dev directories found, perform feature-based detection
# Check for project markers in $HOME (node_modules, .git, target, etc.) # Check for project markers in $HOME (node_modules, .git, target, etc.)
if [[ "$has_dev_projects" == "false" ]]; then if [[ "$has_dev_projects" == "false" ]]; then
@@ -170,14 +146,12 @@ clean_project_caches() {
"pom.xml" "pom.xml"
"build.gradle" "build.gradle"
) )
local spinner_active=false local spinner_active=false
if [[ -t 1 ]]; then if [[ -t 1 ]]; then
MOLE_SPINNER_PREFIX=" " MOLE_SPINNER_PREFIX=" "
start_inline_spinner "Detecting dev projects..." start_inline_spinner "Detecting dev projects..."
spinner_active=true spinner_active=true
fi fi
for marker in "${project_markers[@]}"; do for marker in "${project_markers[@]}"; do
# Quick check with maxdepth 2 and 3s timeout to avoid slow scans # Quick check with maxdepth 2 and 3s timeout to avoid slow scans
if run_with_timeout 3 sh -c "find '$HOME' -maxdepth 2 -name '$marker' -not -path '*/Library/*' -not -path '*/.Trash/*' 2>/dev/null | head -1" | grep -q .; then if run_with_timeout 3 sh -c "find '$HOME' -maxdepth 2 -name '$marker' -not -path '*/Library/*' -not -path '*/.Trash/*' 2>/dev/null | head -1" | grep -q .; then
@@ -185,26 +159,21 @@ clean_project_caches() {
break break
fi fi
done done
if [[ "$spinner_active" == "true" ]]; then if [[ "$spinner_active" == "true" ]]; then
stop_inline_spinner 2> /dev/null || true stop_inline_spinner 2> /dev/null || true
fi fi
# If still no dev projects found, skip scanning # If still no dev projects found, skip scanning
[[ "$has_dev_projects" == "false" ]] && return 0 [[ "$has_dev_projects" == "false" ]] && return 0
fi fi
if [[ -t 1 ]]; then if [[ -t 1 ]]; then
MOLE_SPINNER_PREFIX=" " MOLE_SPINNER_PREFIX=" "
start_inline_spinner "Searching project caches..." start_inline_spinner "Searching project caches..."
fi fi
local nextjs_tmp_file local nextjs_tmp_file
nextjs_tmp_file=$(create_temp_file) nextjs_tmp_file=$(create_temp_file)
local pycache_tmp_file local pycache_tmp_file
pycache_tmp_file=$(create_temp_file) pycache_tmp_file=$(create_temp_file)
local find_timeout=10 local find_timeout=10
# 1. Start Next.js search # 1. Start Next.js search
( (
command find "$HOME" -P -mount -type d -name ".next" -maxdepth 3 \ command find "$HOME" -P -mount -type d -name ".next" -maxdepth 3 \
@@ -215,7 +184,6 @@ clean_project_caches() {
2> /dev/null || true 2> /dev/null || true
) > "$nextjs_tmp_file" 2>&1 & ) > "$nextjs_tmp_file" 2>&1 &
local next_pid=$! local next_pid=$!
# 2. Start Python search # 2. Start Python search
( (
command find "$HOME" -P -mount -type d -name "__pycache__" -maxdepth 3 \ command find "$HOME" -P -mount -type d -name "__pycache__" -maxdepth 3 \
@@ -226,7 +194,6 @@ clean_project_caches() {
2> /dev/null || true 2> /dev/null || true
) > "$pycache_tmp_file" 2>&1 & ) > "$pycache_tmp_file" 2>&1 &
local py_pid=$! local py_pid=$!
# 3. Wait for both with timeout (using smaller intervals for better responsiveness) # 3. Wait for both with timeout (using smaller intervals for better responsiveness)
local elapsed=0 local elapsed=0
local check_interval=0.2 # Check every 200ms instead of 1s for smoother experience local check_interval=0.2 # Check every 200ms instead of 1s for smoother experience
@@ -237,13 +204,11 @@ clean_project_caches() {
sleep $check_interval sleep $check_interval
elapsed=$(echo "$elapsed + $check_interval" | awk '{print $1 + $2}') elapsed=$(echo "$elapsed + $check_interval" | awk '{print $1 + $2}')
done done
# 4. Clean up any stuck processes # 4. Clean up any stuck processes
for pid in $next_pid $py_pid; do for pid in $next_pid $py_pid; do
if kill -0 "$pid" 2> /dev/null; then if kill -0 "$pid" 2> /dev/null; then
# Send TERM signal first # Send TERM signal first
kill -TERM "$pid" 2> /dev/null || true kill -TERM "$pid" 2> /dev/null || true
# Wait up to 2 seconds for graceful termination # Wait up to 2 seconds for graceful termination
local grace_period=0 local grace_period=0
while [[ $grace_period -lt 20 ]]; do while [[ $grace_period -lt 20 ]]; do
@@ -253,28 +218,23 @@ clean_project_caches() {
sleep 0.1 sleep 0.1
((grace_period++)) ((grace_period++))
done done
# Force kill if still running # Force kill if still running
if kill -0 "$pid" 2> /dev/null; then if kill -0 "$pid" 2> /dev/null; then
kill -KILL "$pid" 2> /dev/null || true kill -KILL "$pid" 2> /dev/null || true
fi fi
# Final wait (should be instant now) # Final wait (should be instant now)
wait "$pid" 2> /dev/null || true wait "$pid" 2> /dev/null || true
else else
wait "$pid" 2> /dev/null || true wait "$pid" 2> /dev/null || true
fi fi
done done
if [[ -t 1 ]]; then if [[ -t 1 ]]; then
stop_inline_spinner stop_inline_spinner
fi fi
# 5. Process Next.js results # 5. Process Next.js results
while IFS= read -r next_dir; do while IFS= read -r next_dir; do
[[ -d "$next_dir/cache" ]] && safe_clean "$next_dir/cache"/* "Next.js build cache" || true [[ -d "$next_dir/cache" ]] && safe_clean "$next_dir/cache"/* "Next.js build cache" || true
done < "$nextjs_tmp_file" done < "$nextjs_tmp_file"
# 6. Process Python results # 6. Process Python results
while IFS= read -r pycache; do while IFS= read -r pycache; do
[[ -d "$pycache" ]] && safe_clean "$pycache"/* "Python bytecode cache" || true [[ -d "$pycache" ]] && safe_clean "$pycache"/* "Python bytecode cache" || true

View File

@@ -1,13 +1,11 @@
#!/bin/bash #!/bin/bash
# Developer Tools Cleanup Module # Developer Tools Cleanup Module
set -euo pipefail set -euo pipefail
# Helper function to clean tool caches using their built-in commands # Helper function to clean tool caches using their built-in commands
# Args: $1 - description, $@ - command to execute # Args: $1 - description, $@ - command to execute
# Env: DRY_RUN # Env: DRY_RUN
# Note: Try to estimate potential savings (many tool caches don't have a direct path,
# so we just report the action if we can't easily find a path) # so we just report the action if we can't easily find a path)
# Note: Try to estimate potential savings (many tool caches don't have a direct path,
clean_tool_cache() { clean_tool_cache() {
local description="$1" local description="$1"
shift shift
@@ -18,30 +16,27 @@ clean_tool_cache() {
else else
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $description · would clean" echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $description · would clean"
fi fi
return 0
} }
# Clean npm cache (command + directories) # Clean npm cache (command + directories)
# npm cache clean clears official npm cache, safe_clean handles alternative package managers
# Env: DRY_RUN # Env: DRY_RUN
# npm cache clean clears official npm cache, safe_clean handles alternative package managers
clean_dev_npm() { clean_dev_npm() {
if command -v npm > /dev/null 2>&1; then if command -v npm > /dev/null 2>&1; then
# clean_tool_cache now calculates size before cleanup for better statistics # clean_tool_cache now calculates size before cleanup for better statistics
clean_tool_cache "npm cache" npm cache clean --force clean_tool_cache "npm cache" npm cache clean --force
note_activity note_activity
fi fi
# Clean pnpm store cache # Clean pnpm store cache
local pnpm_default_store=~/Library/pnpm/store local pnpm_default_store=~/Library/pnpm/store
if command -v pnpm > /dev/null 2>&1; then if command -v pnpm > /dev/null 2>&1; then
# Use pnpm's built-in prune command # Use pnpm's built-in prune command
clean_tool_cache "pnpm cache" pnpm store prune clean_tool_cache "pnpm cache" pnpm store prune
# Get the actual store path to check if default is orphaned # Get the actual store path to check if default is orphaned
local pnpm_store_path local pnpm_store_path
start_section_spinner "Checking store path..." start_section_spinner "Checking store path..."
pnpm_store_path=$(run_with_timeout 2 pnpm store path 2> /dev/null) || pnpm_store_path="" pnpm_store_path=$(run_with_timeout 2 pnpm store path 2> /dev/null) || pnpm_store_path=""
stop_section_spinner stop_section_spinner
# If store path is different from default, clean the orphaned default # If store path is different from default, clean the orphaned default
if [[ -n "$pnpm_store_path" && "$pnpm_store_path" != "$pnpm_default_store" ]]; then if [[ -n "$pnpm_store_path" && "$pnpm_store_path" != "$pnpm_default_store" ]]; then
safe_clean "$pnpm_default_store"/* "Orphaned pnpm store" safe_clean "$pnpm_default_store"/* "Orphaned pnpm store"
@@ -51,24 +46,21 @@ clean_dev_npm() {
safe_clean "$pnpm_default_store"/* "pnpm store" safe_clean "$pnpm_default_store"/* "pnpm store"
fi fi
note_activity note_activity
# Clean alternative package manager caches # Clean alternative package manager caches
safe_clean ~/.tnpm/_cacache/* "tnpm cache directory" safe_clean ~/.tnpm/_cacache/* "tnpm cache directory"
safe_clean ~/.tnpm/_logs/* "tnpm logs" safe_clean ~/.tnpm/_logs/* "tnpm logs"
safe_clean ~/.yarn/cache/* "Yarn cache" safe_clean ~/.yarn/cache/* "Yarn cache"
safe_clean ~/.bun/install/cache/* "Bun cache" safe_clean ~/.bun/install/cache/* "Bun cache"
} }
# Clean Python/pip cache (command + directories) # Clean Python/pip cache (command + directories)
# pip cache purge clears official pip cache, safe_clean handles other Python tools
# Env: DRY_RUN # Env: DRY_RUN
# pip cache purge clears official pip cache, safe_clean handles other Python tools
clean_dev_python() { clean_dev_python() {
if command -v pip3 > /dev/null 2>&1; then if command -v pip3 > /dev/null 2>&1; then
# clean_tool_cache now calculates size before cleanup for better statistics # clean_tool_cache now calculates size before cleanup for better statistics
clean_tool_cache "pip cache" bash -c 'pip3 cache purge >/dev/null 2>&1 || true' clean_tool_cache "pip cache" bash -c 'pip3 cache purge >/dev/null 2>&1 || true'
note_activity note_activity
fi fi
# Clean Python ecosystem caches # Clean Python ecosystem caches
safe_clean ~/.pyenv/cache/* "pyenv cache" safe_clean ~/.pyenv/cache/* "pyenv cache"
safe_clean ~/.cache/poetry/* "Poetry cache" safe_clean ~/.cache/poetry/* "Poetry cache"
@@ -84,10 +76,9 @@ clean_dev_python() {
safe_clean ~/anaconda3/pkgs/* "Anaconda packages cache" safe_clean ~/anaconda3/pkgs/* "Anaconda packages cache"
safe_clean ~/.cache/wandb/* "Weights & Biases cache" safe_clean ~/.cache/wandb/* "Weights & Biases cache"
} }
# Clean Go cache (command + directories) # Clean Go cache (command + directories)
# go clean handles build and module caches comprehensively
# Env: DRY_RUN # Env: DRY_RUN
# go clean handles build and module caches comprehensively
clean_dev_go() { clean_dev_go() {
if command -v go > /dev/null 2>&1; then if command -v go > /dev/null 2>&1; then
# clean_tool_cache now calculates size before cleanup for better statistics # clean_tool_cache now calculates size before cleanup for better statistics
@@ -95,16 +86,14 @@ clean_dev_go() {
note_activity note_activity
fi fi
} }
# Clean Rust/cargo cache directories # Clean Rust/cargo cache directories
clean_dev_rust() { clean_dev_rust() {
safe_clean ~/.cargo/registry/cache/* "Rust cargo cache" safe_clean ~/.cargo/registry/cache/* "Rust cargo cache"
safe_clean ~/.cargo/git/* "Cargo git cache" safe_clean ~/.cargo/git/* "Cargo git cache"
safe_clean ~/.rustup/downloads/* "Rust downloads cache" safe_clean ~/.rustup/downloads/* "Rust downloads cache"
} }
# Clean Docker cache (command + directories)
# Env: DRY_RUN # Env: DRY_RUN
# Clean Docker cache (command + directories)
clean_dev_docker() { clean_dev_docker() {
if command -v docker > /dev/null 2>&1; then if command -v docker > /dev/null 2>&1; then
if [[ "$DRY_RUN" != "true" ]]; then if [[ "$DRY_RUN" != "true" ]]; then
@@ -115,7 +104,6 @@ clean_dev_docker() {
docker_running=true docker_running=true
fi fi
stop_section_spinner stop_section_spinner
if [[ "$docker_running" == "true" ]]; then if [[ "$docker_running" == "true" ]]; then
clean_tool_cache "Docker build cache" docker builder prune -af clean_tool_cache "Docker build cache" docker builder prune -af
else else
@@ -131,7 +119,6 @@ clean_dev_docker() {
retry_success=true retry_success=true
fi fi
stop_section_spinner stop_section_spinner
if [[ "$retry_success" == "true" ]]; then if [[ "$retry_success" == "true" ]]; then
clean_tool_cache "Docker build cache" docker builder prune -af clean_tool_cache "Docker build cache" docker builder prune -af
else else
@@ -150,12 +137,10 @@ clean_dev_docker() {
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Docker build cache · would clean" echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Docker build cache · would clean"
fi fi
fi fi
safe_clean ~/.docker/buildx/cache/* "Docker BuildX cache" safe_clean ~/.docker/buildx/cache/* "Docker BuildX cache"
} }
# Clean Nix package manager
# Env: DRY_RUN # Env: DRY_RUN
# Clean Nix package manager
clean_dev_nix() { clean_dev_nix() {
if command -v nix-collect-garbage > /dev/null 2>&1; then if command -v nix-collect-garbage > /dev/null 2>&1; then
if [[ "$DRY_RUN" != "true" ]]; then if [[ "$DRY_RUN" != "true" ]]; then
@@ -166,7 +151,6 @@ clean_dev_nix() {
note_activity note_activity
fi fi
} }
# Clean cloud CLI tools cache # Clean cloud CLI tools cache
clean_dev_cloud() { clean_dev_cloud() {
safe_clean ~/.kube/cache/* "Kubernetes cache" safe_clean ~/.kube/cache/* "Kubernetes cache"
@@ -175,7 +159,6 @@ clean_dev_cloud() {
safe_clean ~/.config/gcloud/logs/* "Google Cloud logs" safe_clean ~/.config/gcloud/logs/* "Google Cloud logs"
safe_clean ~/.azure/logs/* "Azure CLI logs" safe_clean ~/.azure/logs/* "Azure CLI logs"
} }
# Clean frontend build tool caches # Clean frontend build tool caches
clean_dev_frontend() { clean_dev_frontend() {
safe_clean ~/.cache/typescript/* "TypeScript cache" safe_clean ~/.cache/typescript/* "TypeScript cache"
@@ -190,23 +173,20 @@ clean_dev_frontend() {
safe_clean ~/.cache/eslint/* "ESLint cache" safe_clean ~/.cache/eslint/* "ESLint cache"
safe_clean ~/.cache/prettier/* "Prettier cache" safe_clean ~/.cache/prettier/* "Prettier cache"
} }
# Clean mobile development tools # Clean mobile development tools
# iOS simulator cleanup can free significant space (70GB+ in some cases) # iOS simulator cleanup can free significant space (70GB+ in some cases)
# DeviceSupport files accumulate for each iOS version connected
# Simulator runtime caches can grow large over time # Simulator runtime caches can grow large over time
# DeviceSupport files accumulate for each iOS version connected
clean_dev_mobile() { clean_dev_mobile() {
# Clean Xcode unavailable simulators # Clean Xcode unavailable simulators
# Removes old and unused local iOS simulator data from old unused runtimes # Removes old and unused local iOS simulator data from old unused runtimes
# Can free up significant space (70GB+ in some cases) # Can free up significant space (70GB+ in some cases)
if command -v xcrun > /dev/null 2>&1; then if command -v xcrun > /dev/null 2>&1; then
debug_log "Checking for unavailable Xcode simulators" debug_log "Checking for unavailable Xcode simulators"
if [[ "$DRY_RUN" == "true" ]]; then if [[ "$DRY_RUN" == "true" ]]; then
clean_tool_cache "Xcode unavailable simulators" xcrun simctl delete unavailable clean_tool_cache "Xcode unavailable simulators" xcrun simctl delete unavailable
else else
start_section_spinner "Checking unavailable simulators..." start_section_spinner "Checking unavailable simulators..."
# Run command manually to control UI output order # Run command manually to control UI output order
if xcrun simctl delete unavailable > /dev/null 2>&1; then if xcrun simctl delete unavailable > /dev/null 2>&1; then
stop_section_spinner stop_section_spinner
@@ -218,7 +198,6 @@ clean_dev_mobile() {
fi fi
note_activity note_activity
fi fi
# Clean iOS DeviceSupport - more comprehensive cleanup # Clean iOS DeviceSupport - more comprehensive cleanup
# DeviceSupport directories store debug symbols for each iOS version # DeviceSupport directories store debug symbols for each iOS version
# Safe to clean caches and logs, but preserve device support files themselves # Safe to clean caches and logs, but preserve device support files themselves
@@ -226,11 +205,9 @@ clean_dev_mobile() {
safe_clean ~/Library/Developer/Xcode/iOS\ DeviceSupport/*.log "iOS device support logs" safe_clean ~/Library/Developer/Xcode/iOS\ DeviceSupport/*.log "iOS device support logs"
safe_clean ~/Library/Developer/Xcode/watchOS\ DeviceSupport/*/Symbols/System/Library/Caches/* "watchOS device symbol cache" safe_clean ~/Library/Developer/Xcode/watchOS\ DeviceSupport/*/Symbols/System/Library/Caches/* "watchOS device symbol cache"
safe_clean ~/Library/Developer/Xcode/tvOS\ DeviceSupport/*/Symbols/System/Library/Caches/* "tvOS device symbol cache" safe_clean ~/Library/Developer/Xcode/tvOS\ DeviceSupport/*/Symbols/System/Library/Caches/* "tvOS device symbol cache"
# Clean simulator runtime caches # Clean simulator runtime caches
# RuntimeRoot caches can accumulate system library caches # RuntimeRoot caches can accumulate system library caches
safe_clean ~/Library/Developer/CoreSimulator/Profiles/Runtimes/*/Contents/Resources/RuntimeRoot/System/Library/Caches/* "Simulator runtime cache" safe_clean ~/Library/Developer/CoreSimulator/Profiles/Runtimes/*/Contents/Resources/RuntimeRoot/System/Library/Caches/* "Simulator runtime cache"
safe_clean ~/Library/Caches/Google/AndroidStudio*/* "Android Studio cache" safe_clean ~/Library/Caches/Google/AndroidStudio*/* "Android Studio cache"
safe_clean ~/Library/Caches/CocoaPods/* "CocoaPods cache" safe_clean ~/Library/Caches/CocoaPods/* "CocoaPods cache"
safe_clean ~/.cache/flutter/* "Flutter cache" safe_clean ~/.cache/flutter/* "Flutter cache"
@@ -239,7 +216,6 @@ clean_dev_mobile() {
safe_clean ~/Library/Developer/Xcode/UserData/IB\ Support/* "Xcode Interface Builder cache" safe_clean ~/Library/Developer/Xcode/UserData/IB\ Support/* "Xcode Interface Builder cache"
safe_clean ~/.cache/swift-package-manager/* "Swift package manager cache" safe_clean ~/.cache/swift-package-manager/* "Swift package manager cache"
} }
# Clean JVM ecosystem tools # Clean JVM ecosystem tools
clean_dev_jvm() { clean_dev_jvm() {
safe_clean ~/.gradle/caches/* "Gradle caches" safe_clean ~/.gradle/caches/* "Gradle caches"
@@ -247,7 +223,6 @@ clean_dev_jvm() {
safe_clean ~/.sbt/* "SBT cache" safe_clean ~/.sbt/* "SBT cache"
safe_clean ~/.ivy2/cache/* "Ivy cache" safe_clean ~/.ivy2/cache/* "Ivy cache"
} }
# Clean other language tools # Clean other language tools
clean_dev_other_langs() { clean_dev_other_langs() {
safe_clean ~/.bundle/cache/* "Ruby Bundler cache" safe_clean ~/.bundle/cache/* "Ruby Bundler cache"
@@ -258,7 +233,6 @@ clean_dev_other_langs() {
safe_clean ~/.cache/zig/* "Zig cache" safe_clean ~/.cache/zig/* "Zig cache"
safe_clean ~/Library/Caches/deno/* "Deno cache" safe_clean ~/Library/Caches/deno/* "Deno cache"
} }
# Clean CI/CD and DevOps tools # Clean CI/CD and DevOps tools
clean_dev_cicd() { clean_dev_cicd() {
safe_clean ~/.cache/terraform/* "Terraform cache" safe_clean ~/.cache/terraform/* "Terraform cache"
@@ -270,7 +244,6 @@ clean_dev_cicd() {
safe_clean ~/.circleci/cache/* "CircleCI cache" safe_clean ~/.circleci/cache/* "CircleCI cache"
safe_clean ~/.sonar/* "SonarQube cache" safe_clean ~/.sonar/* "SonarQube cache"
} }
# Clean database tools # Clean database tools
clean_dev_database() { clean_dev_database() {
safe_clean ~/Library/Caches/com.sequel-ace.sequel-ace/* "Sequel Ace cache" safe_clean ~/Library/Caches/com.sequel-ace.sequel-ace/* "Sequel Ace cache"
@@ -280,7 +253,6 @@ clean_dev_database() {
safe_clean ~/Library/Caches/com.dbeaver.* "DBeaver cache" safe_clean ~/Library/Caches/com.dbeaver.* "DBeaver cache"
safe_clean ~/Library/Caches/com.redis.RedisInsight "Redis Insight cache" safe_clean ~/Library/Caches/com.redis.RedisInsight "Redis Insight cache"
} }
# Clean API/network debugging tools # Clean API/network debugging tools
clean_dev_api_tools() { clean_dev_api_tools() {
safe_clean ~/Library/Caches/com.postmanlabs.mac/* "Postman cache" safe_clean ~/Library/Caches/com.postmanlabs.mac/* "Postman cache"
@@ -290,7 +262,6 @@ clean_dev_api_tools() {
safe_clean ~/Library/Caches/com.charlesproxy.charles/* "Charles Proxy cache" safe_clean ~/Library/Caches/com.charlesproxy.charles/* "Charles Proxy cache"
safe_clean ~/Library/Caches/com.proxyman.NSProxy/* "Proxyman cache" safe_clean ~/Library/Caches/com.proxyman.NSProxy/* "Proxyman cache"
} }
# Clean misc dev tools # Clean misc dev tools
clean_dev_misc() { clean_dev_misc() {
safe_clean ~/Library/Caches/com.unity3d.*/* "Unity cache" safe_clean ~/Library/Caches/com.unity3d.*/* "Unity cache"
@@ -301,7 +272,6 @@ clean_dev_misc() {
safe_clean ~/Library/Caches/KSCrash/* "KSCrash reports" safe_clean ~/Library/Caches/KSCrash/* "KSCrash reports"
safe_clean ~/Library/Caches/com.crashlytics.data/* "Crashlytics data" safe_clean ~/Library/Caches/com.crashlytics.data/* "Crashlytics data"
} }
# Clean shell and version control # Clean shell and version control
clean_dev_shell() { clean_dev_shell() {
safe_clean ~/.gitconfig.lock "Git config lock" safe_clean ~/.gitconfig.lock "Git config lock"
@@ -312,7 +282,6 @@ clean_dev_shell() {
safe_clean ~/.zsh_history.bak* "Zsh history backup" safe_clean ~/.zsh_history.bak* "Zsh history backup"
safe_clean ~/.cache/pre-commit/* "pre-commit cache" safe_clean ~/.cache/pre-commit/* "pre-commit cache"
} }
# Clean network utilities # Clean network utilities
clean_dev_network() { clean_dev_network() {
safe_clean ~/.cache/curl/* "curl cache" safe_clean ~/.cache/curl/* "curl cache"
@@ -320,26 +289,22 @@ clean_dev_network() {
safe_clean ~/Library/Caches/curl/* "macOS curl cache" safe_clean ~/Library/Caches/curl/* "macOS curl cache"
safe_clean ~/Library/Caches/wget/* "macOS wget cache" safe_clean ~/Library/Caches/wget/* "macOS wget cache"
} }
# Clean orphaned SQLite temporary files (-shm and -wal files) # Clean orphaned SQLite temporary files (-shm and -wal files)
# Strategy: Only clean truly orphaned temp files where base database is missing # Strategy: Only clean truly orphaned temp files where base database is missing
# This is fast and safe - skip complex checks for files with existing base DB
# Env: DRY_RUN # Env: DRY_RUN
# This is fast and safe - skip complex checks for files with existing base DB
clean_sqlite_temp_files() { clean_sqlite_temp_files() {
# Skip this cleanup due to low ROI (收益比低,经常没东西可清理) # Skip this cleanup due to low ROI (收益比低,经常没东西可清理)
# Find scan is still slow even optimized, and orphaned files are rare # Find scan is still slow even optimized, and orphaned files are rare
return 0 return 0
} }
# Main developer tools cleanup function # Main developer tools cleanup function
# Calls all specialized cleanup functions
# Env: DRY_RUN # Env: DRY_RUN
# Calls all specialized cleanup functions
clean_developer_tools() { clean_developer_tools() {
stop_section_spinner stop_section_spinner
# Clean SQLite temporary files first # Clean SQLite temporary files first
clean_sqlite_temp_files clean_sqlite_temp_files
clean_dev_npm clean_dev_npm
clean_dev_python clean_dev_python
clean_dev_go clean_dev_go
@@ -349,10 +314,8 @@ clean_developer_tools() {
clean_dev_nix clean_dev_nix
clean_dev_shell clean_dev_shell
clean_dev_frontend clean_dev_frontend
# Project build caches (delegated to clean_caches module) # Project build caches (delegated to clean_caches module)
clean_project_caches clean_project_caches
clean_dev_mobile clean_dev_mobile
clean_dev_jvm clean_dev_jvm
clean_dev_other_langs clean_dev_other_langs
@@ -361,16 +324,13 @@ clean_developer_tools() {
clean_dev_api_tools clean_dev_api_tools
clean_dev_network clean_dev_network
clean_dev_misc clean_dev_misc
# Homebrew caches and cleanup (delegated to clean_brew module) # Homebrew caches and cleanup (delegated to clean_brew module)
safe_clean ~/Library/Caches/Homebrew/* "Homebrew cache" safe_clean ~/Library/Caches/Homebrew/* "Homebrew cache"
# Clean Homebrew locks intelligently (avoid repeated sudo prompts) # Clean Homebrew locks intelligently (avoid repeated sudo prompts)
local brew_lock_dirs=( local brew_lock_dirs=(
"/opt/homebrew/var/homebrew/locks" "/opt/homebrew/var/homebrew/locks"
"/usr/local/var/homebrew/locks" "/usr/local/var/homebrew/locks"
) )
for lock_dir in "${brew_lock_dirs[@]}"; do for lock_dir in "${brew_lock_dirs[@]}"; do
if [[ -d "$lock_dir" && -w "$lock_dir" ]]; then if [[ -d "$lock_dir" && -w "$lock_dir" ]]; then
# User can write, safe to clean # User can write, safe to clean
@@ -384,6 +344,5 @@ clean_developer_tools() {
fi fi
fi fi
done done
clean_homebrew clean_homebrew
} }

View File

@@ -1,9 +1,7 @@
#!/bin/bash #!/bin/bash
# Project Purge Module (mo purge) # Project Purge Module (mo purge)
# Removes heavy project build artifacts and dependencies # Removes heavy project build artifacts and dependencies
set -euo pipefail set -euo pipefail
# Targets to look for (heavy build artifacts) # Targets to look for (heavy build artifacts)
readonly PURGE_TARGETS=( readonly PURGE_TARGETS=(
"node_modules" "node_modules"
@@ -23,14 +21,11 @@ readonly PURGE_TARGETS=(
".parcel-cache" # Parcel bundler ".parcel-cache" # Parcel bundler
".dart_tool" # Flutter/Dart build cache ".dart_tool" # Flutter/Dart build cache
) )
# Minimum age in days before considering for cleanup # Minimum age in days before considering for cleanup
readonly MIN_AGE_DAYS=7 readonly MIN_AGE_DAYS=7
# Scan depth defaults (relative to search root) # Scan depth defaults (relative to search root)
readonly PURGE_MIN_DEPTH_DEFAULT=2 readonly PURGE_MIN_DEPTH_DEFAULT=2
readonly PURGE_MAX_DEPTH_DEFAULT=8 readonly PURGE_MAX_DEPTH_DEFAULT=8
# Search paths (only project directories) # Search paths (only project directories)
readonly PURGE_SEARCH_PATHS=( readonly PURGE_SEARCH_PATHS=(
"$HOME/www" "$HOME/www"
@@ -42,43 +37,36 @@ readonly PURGE_SEARCH_PATHS=(
"$HOME/Repos" "$HOME/Repos"
"$HOME/Development" "$HOME/Development"
) )
# Check if path is safe to clean (must be inside a project directory)
# Args: $1 - path to check # Args: $1 - path to check
# Check if path is safe to clean (must be inside a project directory)
is_safe_project_artifact() { is_safe_project_artifact() {
local path="$1" local path="$1"
local search_path="$2" local search_path="$2"
# Path must be absolute # Path must be absolute
if [[ "$path" != /* ]]; then if [[ "$path" != /* ]]; then
return 1 return 1
fi fi
# Must not be a direct child of HOME directory # Must not be a direct child of HOME directory
# e.g., ~/.gradle is NOT safe, but ~/Projects/foo/.gradle IS safe # e.g., ~/.gradle is NOT safe, but ~/Projects/foo/.gradle IS safe
local relative_path="${path#"$search_path"/}" local relative_path="${path#"$search_path"/}"
local depth=$(echo "$relative_path" | tr -cd '/' | wc -c) local depth=$(echo "$relative_path" | tr -cd '/' | wc -c)
# Require at least 1 level deep (inside a project folder) # Require at least 1 level deep (inside a project folder)
# e.g., ~/www/weekly/node_modules is OK (depth >= 1) # e.g., ~/www/weekly/node_modules is OK (depth >= 1)
# but ~/www/node_modules is NOT OK (depth < 1) # but ~/www/node_modules is NOT OK (depth < 1)
if [[ $depth -lt 1 ]]; then if [[ $depth -lt 1 ]]; then
return 1 return 1
fi fi
return 0 return 0
} }
# Fast scan using fd or optimized find # Fast scan using fd or optimized find
# Args: $1 - search path, $2 - output file # Args: $1 - search path, $2 - output file
# Scan for purge targets using strict project boundary checks
# Args: $1 - search path, $2 - output file # Args: $1 - search path, $2 - output file
# Scan for purge targets using strict project boundary checks
scan_purge_targets() { scan_purge_targets() {
local search_path="$1" local search_path="$1"
local output_file="$2" local output_file="$2"
local min_depth="${MOLE_PURGE_MIN_DEPTH:-$PURGE_MIN_DEPTH_DEFAULT}" local min_depth="${MOLE_PURGE_MIN_DEPTH:-$PURGE_MIN_DEPTH_DEFAULT}"
local max_depth="${MOLE_PURGE_MAX_DEPTH:-$PURGE_MAX_DEPTH_DEFAULT}" local max_depth="${MOLE_PURGE_MAX_DEPTH:-$PURGE_MAX_DEPTH_DEFAULT}"
if [[ ! "$min_depth" =~ ^[0-9]+$ ]]; then if [[ ! "$min_depth" =~ ^[0-9]+$ ]]; then
min_depth="$PURGE_MIN_DEPTH_DEFAULT" min_depth="$PURGE_MIN_DEPTH_DEFAULT"
fi fi
@@ -88,11 +76,9 @@ scan_purge_targets() {
if [[ "$max_depth" -lt "$min_depth" ]]; then if [[ "$max_depth" -lt "$min_depth" ]]; then
max_depth="$min_depth" max_depth="$min_depth"
fi fi
if [[ ! -d "$search_path" ]]; then if [[ ! -d "$search_path" ]]; then
return return
fi fi
# Use fd for fast parallel search if available # Use fd for fast parallel search if available
if command -v fd > /dev/null 2>&1; then if command -v fd > /dev/null 2>&1; then
local fd_args=( local fd_args=(
@@ -108,11 +94,9 @@ scan_purge_targets() {
"--exclude" ".Trash" "--exclude" ".Trash"
"--exclude" "Applications" "--exclude" "Applications"
) )
for target in "${PURGE_TARGETS[@]}"; do for target in "${PURGE_TARGETS[@]}"; do
fd_args+=("-g" "$target") fd_args+=("-g" "$target")
done done
# Run fd command # Run fd command
fd "${fd_args[@]}" . "$search_path" 2> /dev/null | while IFS= read -r item; do fd "${fd_args[@]}" . "$search_path" 2> /dev/null | while IFS= read -r item; do
if is_safe_project_artifact "$item" "$search_path"; then if is_safe_project_artifact "$item" "$search_path"; then
@@ -123,68 +107,55 @@ scan_purge_targets() {
# Fallback to optimized find with pruning # Fallback to optimized find with pruning
# This prevents descending into heavily nested dirs like node_modules once found, # This prevents descending into heavily nested dirs like node_modules once found,
# providing a massive speedup (O(project_dirs) vs O(files)). # providing a massive speedup (O(project_dirs) vs O(files)).
local prune_args=() local prune_args=()
# 1. Directories to prune (ignore completely) # 1. Directories to prune (ignore completely)
local prune_dirs=(".git" "Library" ".Trash" "Applications") local prune_dirs=(".git" "Library" ".Trash" "Applications")
for dir in "${prune_dirs[@]}"; do for dir in "${prune_dirs[@]}"; do
# -name "DIR" -prune -o # -name "DIR" -prune -o
prune_args+=("-name" "$dir" "-prune" "-o") prune_args+=("-name" "$dir" "-prune" "-o")
done done
# 2. Targets to find (print AND prune) # 2. Targets to find (print AND prune)
# If we find node_modules, we print it and STOP looking inside it # If we find node_modules, we print it and STOP looking inside it
for target in "${PURGE_TARGETS[@]}"; do for target in "${PURGE_TARGETS[@]}"; do
# -name "TARGET" -print -prune -o # -name "TARGET" -print -prune -o
prune_args+=("-name" "$target" "-print" "-prune" "-o") prune_args+=("-name" "$target" "-print" "-prune" "-o")
done done
# Run find command # Run find command
# Logic: ( prune_pattern -prune -o target_pattern -print -prune ) # Logic: ( prune_pattern -prune -o target_pattern -print -prune )
# Note: We rely on implicit recursion for directories that don't match any pattern. # Note: We rely on implicit recursion for directories that don't match any pattern.
# -print is only called explicitly on targets. # -print is only called explicitly on targets.
# Removing the trailing -o from loop construction if necessary? # Removing the trailing -o from loop construction if necessary?
# Actually my loop adds -o at the end. I need to handle that. # Actually my loop adds -o at the end. I need to handle that.
# Let's verify the array construction. # Let's verify the array construction.
# Re-building args cleanly: # Re-building args cleanly:
local find_expr=() local find_expr=()
# Excludes # Excludes
for dir in "${prune_dirs[@]}"; do for dir in "${prune_dirs[@]}"; do
find_expr+=("-name" "$dir" "-prune" "-o") find_expr+=("-name" "$dir" "-prune" "-o")
done done
# Targets # Targets
local i=0 local i=0
for target in "${PURGE_TARGETS[@]}"; do for target in "${PURGE_TARGETS[@]}"; do
find_expr+=("-name" "$target" "-print" "-prune") find_expr+=("-name" "$target" "-print" "-prune")
# Add -o unless it's the very last item of targets # Add -o unless it's the very last item of targets
if [[ $i -lt $((${#PURGE_TARGETS[@]} - 1)) ]]; then if [[ $i -lt $((${#PURGE_TARGETS[@]} - 1)) ]]; then
find_expr+=("-o") find_expr+=("-o")
fi fi
((i++)) ((i++))
done done
command find "$search_path" -mindepth "$min_depth" -maxdepth "$max_depth" -type d \ command find "$search_path" -mindepth "$min_depth" -maxdepth "$max_depth" -type d \
\( "${find_expr[@]}" \) 2> /dev/null | while IFS= read -r item; do \( "${find_expr[@]}" \) 2> /dev/null | while IFS= read -r item; do
if is_safe_project_artifact "$item" "$search_path"; then if is_safe_project_artifact "$item" "$search_path"; then
echo "$item" echo "$item"
fi fi
done | filter_nested_artifacts > "$output_file" done | filter_nested_artifacts > "$output_file"
fi fi
} }
# Filter out nested artifacts (e.g. node_modules inside node_modules) # Filter out nested artifacts (e.g. node_modules inside node_modules)
filter_nested_artifacts() { filter_nested_artifacts() {
while IFS= read -r item; do while IFS= read -r item; do
local parent_dir=$(dirname "$item") local parent_dir=$(dirname "$item")
local is_nested=false local is_nested=false
for target in "${PURGE_TARGETS[@]}"; do for target in "${PURGE_TARGETS[@]}"; do
# Check if parent directory IS a target or IS INSIDE a target # Check if parent directory IS a target or IS INSIDE a target
# e.g. .../node_modules/foo/node_modules -> parent has node_modules # e.g. .../node_modules/foo/node_modules -> parent has node_modules
@@ -194,39 +165,33 @@ filter_nested_artifacts() {
break break
fi fi
done done
if [[ "$is_nested" == "false" ]]; then if [[ "$is_nested" == "false" ]]; then
echo "$item" echo "$item"
fi fi
done done
} }
# Check if a path was modified recently (safety check)
# Args: $1 - path # Args: $1 - path
# Check if a path was modified recently (safety check)
is_recently_modified() { is_recently_modified() {
local path="$1" local path="$1"
local age_days=$MIN_AGE_DAYS local age_days=$MIN_AGE_DAYS
if [[ ! -e "$path" ]]; then if [[ ! -e "$path" ]]; then
return 1 return 1
fi fi
# Get modification time using base.sh helper (handles GNU vs BSD stat) # Get modification time using base.sh helper (handles GNU vs BSD stat)
local mod_time local mod_time
mod_time=$(get_file_mtime "$path") mod_time=$(get_file_mtime "$path")
local current_time=$(date +%s) local current_time=$(date +%s)
local age_seconds=$((current_time - mod_time)) local age_seconds=$((current_time - mod_time))
local age_in_days=$((age_seconds / 86400)) local age_in_days=$((age_seconds / 86400))
if [[ $age_in_days -lt $age_days ]]; then if [[ $age_in_days -lt $age_days ]]; then
return 0 # Recently modified return 0 # Recently modified
else else
return 1 # Old enough to clean return 1 # Old enough to clean
fi fi
} }
# Get human-readable size of directory
# Args: $1 - path # Args: $1 - path
# Get human-readable size of directory
get_dir_size_kb() { get_dir_size_kb() {
local path="$1" local path="$1"
if [[ -d "$path" ]]; then if [[ -d "$path" ]]; then
@@ -235,20 +200,17 @@ get_dir_size_kb() {
echo "0" echo "0"
fi fi
} }
# Simple category selector (for purge only) # Simple category selector (for purge only)
# Args: category names and metadata as arrays (passed via global vars) # Args: category names and metadata as arrays (passed via global vars)
# Returns: selected indices in PURGE_SELECTION_RESULT (comma-separated)
# Uses PURGE_RECENT_CATEGORIES to mark categories with recent items (default unselected) # Uses PURGE_RECENT_CATEGORIES to mark categories with recent items (default unselected)
# Returns: selected indices in PURGE_SELECTION_RESULT (comma-separated)
select_purge_categories() { select_purge_categories() {
local -a categories=("$@") local -a categories=("$@")
local total_items=${#categories[@]} local total_items=${#categories[@]}
local clear_line=$'\r\033[2K' local clear_line=$'\r\033[2K'
if [[ $total_items -eq 0 ]]; then if [[ $total_items -eq 0 ]]; then
return 1 return 1
fi fi
# Initialize selection (all selected by default, except recent ones) # Initialize selection (all selected by default, except recent ones)
local -a selected=() local -a selected=()
IFS=',' read -r -a recent_flags <<< "${PURGE_RECENT_CATEGORIES:-}" IFS=',' read -r -a recent_flags <<< "${PURGE_RECENT_CATEGORIES:-}"
@@ -260,13 +222,11 @@ select_purge_categories() {
selected[i]=true selected[i]=true
fi fi
done done
local cursor_pos=0 local cursor_pos=0
local original_stty="" local original_stty=""
if [[ -t 0 ]] && command -v stty > /dev/null 2>&1; then if [[ -t 0 ]] && command -v stty > /dev/null 2>&1; then
original_stty=$(stty -g 2> /dev/null || echo "") original_stty=$(stty -g 2> /dev/null || echo "")
fi fi
# Terminal control functions # Terminal control functions
restore_terminal() { restore_terminal() {
trap - EXIT INT TERM trap - EXIT INT TERM
@@ -275,13 +235,11 @@ select_purge_categories() {
stty "${original_stty}" 2> /dev/null || stty sane 2> /dev/null || true stty "${original_stty}" 2> /dev/null || stty sane 2> /dev/null || true
fi fi
} }
# shellcheck disable=SC2329 # shellcheck disable=SC2329
handle_interrupt() { handle_interrupt() {
restore_terminal restore_terminal
exit 130 exit 130
} }
draw_menu() { draw_menu() {
printf "\033[H" printf "\033[H"
# Calculate total size of selected items for header # Calculate total size of selected items for header
@@ -296,48 +254,37 @@ select_purge_categories() {
done done
local selected_gb local selected_gb
selected_gb=$(echo "scale=1; $selected_size/1024/1024" | bc) selected_gb=$(echo "scale=1; $selected_size/1024/1024" | bc)
printf "%s\n" "$clear_line" printf "%s\n" "$clear_line"
printf "%s${PURPLE_BOLD}Select Categories to Clean${NC} ${GRAY}- ${selected_gb}GB ($selected_count selected)${NC}\n" "$clear_line" printf "%s${PURPLE_BOLD}Select Categories to Clean${NC} ${GRAY}- ${selected_gb}GB ($selected_count selected)${NC}\n" "$clear_line"
printf "%s\n" "$clear_line" printf "%s\n" "$clear_line"
IFS=',' read -r -a recent_flags <<< "${PURGE_RECENT_CATEGORIES:-}" IFS=',' read -r -a recent_flags <<< "${PURGE_RECENT_CATEGORIES:-}"
for ((i = 0; i < total_items; i++)); do for ((i = 0; i < total_items; i++)); do
local checkbox="$ICON_EMPTY" local checkbox="$ICON_EMPTY"
[[ ${selected[i]} == true ]] && checkbox="$ICON_SOLID" [[ ${selected[i]} == true ]] && checkbox="$ICON_SOLID"
local recent_marker="" local recent_marker=""
[[ ${recent_flags[i]:-false} == "true" ]] && recent_marker=" ${GRAY}| Recent${NC}" [[ ${recent_flags[i]:-false} == "true" ]] && recent_marker=" ${GRAY}| Recent${NC}"
if [[ $i -eq $cursor_pos ]]; then if [[ $i -eq $cursor_pos ]]; then
printf "%s${CYAN}${ICON_ARROW} %s %s%s${NC}\n" "$clear_line" "$checkbox" "${categories[i]}" "$recent_marker" printf "%s${CYAN}${ICON_ARROW} %s %s%s${NC}\n" "$clear_line" "$checkbox" "${categories[i]}" "$recent_marker"
else else
printf "%s %s %s%s\n" "$clear_line" "$checkbox" "${categories[i]}" "$recent_marker" printf "%s %s %s%s\n" "$clear_line" "$checkbox" "${categories[i]}" "$recent_marker"
fi fi
done done
printf "%s\n" "$clear_line" printf "%s\n" "$clear_line"
printf "%s${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN} | Space Select | Enter Confirm | A All | I Invert | Q Quit${NC}\n" "$clear_line" printf "%s${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN} | Space Select | Enter Confirm | A All | I Invert | Q Quit${NC}\n" "$clear_line"
} }
trap restore_terminal EXIT trap restore_terminal EXIT
trap handle_interrupt INT TERM trap handle_interrupt INT TERM
# Preserve interrupt character for Ctrl-C # Preserve interrupt character for Ctrl-C
stty -echo -icanon intr ^C 2> /dev/null || true stty -echo -icanon intr ^C 2> /dev/null || true
hide_cursor hide_cursor
if [[ -t 1 ]]; then if [[ -t 1 ]]; then
clear_screen clear_screen
fi fi
# Main loop # Main loop
while true; do while true; do
draw_menu draw_menu
# Read key # Read key
IFS= read -r -s -n1 key || key="" IFS= read -r -s -n1 key || key=""
case "$key" in case "$key" in
$'\x1b') $'\x1b')
# Arrow keys or ESC # Arrow keys or ESC
@@ -393,20 +340,17 @@ select_purge_categories() {
PURGE_SELECTION_RESULT+="$i" PURGE_SELECTION_RESULT+="$i"
fi fi
done done
restore_terminal restore_terminal
return 0 return 0
;; ;;
esac esac
done done
} }
# Main cleanup function - scans and prompts user to select artifacts to clean # Main cleanup function - scans and prompts user to select artifacts to clean
clean_project_artifacts() { clean_project_artifacts() {
local -a all_found_items=() local -a all_found_items=()
local -a safe_to_clean=() local -a safe_to_clean=()
local -a recently_modified=() local -a recently_modified=()
# Set up cleanup on interrupt # Set up cleanup on interrupt
# Note: Declared without 'local' so cleanup_scan trap can access them # Note: Declared without 'local' so cleanup_scan trap can access them
scan_pids=() scan_pids=()
@@ -428,35 +372,29 @@ clean_project_artifacts() {
exit 130 exit 130
} }
trap cleanup_scan INT TERM trap cleanup_scan INT TERM
# Start parallel scanning of all paths at once # Start parallel scanning of all paths at once
if [[ -t 1 ]]; then if [[ -t 1 ]]; then
start_inline_spinner "Scanning projects..." start_inline_spinner "Scanning projects..."
fi fi
# Launch all scans in parallel # Launch all scans in parallel
for path in "${PURGE_SEARCH_PATHS[@]}"; do for path in "${PURGE_SEARCH_PATHS[@]}"; do
if [[ -d "$path" ]]; then if [[ -d "$path" ]]; then
local scan_output local scan_output
scan_output=$(mktemp) scan_output=$(mktemp)
scan_temps+=("$scan_output") scan_temps+=("$scan_output")
# Launch scan in background for true parallelism # Launch scan in background for true parallelism
scan_purge_targets "$path" "$scan_output" & scan_purge_targets "$path" "$scan_output" &
local scan_pid=$! local scan_pid=$!
scan_pids+=("$scan_pid") scan_pids+=("$scan_pid")
fi fi
done done
# Wait for all scans to complete # Wait for all scans to complete
for pid in "${scan_pids[@]+"${scan_pids[@]}"}"; do for pid in "${scan_pids[@]+"${scan_pids[@]}"}"; do
wait "$pid" 2> /dev/null || true wait "$pid" 2> /dev/null || true
done done
if [[ -t 1 ]]; then if [[ -t 1 ]]; then
stop_inline_spinner stop_inline_spinner
fi fi
# Collect all results # Collect all results
for scan_output in "${scan_temps[@]+"${scan_temps[@]}"}"; do for scan_output in "${scan_temps[@]+"${scan_temps[@]}"}"; do
if [[ -f "$scan_output" ]]; then if [[ -f "$scan_output" ]]; then
@@ -468,17 +406,14 @@ clean_project_artifacts() {
rm -f "$scan_output" rm -f "$scan_output"
fi fi
done done
# Clean up trap # Clean up trap
trap - INT TERM trap - INT TERM
if [[ ${#all_found_items[@]} -eq 0 ]]; then if [[ ${#all_found_items[@]} -eq 0 ]]; then
echo "" echo ""
echo -e "${GREEN}${ICON_SUCCESS}${NC} Great! No old project artifacts to clean" echo -e "${GREEN}${ICON_SUCCESS}${NC} Great! No old project artifacts to clean"
printf '\n' printf '\n'
return 2 # Special code: nothing to clean return 2 # Special code: nothing to clean
fi fi
# Mark recently modified items (for default selection state) # Mark recently modified items (for default selection state)
for item in "${all_found_items[@]}"; do for item in "${all_found_items[@]}"; do
if is_recently_modified "$item"; then if is_recently_modified "$item"; then
@@ -487,23 +422,19 @@ clean_project_artifacts() {
# Add all items to safe_to_clean, let user choose # Add all items to safe_to_clean, let user choose
safe_to_clean+=("$item") safe_to_clean+=("$item")
done done
# Build menu options - one per artifact # Build menu options - one per artifact
if [[ -t 1 ]]; then if [[ -t 1 ]]; then
start_inline_spinner "Calculating sizes..." start_inline_spinner "Calculating sizes..."
fi fi
local -a menu_options=() local -a menu_options=()
local -a item_paths=() local -a item_paths=()
local -a item_sizes=() local -a item_sizes=()
local -a item_recent_flags=() local -a item_recent_flags=()
# Helper to get project name from path # Helper to get project name from path
# For ~/www/pake/src-tauri/target -> returns "pake" # For ~/www/pake/src-tauri/target -> returns "pake"
# For ~/www/project/node_modules/xxx/node_modules -> returns "project" # For ~/www/project/node_modules/xxx/node_modules -> returns "project"
get_project_name() { get_project_name() {
local path="$1" local path="$1"
# Find the project root by looking for direct child of search paths # Find the project root by looking for direct child of search paths
local search_roots=() local search_roots=()
if [[ ${#PURGE_SEARCH_PATHS[@]} -gt 0 ]]; then if [[ ${#PURGE_SEARCH_PATHS[@]} -gt 0 ]]; then
@@ -511,7 +442,6 @@ clean_project_artifacts() {
else else
search_roots=("$HOME/www" "$HOME/dev" "$HOME/Projects") search_roots=("$HOME/www" "$HOME/dev" "$HOME/Projects")
fi fi
for root in "${search_roots[@]}"; do for root in "${search_roots[@]}"; do
# Normalize trailing slash for consistent matching # Normalize trailing slash for consistent matching
root="${root%/}" root="${root%/}"
@@ -523,44 +453,36 @@ clean_project_artifacts() {
return 0 return 0
fi fi
done done
# Fallback: use grandparent directory # Fallback: use grandparent directory
dirname "$(dirname "$path")" | xargs basename dirname "$(dirname "$path")" | xargs basename
} }
# Format display with alignment (like app_selector) # Format display with alignment (like app_selector)
format_purge_display() { format_purge_display() {
local project_name="$1" local project_name="$1"
local artifact_type="$2" local artifact_type="$2"
local size_str="$3" local size_str="$3"
# Terminal width for alignment # Terminal width for alignment
local terminal_width=$(tput cols 2> /dev/null || echo 80) local terminal_width=$(tput cols 2> /dev/null || echo 80)
local fixed_width=28 # Reserve for type and size local fixed_width=28 # Reserve for type and size
local available_width=$((terminal_width - fixed_width)) local available_width=$((terminal_width - fixed_width))
# Bounds: 24-35 chars for project name # Bounds: 24-35 chars for project name
[[ $available_width -lt 24 ]] && available_width=24 [[ $available_width -lt 24 ]] && available_width=24
[[ $available_width -gt 35 ]] && available_width=35 [[ $available_width -gt 35 ]] && available_width=35
# Truncate project name if needed # Truncate project name if needed
local truncated_name=$(truncate_by_display_width "$project_name" "$available_width") local truncated_name=$(truncate_by_display_width "$project_name" "$available_width")
local current_width=$(get_display_width "$truncated_name") local current_width=$(get_display_width "$truncated_name")
local char_count=${#truncated_name} local char_count=${#truncated_name}
local padding=$((available_width - current_width)) local padding=$((available_width - current_width))
local printf_width=$((char_count + padding)) local printf_width=$((char_count + padding))
# Format: "project_name size | artifact_type" # Format: "project_name size | artifact_type"
printf "%-*s %9s | %-13s" "$printf_width" "$truncated_name" "$size_str" "$artifact_type" printf "%-*s %9s | %-13s" "$printf_width" "$truncated_name" "$size_str" "$artifact_type"
} }
# Build menu options - one line per artifact # Build menu options - one line per artifact
for item in "${safe_to_clean[@]}"; do for item in "${safe_to_clean[@]}"; do
local project_name=$(get_project_name "$item") local project_name=$(get_project_name "$item")
local artifact_type=$(basename "$item") local artifact_type=$(basename "$item")
local size_kb=$(get_dir_size_kb "$item") local size_kb=$(get_dir_size_kb "$item")
local size_human=$(bytes_to_human "$((size_kb * 1024))") local size_human=$(bytes_to_human "$((size_kb * 1024))")
# Check if recent # Check if recent
local is_recent=false local is_recent=false
for recent_item in "${recently_modified[@]+"${recently_modified[@]}"}"; do for recent_item in "${recently_modified[@]+"${recently_modified[@]}"}"; do
@@ -569,17 +491,14 @@ clean_project_artifacts() {
break break
fi fi
done done
menu_options+=("$(format_purge_display "$project_name" "$artifact_type" "$size_human")") menu_options+=("$(format_purge_display "$project_name" "$artifact_type" "$size_human")")
item_paths+=("$item") item_paths+=("$item")
item_sizes+=("$size_kb") item_sizes+=("$size_kb")
item_recent_flags+=("$is_recent") item_recent_flags+=("$is_recent")
done done
if [[ -t 1 ]]; then if [[ -t 1 ]]; then
stop_inline_spinner stop_inline_spinner
fi fi
# Set global vars for selector # Set global vars for selector
export PURGE_CATEGORY_SIZES=$( export PURGE_CATEGORY_SIZES=$(
IFS=, IFS=,
@@ -589,7 +508,6 @@ clean_project_artifacts() {
IFS=, IFS=,
echo "${item_recent_flags[*]}" echo "${item_recent_flags[*]}"
) )
# Interactive selection (only if terminal is available) # Interactive selection (only if terminal is available)
PURGE_SELECTION_RESULT="" PURGE_SELECTION_RESULT=""
if [[ -t 0 ]]; then if [[ -t 0 ]]; then
@@ -606,7 +524,6 @@ clean_project_artifacts() {
fi fi
done done
fi fi
if [[ -z "$PURGE_SELECTION_RESULT" ]]; then if [[ -z "$PURGE_SELECTION_RESULT" ]]; then
echo "" echo ""
echo -e "${GRAY}No items selected${NC}" echo -e "${GRAY}No items selected${NC}"
@@ -614,48 +531,38 @@ clean_project_artifacts() {
unset PURGE_CATEGORY_SIZES PURGE_RECENT_CATEGORIES PURGE_SELECTION_RESULT unset PURGE_CATEGORY_SIZES PURGE_RECENT_CATEGORIES PURGE_SELECTION_RESULT
return 0 return 0
fi fi
# Clean selected items # Clean selected items
echo "" echo ""
IFS=',' read -r -a selected_indices <<< "$PURGE_SELECTION_RESULT" IFS=',' read -r -a selected_indices <<< "$PURGE_SELECTION_RESULT"
local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole" local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"
local cleaned_count=0 local cleaned_count=0
for idx in "${selected_indices[@]}"; do for idx in "${selected_indices[@]}"; do
local item_path="${item_paths[idx]}" local item_path="${item_paths[idx]}"
local artifact_type=$(basename "$item_path") local artifact_type=$(basename "$item_path")
local project_name=$(get_project_name "$item_path") local project_name=$(get_project_name "$item_path")
local size_kb="${item_sizes[idx]}" local size_kb="${item_sizes[idx]}"
local size_human=$(bytes_to_human "$((size_kb * 1024))") local size_human=$(bytes_to_human "$((size_kb * 1024))")
# Safety checks # Safety checks
if [[ -z "$item_path" || "$item_path" == "/" || "$item_path" == "$HOME" || "$item_path" != "$HOME/"* ]]; then if [[ -z "$item_path" || "$item_path" == "/" || "$item_path" == "$HOME" || "$item_path" != "$HOME/"* ]]; then
continue continue
fi fi
if [[ -t 1 ]]; then if [[ -t 1 ]]; then
start_inline_spinner "Cleaning $project_name/$artifact_type..." start_inline_spinner "Cleaning $project_name/$artifact_type..."
fi fi
if [[ -e "$item_path" ]]; then if [[ -e "$item_path" ]]; then
safe_remove "$item_path" true safe_remove "$item_path" true
if [[ ! -e "$item_path" ]]; then if [[ ! -e "$item_path" ]]; then
local current_total=$(cat "$stats_dir/purge_stats" 2> /dev/null || echo "0") local current_total=$(cat "$stats_dir/purge_stats" 2> /dev/null || echo "0")
echo "$((current_total + size_kb))" > "$stats_dir/purge_stats" echo "$((current_total + size_kb))" > "$stats_dir/purge_stats"
((cleaned_count++)) ((cleaned_count++))
fi fi
fi fi
if [[ -t 1 ]]; then if [[ -t 1 ]]; then
stop_inline_spinner stop_inline_spinner
echo -e "${GREEN}${ICON_SUCCESS}${NC} $project_name - $artifact_type ${GREEN}($size_human)${NC}" echo -e "${GREEN}${ICON_SUCCESS}${NC} $project_name - $artifact_type ${GREEN}($size_human)${NC}"
fi fi
done done
# Update count # Update count
echo "$cleaned_count" > "$stats_dir/purge_count" echo "$cleaned_count" > "$stats_dir/purge_count"
unset PURGE_CATEGORY_SIZES PURGE_RECENT_CATEGORIES PURGE_SELECTION_RESULT unset PURGE_CATEGORY_SIZES PURGE_RECENT_CATEGORIES PURGE_SELECTION_RESULT
} }

View File

@@ -1,35 +1,28 @@
#!/bin/bash #!/bin/bash
# System-Level Cleanup Module # System-Level Cleanup Module
# Deep system cleanup (requires sudo) and Time Machine failed backups # Deep system cleanup (requires sudo) and Time Machine failed backups
set -euo pipefail set -euo pipefail
# Deep system cleanup (requires sudo) # Deep system cleanup (requires sudo)
clean_deep_system() { clean_deep_system() {
stop_section_spinner stop_section_spinner
# Clean old system caches # Clean old system caches
local cache_cleaned=0 local cache_cleaned=0
safe_sudo_find_delete "/Library/Caches" "*.cache" "$MOLE_TEMP_FILE_AGE_DAYS" "f" && cache_cleaned=1 || true safe_sudo_find_delete "/Library/Caches" "*.cache" "$MOLE_TEMP_FILE_AGE_DAYS" "f" && cache_cleaned=1 || true
safe_sudo_find_delete "/Library/Caches" "*.tmp" "$MOLE_TEMP_FILE_AGE_DAYS" "f" && cache_cleaned=1 || true safe_sudo_find_delete "/Library/Caches" "*.tmp" "$MOLE_TEMP_FILE_AGE_DAYS" "f" && cache_cleaned=1 || true
safe_sudo_find_delete "/Library/Caches" "*.log" "$MOLE_LOG_AGE_DAYS" "f" && cache_cleaned=1 || true safe_sudo_find_delete "/Library/Caches" "*.log" "$MOLE_LOG_AGE_DAYS" "f" && cache_cleaned=1 || true
[[ $cache_cleaned -eq 1 ]] && log_success "System caches" [[ $cache_cleaned -eq 1 ]] && log_success "System caches"
# Clean temporary files (macOS /tmp is a symlink to /private/tmp) # Clean temporary files (macOS /tmp is a symlink to /private/tmp)
local tmp_cleaned=0 local tmp_cleaned=0
safe_sudo_find_delete "/private/tmp" "*" "${MOLE_TEMP_FILE_AGE_DAYS}" "f" && tmp_cleaned=1 || true safe_sudo_find_delete "/private/tmp" "*" "${MOLE_TEMP_FILE_AGE_DAYS}" "f" && tmp_cleaned=1 || true
safe_sudo_find_delete "/private/var/tmp" "*" "${MOLE_TEMP_FILE_AGE_DAYS}" "f" && tmp_cleaned=1 || true safe_sudo_find_delete "/private/var/tmp" "*" "${MOLE_TEMP_FILE_AGE_DAYS}" "f" && tmp_cleaned=1 || true
[[ $tmp_cleaned -eq 1 ]] && log_success "System temp files" [[ $tmp_cleaned -eq 1 ]] && log_success "System temp files"
# Clean crash reports # Clean crash reports
safe_sudo_find_delete "/Library/Logs/DiagnosticReports" "*" "$MOLE_CRASH_REPORT_AGE_DAYS" "f" || true safe_sudo_find_delete "/Library/Logs/DiagnosticReports" "*" "$MOLE_CRASH_REPORT_AGE_DAYS" "f" || true
log_success "System crash reports" log_success "System crash reports"
# Clean system logs (macOS /var is a symlink to /private/var) # Clean system logs (macOS /var is a symlink to /private/var)
safe_sudo_find_delete "/private/var/log" "*.log" "$MOLE_LOG_AGE_DAYS" "f" || true safe_sudo_find_delete "/private/var/log" "*.log" "$MOLE_LOG_AGE_DAYS" "f" || true
safe_sudo_find_delete "/private/var/log" "*.gz" "$MOLE_LOG_AGE_DAYS" "f" || true safe_sudo_find_delete "/private/var/log" "*.gz" "$MOLE_LOG_AGE_DAYS" "f" || true
log_success "System logs" log_success "System logs"
# Clean Library Updates safely (skip if SIP is enabled) # Clean Library Updates safely (skip if SIP is enabled)
if [[ -d "/Library/Updates" && ! -L "/Library/Updates" ]]; then if [[ -d "/Library/Updates" && ! -L "/Library/Updates" ]]; then
if ! is_sip_enabled; then if ! is_sip_enabled; then
@@ -41,14 +34,12 @@ clean_deep_system() {
debug_log "Skipping malformed path: $item" debug_log "Skipping malformed path: $item"
continue continue
fi fi
# Skip system-protected files (restricted flag) # Skip system-protected files (restricted flag)
local item_flags local item_flags
item_flags=$($STAT_BSD -f%Sf "$item" 2> /dev/null || echo "") item_flags=$($STAT_BSD -f%Sf "$item" 2> /dev/null || echo "")
if [[ "$item_flags" == *"restricted"* ]]; then if [[ "$item_flags" == *"restricted"* ]]; then
continue continue
fi fi
if safe_sudo_remove "$item"; then if safe_sudo_remove "$item"; then
((updates_cleaned++)) ((updates_cleaned++))
fi fi
@@ -56,20 +47,16 @@ clean_deep_system() {
[[ $updates_cleaned -gt 0 ]] && log_success "System library updates" [[ $updates_cleaned -gt 0 ]] && log_success "System library updates"
fi fi
fi fi
# Clean macOS Install Data (legacy upgrade leftovers) # Clean macOS Install Data (legacy upgrade leftovers)
if [[ -d "/macOS Install Data" ]]; then if [[ -d "/macOS Install Data" ]]; then
local mtime=$(get_file_mtime "/macOS Install Data") local mtime=$(get_file_mtime "/macOS Install Data")
local age_days=$((($(date +%s) - mtime) / 86400)) local age_days=$((($(date +%s) - mtime) / 86400))
debug_log "Found macOS Install Data (age: ${age_days} days)" debug_log "Found macOS Install Data (age: ${age_days} days)"
if [[ $age_days -ge 30 ]]; then if [[ $age_days -ge 30 ]]; then
local size_kb=$(get_path_size_kb "/macOS Install Data") local size_kb=$(get_path_size_kb "/macOS Install Data")
if [[ -n "$size_kb" && "$size_kb" -gt 0 ]]; then if [[ -n "$size_kb" && "$size_kb" -gt 0 ]]; then
local size_human=$(bytes_to_human "$((size_kb * 1024))") local size_human=$(bytes_to_human "$((size_kb * 1024))")
debug_log "Cleaning macOS Install Data: $size_human (${age_days} days old)" debug_log "Cleaning macOS Install Data: $size_human (${age_days} days old)"
if safe_sudo_remove "/macOS Install Data"; then if safe_sudo_remove "/macOS Install Data"; then
log_success "macOS Install Data ($size_human)" log_success "macOS Install Data ($size_human)"
fi fi
@@ -78,21 +65,18 @@ clean_deep_system() {
debug_log "Keeping macOS Install Data (only ${age_days} days old, needs 30+)" debug_log "Keeping macOS Install Data (only ${age_days} days old, needs 30+)"
fi fi
fi fi
# Clean browser code signature caches # Clean browser code signature caches
start_section_spinner "Scanning system caches..." start_section_spinner "Scanning system caches..."
local code_sign_cleaned=0 local code_sign_cleaned=0
local found_count=0 local found_count=0
local last_update_time=$(date +%s) local last_update_time=$(date +%s)
local update_interval=2 # Update spinner every 2 seconds instead of every 50 files local update_interval=2 # Update spinner every 2 seconds instead of every 50 files
# Efficient stream processing for large directories # Efficient stream processing for large directories
while IFS= read -r -d '' cache_dir; do while IFS= read -r -d '' cache_dir; do
if safe_remove "$cache_dir" true; then if safe_remove "$cache_dir" true; then
((code_sign_cleaned++)) ((code_sign_cleaned++))
fi fi
((found_count++)) ((found_count++))
# Update progress spinner periodically based on time, not count # Update progress spinner periodically based on time, not count
local current_time=$(date +%s) local current_time=$(date +%s)
if [[ $((current_time - last_update_time)) -ge $update_interval ]]; then if [[ $((current_time - last_update_time)) -ge $update_interval ]]; then
@@ -100,26 +84,20 @@ clean_deep_system() {
last_update_time=$current_time last_update_time=$current_time
fi fi
done < <(run_with_timeout 5 command find /private/var/folders -type d -name "*.code_sign_clone" -path "*/X/*" -print0 2> /dev/null || true) done < <(run_with_timeout 5 command find /private/var/folders -type d -name "*.code_sign_clone" -path "*/X/*" -print0 2> /dev/null || true)
stop_section_spinner stop_section_spinner
[[ $code_sign_cleaned -gt 0 ]] && log_success "Browser code signature caches ($code_sign_cleaned items)" [[ $code_sign_cleaned -gt 0 ]] && log_success "Browser code signature caches ($code_sign_cleaned items)"
# Clean system diagnostics logs # Clean system diagnostics logs
safe_sudo_find_delete "/private/var/db/diagnostics/Special" "*" "$MOLE_LOG_AGE_DAYS" "f" || true safe_sudo_find_delete "/private/var/db/diagnostics/Special" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
safe_sudo_find_delete "/private/var/db/diagnostics/Persist" "*" "$MOLE_LOG_AGE_DAYS" "f" || true safe_sudo_find_delete "/private/var/db/diagnostics/Persist" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
safe_sudo_find_delete "/private/var/db/DiagnosticPipeline" "*" "$MOLE_LOG_AGE_DAYS" "f" || true safe_sudo_find_delete "/private/var/db/DiagnosticPipeline" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
log_success "System diagnostic logs" log_success "System diagnostic logs"
# Clean power logs # Clean power logs
safe_sudo_find_delete "/private/var/db/powerlog" "*" "$MOLE_LOG_AGE_DAYS" "f" || true safe_sudo_find_delete "/private/var/db/powerlog" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
log_success "Power logs" log_success "Power logs"
# Clean memory exception reports (can accumulate to 1-2GB, thousands of files) # Clean memory exception reports (can accumulate to 1-2GB, thousands of files)
# These track app memory limit violations, safe to clean old ones # These track app memory limit violations, safe to clean old ones
safe_sudo_find_delete "/private/var/db/reportmemoryexception/MemoryLimitViolations" "*" "30" "f" || true safe_sudo_find_delete "/private/var/db/reportmemoryexception/MemoryLimitViolations" "*" "30" "f" || true
log_success "Memory exception reports" log_success "Memory exception reports"
# Clean system diagnostic tracev3 logs (can be 1-2GB) # Clean system diagnostic tracev3 logs (can be 1-2GB)
# System generates these continuously, safe to clean old ones # System generates these continuously, safe to clean old ones
start_section_spinner "Cleaning diagnostic trace logs..." start_section_spinner "Cleaning diagnostic trace logs..."
@@ -128,7 +106,6 @@ clean_deep_system() {
safe_sudo_find_delete "/private/var/db/diagnostics/Special" "*.tracev3" "30" "f" && diag_logs_cleaned=1 || true safe_sudo_find_delete "/private/var/db/diagnostics/Special" "*.tracev3" "30" "f" && diag_logs_cleaned=1 || true
stop_section_spinner stop_section_spinner
[[ $diag_logs_cleaned -eq 1 ]] && log_success "System diagnostic trace logs" [[ $diag_logs_cleaned -eq 1 ]] && log_success "System diagnostic trace logs"
# Clean core symbolication cache (can be 3-5GB, mostly for crash report debugging) # Clean core symbolication cache (can be 3-5GB, mostly for crash report debugging)
# Will regenerate when needed for crash analysis # Will regenerate when needed for crash analysis
# Use faster du with timeout instead of get_path_size_kb to avoid hanging # Use faster du with timeout instead of get_path_size_kb to avoid hanging
@@ -138,11 +115,9 @@ clean_deep_system() {
# Quick size check with timeout (max 5 seconds) # Quick size check with timeout (max 5 seconds)
local symbolication_size_mb="" local symbolication_size_mb=""
symbolication_size_mb=$(run_with_timeout 5 du -sm "/System/Library/Caches/com.apple.coresymbolicationd/data" 2> /dev/null | awk '{print $1}') symbolication_size_mb=$(run_with_timeout 5 du -sm "/System/Library/Caches/com.apple.coresymbolicationd/data" 2> /dev/null | awk '{print $1}')
# Validate that we got a valid size (non-empty and numeric) # Validate that we got a valid size (non-empty and numeric)
if [[ -n "$symbolication_size_mb" && "$symbolication_size_mb" =~ ^[0-9]+$ ]]; then if [[ -n "$symbolication_size_mb" && "$symbolication_size_mb" =~ ^[0-9]+$ ]]; then
debug_log "Symbolication cache size: ${symbolication_size_mb}MB" debug_log "Symbolication cache size: ${symbolication_size_mb}MB"
# Only clean if larger than 1GB (1024MB) # Only clean if larger than 1GB (1024MB)
if [[ $symbolication_size_mb -gt 1024 ]]; then if [[ $symbolication_size_mb -gt 1024 ]]; then
debug_log "Cleaning symbolication cache (size > 1GB)..." debug_log "Cleaning symbolication cache (size > 1GB)..."
@@ -156,21 +131,17 @@ clean_deep_system() {
fi fi
debug_log "Core symbolication cache section completed" debug_log "Core symbolication cache section completed"
} }
# Clean incomplete Time Machine backups # Clean incomplete Time Machine backups
clean_time_machine_failed_backups() { clean_time_machine_failed_backups() {
local tm_cleaned=0 local tm_cleaned=0
# Check if tmutil is available # Check if tmutil is available
if ! command -v tmutil > /dev/null 2>&1; then if ! command -v tmutil > /dev/null 2>&1; then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
return 0 return 0
fi fi
# Start spinner early (before potentially slow tmutil command) # Start spinner early (before potentially slow tmutil command)
start_section_spinner "Checking Time Machine configuration..." start_section_spinner "Checking Time Machine configuration..."
local spinner_active=true local spinner_active=true
# Check if Time Machine is configured (with short timeout for faster response) # Check if Time Machine is configured (with short timeout for faster response)
local tm_info local tm_info
tm_info=$(run_with_timeout 2 tmutil destinationinfo 2>&1 || echo "failed") tm_info=$(run_with_timeout 2 tmutil destinationinfo 2>&1 || echo "failed")
@@ -181,7 +152,6 @@ clean_time_machine_failed_backups() {
echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
return 0 return 0
fi fi
if [[ ! -d "/Volumes" ]]; then if [[ ! -d "/Volumes" ]]; then
if [[ "$spinner_active" == "true" ]]; then if [[ "$spinner_active" == "true" ]]; then
stop_section_spinner stop_section_spinner
@@ -189,7 +159,6 @@ clean_time_machine_failed_backups() {
echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
return 0 return 0
fi fi
# Skip if backup is running (check actual Running status, not just daemon existence) # Skip if backup is running (check actual Running status, not just daemon existence)
if tmutil status 2> /dev/null | grep -q "Running = 1"; then if tmutil status 2> /dev/null | grep -q "Running = 1"; then
if [[ "$spinner_active" == "true" ]]; then if [[ "$spinner_active" == "true" ]]; then
@@ -198,25 +167,21 @@ clean_time_machine_failed_backups() {
echo -e " ${YELLOW}!${NC} Time Machine backup in progress, skipping cleanup" echo -e " ${YELLOW}!${NC} Time Machine backup in progress, skipping cleanup"
return 0 return 0
fi fi
# Update spinner message for volume scanning # Update spinner message for volume scanning
if [[ "$spinner_active" == "true" ]]; then if [[ "$spinner_active" == "true" ]]; then
start_section_spinner "Checking backup volumes..." start_section_spinner "Checking backup volumes..."
fi fi
# Fast pre-scan: check which volumes have Backups.backupdb (avoid expensive tmutil checks) # Fast pre-scan: check which volumes have Backups.backupdb (avoid expensive tmutil checks)
local -a backup_volumes=() local -a backup_volumes=()
for volume in /Volumes/*; do for volume in /Volumes/*; do
[[ -d "$volume" ]] || continue [[ -d "$volume" ]] || continue
[[ "$volume" == "/Volumes/MacintoshHD" || "$volume" == "/" ]] && continue [[ "$volume" == "/Volumes/MacintoshHD" || "$volume" == "/" ]] && continue
[[ -L "$volume" ]] && continue [[ -L "$volume" ]] && continue
# Quick check: does this volume have backup directories? # Quick check: does this volume have backup directories?
if [[ -d "$volume/Backups.backupdb" ]] || [[ -d "$volume/.MobileBackups" ]]; then if [[ -d "$volume/Backups.backupdb" ]] || [[ -d "$volume/.MobileBackups" ]]; then
backup_volumes+=("$volume") backup_volumes+=("$volume")
fi fi
done done
# If no backup volumes found, stop spinner and return # If no backup volumes found, stop spinner and return
if [[ ${#backup_volumes[@]} -eq 0 ]]; then if [[ ${#backup_volumes[@]} -eq 0 ]]; then
if [[ "$spinner_active" == "true" ]]; then if [[ "$spinner_active" == "true" ]]; then
@@ -225,7 +190,6 @@ clean_time_machine_failed_backups() {
echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
return 0 return 0
fi fi
# Update spinner message: we have potential backup volumes, now scan them # Update spinner message: we have potential backup volumes, now scan them
if [[ "$spinner_active" == "true" ]]; then if [[ "$spinner_active" == "true" ]]; then
start_section_spinner "Scanning backup volumes..." start_section_spinner "Scanning backup volumes..."
@@ -237,47 +201,38 @@ clean_time_machine_failed_backups() {
case "$fs_type" in case "$fs_type" in
nfs | smbfs | afpfs | cifs | webdav | unknown) continue ;; nfs | smbfs | afpfs | cifs | webdav | unknown) continue ;;
esac esac
# HFS+ style backups (Backups.backupdb) # HFS+ style backups (Backups.backupdb)
local backupdb_dir="$volume/Backups.backupdb" local backupdb_dir="$volume/Backups.backupdb"
if [[ -d "$backupdb_dir" ]]; then if [[ -d "$backupdb_dir" ]]; then
while IFS= read -r inprogress_file; do while IFS= read -r inprogress_file; do
[[ -d "$inprogress_file" ]] || continue [[ -d "$inprogress_file" ]] || continue
# Only delete old incomplete backups (safety window) # Only delete old incomplete backups (safety window)
local file_mtime=$(get_file_mtime "$inprogress_file") local file_mtime=$(get_file_mtime "$inprogress_file")
local current_time=$(date +%s) local current_time=$(date +%s)
local hours_old=$(((current_time - file_mtime) / 3600)) local hours_old=$(((current_time - file_mtime) / 3600))
if [[ $hours_old -lt $MOLE_TM_BACKUP_SAFE_HOURS ]]; then if [[ $hours_old -lt $MOLE_TM_BACKUP_SAFE_HOURS ]]; then
continue continue
fi fi
local size_kb=$(get_path_size_kb "$inprogress_file") local size_kb=$(get_path_size_kb "$inprogress_file")
[[ "$size_kb" -le 0 ]] && continue [[ "$size_kb" -le 0 ]] && continue
# Stop spinner before first output # Stop spinner before first output
if [[ "$spinner_active" == "true" ]]; then if [[ "$spinner_active" == "true" ]]; then
stop_section_spinner stop_section_spinner
spinner_active=false spinner_active=false
fi fi
local backup_name=$(basename "$inprogress_file") local backup_name=$(basename "$inprogress_file")
local size_human=$(bytes_to_human "$((size_kb * 1024))") local size_human=$(bytes_to_human "$((size_kb * 1024))")
if [[ "$DRY_RUN" == "true" ]]; then if [[ "$DRY_RUN" == "true" ]]; then
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Incomplete backup: $backup_name ${YELLOW}($size_human dry)${NC}" echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Incomplete backup: $backup_name ${YELLOW}($size_human dry)${NC}"
((tm_cleaned++)) ((tm_cleaned++))
note_activity note_activity
continue continue
fi fi
# Real deletion # Real deletion
if ! command -v tmutil > /dev/null 2>&1; then if ! command -v tmutil > /dev/null 2>&1; then
echo -e " ${YELLOW}!${NC} tmutil not available, skipping: $backup_name" echo -e " ${YELLOW}!${NC} tmutil not available, skipping: $backup_name"
continue continue
fi fi
if tmutil delete "$inprogress_file" 2> /dev/null; then if tmutil delete "$inprogress_file" 2> /dev/null; then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete backup: $backup_name ${GREEN}($size_human)${NC}" echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete backup: $backup_name ${GREEN}($size_human)${NC}"
((tm_cleaned++)) ((tm_cleaned++))
@@ -290,53 +245,42 @@ clean_time_machine_failed_backups() {
fi fi
done < <(run_with_timeout 15 find "$backupdb_dir" -maxdepth 3 -type d \( -name "*.inProgress" -o -name "*.inprogress" \) 2> /dev/null || true) done < <(run_with_timeout 15 find "$backupdb_dir" -maxdepth 3 -type d \( -name "*.inProgress" -o -name "*.inprogress" \) 2> /dev/null || true)
fi fi
# APFS style backups (.backupbundle or .sparsebundle) # APFS style backups (.backupbundle or .sparsebundle)
for bundle in "$volume"/*.backupbundle "$volume"/*.sparsebundle; do for bundle in "$volume"/*.backupbundle "$volume"/*.sparsebundle; do
[[ -e "$bundle" ]] || continue [[ -e "$bundle" ]] || continue
[[ -d "$bundle" ]] || continue [[ -d "$bundle" ]] || continue
# Check if bundle is mounted # Check if bundle is mounted
local bundle_name=$(basename "$bundle") local bundle_name=$(basename "$bundle")
local mounted_path=$(hdiutil info 2> /dev/null | grep -A 5 "image-path.*$bundle_name" | grep "/Volumes/" | awk '{print $1}' | head -1 || echo "") local mounted_path=$(hdiutil info 2> /dev/null | grep -A 5 "image-path.*$bundle_name" | grep "/Volumes/" | awk '{print $1}' | head -1 || echo "")
if [[ -n "$mounted_path" && -d "$mounted_path" ]]; then if [[ -n "$mounted_path" && -d "$mounted_path" ]]; then
while IFS= read -r inprogress_file; do while IFS= read -r inprogress_file; do
[[ -d "$inprogress_file" ]] || continue [[ -d "$inprogress_file" ]] || continue
# Only delete old incomplete backups (safety window) # Only delete old incomplete backups (safety window)
local file_mtime=$(get_file_mtime "$inprogress_file") local file_mtime=$(get_file_mtime "$inprogress_file")
local current_time=$(date +%s) local current_time=$(date +%s)
local hours_old=$(((current_time - file_mtime) / 3600)) local hours_old=$(((current_time - file_mtime) / 3600))
if [[ $hours_old -lt $MOLE_TM_BACKUP_SAFE_HOURS ]]; then if [[ $hours_old -lt $MOLE_TM_BACKUP_SAFE_HOURS ]]; then
continue continue
fi fi
local size_kb=$(get_path_size_kb "$inprogress_file") local size_kb=$(get_path_size_kb "$inprogress_file")
[[ "$size_kb" -le 0 ]] && continue [[ "$size_kb" -le 0 ]] && continue
# Stop spinner before first output # Stop spinner before first output
if [[ "$spinner_active" == "true" ]]; then if [[ "$spinner_active" == "true" ]]; then
stop_section_spinner stop_section_spinner
spinner_active=false spinner_active=false
fi fi
local backup_name=$(basename "$inprogress_file") local backup_name=$(basename "$inprogress_file")
local size_human=$(bytes_to_human "$((size_kb * 1024))") local size_human=$(bytes_to_human "$((size_kb * 1024))")
if [[ "$DRY_RUN" == "true" ]]; then if [[ "$DRY_RUN" == "true" ]]; then
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Incomplete APFS backup in $bundle_name: $backup_name ${YELLOW}($size_human dry)${NC}" echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Incomplete APFS backup in $bundle_name: $backup_name ${YELLOW}($size_human dry)${NC}"
((tm_cleaned++)) ((tm_cleaned++))
note_activity note_activity
continue continue
fi fi
# Real deletion # Real deletion
if ! command -v tmutil > /dev/null 2>&1; then if ! command -v tmutil > /dev/null 2>&1; then
continue continue
fi fi
if tmutil delete "$inprogress_file" 2> /dev/null; then if tmutil delete "$inprogress_file" 2> /dev/null; then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete APFS backup in $bundle_name: $backup_name ${GREEN}($size_human)${NC}" echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete APFS backup in $bundle_name: $backup_name ${GREEN}($size_human)${NC}"
((tm_cleaned++)) ((tm_cleaned++))
@@ -351,55 +295,42 @@ clean_time_machine_failed_backups() {
fi fi
done done
done done
# Stop spinner if still active (no backups found) # Stop spinner if still active (no backups found)
if [[ "$spinner_active" == "true" ]]; then if [[ "$spinner_active" == "true" ]]; then
stop_section_spinner stop_section_spinner
fi fi
if [[ $tm_cleaned -eq 0 ]]; then if [[ $tm_cleaned -eq 0 ]]; then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
fi fi
} }
# Clean local APFS snapshots (older than 24 hours) # Clean local APFS snapshots (older than 24 hours)
clean_local_snapshots() { clean_local_snapshots() {
# Check if tmutil is available # Check if tmutil is available
if ! command -v tmutil > /dev/null 2>&1; then if ! command -v tmutil > /dev/null 2>&1; then
return 0 return 0
fi fi
start_section_spinner "Checking local snapshots..." start_section_spinner "Checking local snapshots..."
# Check for local snapshots # Check for local snapshots
local snapshot_list local snapshot_list
snapshot_list=$(tmutil listlocalsnapshots / 2> /dev/null) snapshot_list=$(tmutil listlocalsnapshots / 2> /dev/null)
stop_section_spinner stop_section_spinner
[[ -z "$snapshot_list" ]] && return 0 [[ -z "$snapshot_list" ]] && return 0
# Parse and clean snapshots # Parse and clean snapshots
local cleaned_count=0 local cleaned_count=0
local total_cleaned_size=0 # Estimation not possible without thin local total_cleaned_size=0 # Estimation not possible without thin
# Get current time # Get current time
local current_ts=$(date +%s) local current_ts=$(date +%s)
local one_day_ago=$((current_ts - 86400)) local one_day_ago=$((current_ts - 86400))
while IFS= read -r line; do while IFS= read -r line; do
# Format: com.apple.TimeMachine.2023-10-25-120000 # Format: com.apple.TimeMachine.2023-10-25-120000
if [[ "$line" =~ com\.apple\.TimeMachine\.([0-9]{4})-([0-9]{2})-([0-9]{2})-([0-9]{6}) ]]; then if [[ "$line" =~ com\.apple\.TimeMachine\.([0-9]{4})-([0-9]{2})-([0-9]{2})-([0-9]{6}) ]]; then
local date_str="${BASH_REMATCH[1]}-${BASH_REMATCH[2]}-${BASH_REMATCH[3]} ${BASH_REMATCH[4]:0:2}:${BASH_REMATCH[4]:2:2}:${BASH_REMATCH[4]:4:2}" local date_str="${BASH_REMATCH[1]}-${BASH_REMATCH[2]}-${BASH_REMATCH[3]} ${BASH_REMATCH[4]:0:2}:${BASH_REMATCH[4]:2:2}:${BASH_REMATCH[4]:4:2}"
local snap_ts=$(date -j -f "%Y-%m-%d %H:%M:%S" "$date_str" "+%s" 2> /dev/null || echo "0") local snap_ts=$(date -j -f "%Y-%m-%d %H:%M:%S" "$date_str" "+%s" 2> /dev/null || echo "0")
# Skip if parsing failed # Skip if parsing failed
[[ "$snap_ts" == "0" ]] && continue [[ "$snap_ts" == "0" ]] && continue
# If snapshot is older than 24 hours # If snapshot is older than 24 hours
if [[ $snap_ts -lt $one_day_ago ]]; then if [[ $snap_ts -lt $one_day_ago ]]; then
local snap_name="${BASH_REMATCH[0]}" local snap_name="${BASH_REMATCH[0]}"
if [[ "$DRY_RUN" == "true" ]]; then if [[ "$DRY_RUN" == "true" ]]; then
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Old local snapshot: $snap_name ${YELLOW}(dry)${NC}" echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Old local snapshot: $snap_name ${YELLOW}(dry)${NC}"
((cleaned_count++)) ((cleaned_count++))
@@ -417,7 +348,6 @@ clean_local_snapshots() {
fi fi
fi fi
done <<< "$snapshot_list" done <<< "$snapshot_list"
if [[ $cleaned_count -gt 0 && "$DRY_RUN" != "true" ]]; then if [[ $cleaned_count -gt 0 && "$DRY_RUN" != "true" ]]; then
log_success "Cleaned $cleaned_count old local snapshots" log_success "Cleaned $cleaned_count old local snapshots"
fi fi

View File

@@ -1,18 +1,12 @@
#!/bin/bash #!/bin/bash
# User Data Cleanup Module # User Data Cleanup Module
set -euo pipefail set -euo pipefail
# Clean user essentials (caches, logs, trash) # Clean user essentials (caches, logs, trash)
clean_user_essentials() { clean_user_essentials() {
start_section_spinner "Scanning caches..." start_section_spinner "Scanning caches..."
safe_clean ~/Library/Caches/* "User app cache" safe_clean ~/Library/Caches/* "User app cache"
stop_section_spinner stop_section_spinner
safe_clean ~/Library/Logs/* "User app logs" safe_clean ~/Library/Logs/* "User app logs"
# Check if Trash directory is whitelisted # Check if Trash directory is whitelisted
if is_path_whitelisted "$HOME/.Trash"; then if is_path_whitelisted "$HOME/.Trash"; then
note_activity note_activity
@@ -21,33 +15,27 @@ clean_user_essentials() {
safe_clean ~/.Trash/* "Trash" safe_clean ~/.Trash/* "Trash"
fi fi
} }
# Helper: Scan external volumes for cleanup (Trash & DS_Store) # Helper: Scan external volumes for cleanup (Trash & DS_Store)
scan_external_volumes() { scan_external_volumes() {
[[ -d "/Volumes" ]] || return 0 [[ -d "/Volumes" ]] || return 0
# Fast pre-check: collect non-system external volumes and detect network volumes # Fast pre-check: collect non-system external volumes and detect network volumes
local -a candidate_volumes=() local -a candidate_volumes=()
local -a network_volumes=() local -a network_volumes=()
for volume in /Volumes/*; do for volume in /Volumes/*; do
# Basic checks (directory, writable, not a symlink) # Basic checks (directory, writable, not a symlink)
[[ -d "$volume" && -w "$volume" && ! -L "$volume" ]] || continue [[ -d "$volume" && -w "$volume" && ! -L "$volume" ]] || continue
# Skip system root if it appears in /Volumes # Skip system root if it appears in /Volumes
[[ "$volume" == "/" || "$volume" == "/Volumes/Macintosh HD" ]] && continue [[ "$volume" == "/" || "$volume" == "/Volumes/Macintosh HD" ]] && continue
# Use diskutil to intelligently detect network volumes (SMB/NFS/AFP) # Use diskutil to intelligently detect network volumes (SMB/NFS/AFP)
# Timeout protection: 1s per volume to avoid slow network responses # Timeout protection: 1s per volume to avoid slow network responses
local protocol="" local protocol=""
protocol=$(run_with_timeout 1 command diskutil info "$volume" 2> /dev/null | grep -i "Protocol:" | awk '{print $2}' || echo "") protocol=$(run_with_timeout 1 command diskutil info "$volume" 2> /dev/null | grep -i "Protocol:" | awk '{print $2}' || echo "")
case "$protocol" in case "$protocol" in
SMB | NFS | AFP | CIFS | WebDAV) SMB | NFS | AFP | CIFS | WebDAV)
network_volumes+=("$volume") network_volumes+=("$volume")
continue continue
;; ;;
esac esac
# Fallback: Check filesystem type via df if diskutil didn't identify protocol # Fallback: Check filesystem type via df if diskutil didn't identify protocol
local fs_type="" local fs_type=""
fs_type=$(run_with_timeout 1 command df -T "$volume" 2> /dev/null | tail -1 | awk '{print $2}' || echo "") fs_type=$(run_with_timeout 1 command df -T "$volume" 2> /dev/null | tail -1 | awk '{print $2}' || echo "")
@@ -57,14 +45,11 @@ scan_external_volumes() {
continue continue
;; ;;
esac esac
candidate_volumes+=("$volume") candidate_volumes+=("$volume")
done done
# If no external volumes found, return immediately (zero overhead) # If no external volumes found, return immediately (zero overhead)
local volume_count=${#candidate_volumes[@]} local volume_count=${#candidate_volumes[@]}
local network_count=${#network_volumes[@]} local network_count=${#network_volumes[@]}
if [[ $volume_count -eq 0 ]]; then if [[ $volume_count -eq 0 ]]; then
# Show info if network volumes were skipped # Show info if network volumes were skipped
if [[ $network_count -gt 0 ]]; then if [[ $network_count -gt 0 ]]; then
@@ -73,18 +58,14 @@ scan_external_volumes() {
fi fi
return 0 return 0
fi fi
# We have local external volumes, now perform full scan # We have local external volumes, now perform full scan
start_section_spinner "Scanning $volume_count external volume(s)..." start_section_spinner "Scanning $volume_count external volume(s)..."
for volume in "${candidate_volumes[@]}"; do for volume in "${candidate_volumes[@]}"; do
# Re-verify volume is still accessible (may have been unmounted since initial scan) # Re-verify volume is still accessible (may have been unmounted since initial scan)
# Use simple directory check instead of slow mount command for better performance # Use simple directory check instead of slow mount command for better performance
[[ -d "$volume" && -r "$volume" ]] || continue [[ -d "$volume" && -r "$volume" ]] || continue
# 1. Clean Trash on volume # 1. Clean Trash on volume
local volume_trash="$volume/.Trashes" local volume_trash="$volume/.Trashes"
# Check if external volume Trash is whitelisted # Check if external volume Trash is whitelisted
if [[ -d "$volume_trash" && "$DRY_RUN" != "true" ]] && ! is_path_whitelisted "$volume_trash"; then if [[ -d "$volume_trash" && "$DRY_RUN" != "true" ]] && ! is_path_whitelisted "$volume_trash"; then
# Safely iterate and remove each item # Safely iterate and remove each item
@@ -92,71 +73,56 @@ scan_external_volumes() {
safe_remove "$item" true || true safe_remove "$item" true || true
done < <(command find "$volume_trash" -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true) done < <(command find "$volume_trash" -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true)
fi fi
# 2. Clean .DS_Store # 2. Clean .DS_Store
if [[ "$PROTECT_FINDER_METADATA" != "true" ]]; then if [[ "$PROTECT_FINDER_METADATA" != "true" ]]; then
clean_ds_store_tree "$volume" "$(basename "$volume") volume (.DS_Store)" clean_ds_store_tree "$volume" "$(basename "$volume") volume (.DS_Store)"
fi fi
done done
stop_section_spinner stop_section_spinner
} }
# Remove Finder .DS_Store metadata under the home directory.
# Honors the whitelist switch: when PROTECT_FINDER_METADATA is "true",
# the tree is left untouched and a skip notice is printed instead.
clean_finder_metadata() {
    stop_section_spinner
    if [[ "$PROTECT_FINDER_METADATA" != "true" ]]; then
        clean_ds_store_tree "$HOME" "Home directory (.DS_Store)"
        return
    fi
    # Whitelist-protected: record activity and report the skip.
    note_activity
    echo -e " ${GREEN}${ICON_EMPTY}${NC} Finder metadata · whitelist protected"
}
# Clean macOS system caches (user-level targets only).
# Every safe_clean call is guarded with `|| true` so one failing target
# (permission denied, file in use) cannot abort the whole cleanup run
# under `set -e`; per-item failures are handled inside safe_clean itself.
clean_macos_system_caches() {
    stop_section_spinner
    # Clean saved application states with protection for System Settings
    # Note: safe_clean already calls should_protect_path for each file
    safe_clean ~/Library/Saved\ Application\ State/* "Saved application states" || true
    # REMOVED: Spotlight cache cleanup can cause system UI issues
    # Spotlight indexes should be managed by macOS automatically
    # safe_clean ~/Library/Caches/com.apple.spotlight "Spotlight cache"
    safe_clean ~/Library/Caches/com.apple.photoanalysisd "Photo analysis cache" || true
    safe_clean ~/Library/Caches/com.apple.akd "Apple ID cache" || true
    safe_clean ~/Library/Caches/com.apple.WebKit.Networking/* "WebKit network cache" || true
    # Extra user items
    safe_clean ~/Library/DiagnosticReports/* "Diagnostic reports" || true
    safe_clean ~/Library/Caches/com.apple.QuickLook.thumbnailcache "QuickLook thumbnails" || true
    safe_clean ~/Library/Caches/Quick\ Look/* "QuickLook cache" || true
    safe_clean ~/Library/Caches/com.apple.iconservices* "Icon services cache" || true
    safe_clean ~/Library/Caches/CloudKit/* "CloudKit cache" || true
    # Clean incomplete downloads
    safe_clean ~/Downloads/*.download "Safari incomplete downloads" || true
    safe_clean ~/Downloads/*.crdownload "Chrome incomplete downloads" || true
    safe_clean ~/Downloads/*.part "Partial incomplete downloads" || true
    # Additional user-level caches
    safe_clean ~/Library/Autosave\ Information/* "Autosave information" || true
    safe_clean ~/Library/IdentityCaches/* "Identity caches" || true
    safe_clean ~/Library/Suggestions/* "Siri suggestions cache" || true
    safe_clean ~/Library/Calendars/Calendar\ Cache "Calendar cache" || true
    safe_clean ~/Library/Application\ Support/AddressBook/Sources/*/Photos.cache "Address Book photo cache" || true
}
# Clean recent items lists # Clean recent items lists
clean_recent_items() { clean_recent_items() {
stop_section_spinner stop_section_spinner
local shared_dir="$HOME/Library/Application Support/com.apple.sharedfilelist" local shared_dir="$HOME/Library/Application Support/com.apple.sharedfilelist"
# Target only the global recent item lists to avoid touching per-app/System Settings SFL files # Target only the global recent item lists to avoid touching per-app/System Settings SFL files
local -a recent_lists=( local -a recent_lists=(
"$shared_dir/com.apple.LSSharedFileList.RecentApplications.sfl2" "$shared_dir/com.apple.LSSharedFileList.RecentApplications.sfl2"
@@ -168,53 +134,40 @@ clean_recent_items() {
"$shared_dir/com.apple.LSSharedFileList.RecentServers.sfl" "$shared_dir/com.apple.LSSharedFileList.RecentServers.sfl"
"$shared_dir/com.apple.LSSharedFileList.RecentHosts.sfl" "$shared_dir/com.apple.LSSharedFileList.RecentHosts.sfl"
) )
if [[ -d "$shared_dir" ]]; then if [[ -d "$shared_dir" ]]; then
for sfl_file in "${recent_lists[@]}"; do for sfl_file in "${recent_lists[@]}"; do
[[ -e "$sfl_file" ]] && safe_clean "$sfl_file" "Recent items list" [[ -e "$sfl_file" ]] && safe_clean "$sfl_file" "Recent items list" || true
done done
fi fi
# Clean recent items preferences # Clean recent items preferences
safe_clean ~/Library/Preferences/com.apple.recentitems.plist "Recent items preferences" safe_clean ~/Library/Preferences/com.apple.recentitems.plist "Recent items preferences" || true
} }
# Clean old mail downloads # Clean old mail downloads
clean_mail_downloads() { clean_mail_downloads() {
stop_section_spinner stop_section_spinner
local mail_age_days=${MOLE_MAIL_AGE_DAYS:-30} local mail_age_days=${MOLE_MAIL_AGE_DAYS:-30}
if ! [[ "$mail_age_days" =~ ^[0-9]+$ ]]; then if ! [[ "$mail_age_days" =~ ^[0-9]+$ ]]; then
mail_age_days=30 mail_age_days=30
fi fi
local -a mail_dirs=( local -a mail_dirs=(
"$HOME/Library/Mail Downloads" "$HOME/Library/Mail Downloads"
"$HOME/Library/Containers/com.apple.mail/Data/Library/Mail Downloads" "$HOME/Library/Containers/com.apple.mail/Data/Library/Mail Downloads"
) )
local count=0 local count=0
local cleaned_kb=0 local cleaned_kb=0
for target_path in "${mail_dirs[@]}"; do for target_path in "${mail_dirs[@]}"; do
if [[ -d "$target_path" ]]; then if [[ -d "$target_path" ]]; then
# Check directory size threshold # Check directory size threshold
local dir_size_kb=0 local dir_size_kb=0
if command -v du > /dev/null 2>&1; then dir_size_kb=$(get_path_size_kb "$target_path")
dir_size_kb=$(du -sk "$target_path" 2> /dev/null | awk 'NR==1{print $1}')
dir_size_kb=${dir_size_kb:-0}
fi
# Skip if below threshold # Skip if below threshold
if [[ $dir_size_kb -lt ${MOLE_MAIL_DOWNLOADS_MIN_KB:-5120} ]]; then if [[ $dir_size_kb -lt ${MOLE_MAIL_DOWNLOADS_MIN_KB:-5120} ]]; then
continue continue
fi fi
# Find and remove files older than specified days # Find and remove files older than specified days
while IFS= read -r -d '' file_path; do while IFS= read -r -d '' file_path; do
if [[ -f "$file_path" ]]; then if [[ -f "$file_path" ]]; then
local file_size_kb=$(du -sk "$file_path" 2> /dev/null | awk 'NR==1{print $1}') local file_size_kb=$(get_path_size_kb "$file_path")
file_size_kb=${file_size_kb:-0}
if safe_remove "$file_path" true; then if safe_remove "$file_path" true; then
((count++)) ((count++))
((cleaned_kb += file_size_kb)) ((cleaned_kb += file_size_kb))
@@ -223,47 +176,36 @@ clean_mail_downloads() {
done < <(command find "$target_path" -type f -mtime +"$mail_age_days" -print0 2> /dev/null || true) done < <(command find "$target_path" -type f -mtime +"$mail_age_days" -print0 2> /dev/null || true)
fi fi
done done
if [[ $count -gt 0 ]]; then if [[ $count -gt 0 ]]; then
local cleaned_mb=$(echo "$cleaned_kb" | awk '{printf "%.1f", $1/1024}') local cleaned_mb=$(echo "$cleaned_kb" | awk '{printf "%.1f", $1/1024}' || echo "0.0")
echo " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $count mail attachments (~${cleaned_mb}MB)" echo " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $count mail attachments (~${cleaned_mb}MB)"
note_activity note_activity
fi fi
} }
# Clean sandboxed app caches # Clean sandboxed app caches
clean_sandboxed_app_caches() { clean_sandboxed_app_caches() {
stop_section_spinner stop_section_spinner
safe_clean ~/Library/Containers/com.apple.wallpaper.agent/Data/Library/Caches/* "Wallpaper agent cache" safe_clean ~/Library/Containers/com.apple.wallpaper.agent/Data/Library/Caches/* "Wallpaper agent cache"
safe_clean ~/Library/Containers/com.apple.mediaanalysisd/Data/Library/Caches/* "Media analysis cache" safe_clean ~/Library/Containers/com.apple.mediaanalysisd/Data/Library/Caches/* "Media analysis cache"
safe_clean ~/Library/Containers/com.apple.AppStore/Data/Library/Caches/* "App Store cache" safe_clean ~/Library/Containers/com.apple.AppStore/Data/Library/Caches/* "App Store cache"
safe_clean ~/Library/Containers/com.apple.configurator.xpc.InternetService/Data/tmp/* "Apple Configurator temp files" safe_clean ~/Library/Containers/com.apple.configurator.xpc.InternetService/Data/tmp/* "Apple Configurator temp files"
# Clean sandboxed app caches - iterate quietly to avoid UI flashing # Clean sandboxed app caches - iterate quietly to avoid UI flashing
local containers_dir="$HOME/Library/Containers" local containers_dir="$HOME/Library/Containers"
[[ ! -d "$containers_dir" ]] && return 0 [[ ! -d "$containers_dir" ]] && return 0
start_section_spinner "Scanning sandboxed apps..." start_section_spinner "Scanning sandboxed apps..."
local total_size=0 local total_size=0
local cleaned_count=0 local cleaned_count=0
local found_any=false local found_any=false
# Enable nullglob for safe globbing; restore afterwards # Enable nullglob for safe globbing; restore afterwards
local _ng_state local _ng_state
_ng_state=$(shopt -p nullglob || true) _ng_state=$(shopt -p nullglob || true)
shopt -s nullglob shopt -s nullglob
for container_dir in "$containers_dir"/*; do for container_dir in "$containers_dir"/*; do
process_container_cache "$container_dir" process_container_cache "$container_dir"
done done
# Restore nullglob to previous state # Restore nullglob to previous state
eval "$_ng_state" eval "$_ng_state"
stop_section_spinner stop_section_spinner
if [[ "$found_any" == "true" ]]; then if [[ "$found_any" == "true" ]]; then
local size_human=$(bytes_to_human "$((total_size * 1024))") local size_human=$(bytes_to_human "$((total_size * 1024))")
if [[ "$DRY_RUN" == "true" ]]; then if [[ "$DRY_RUN" == "true" ]]; then
@@ -277,12 +219,10 @@ clean_sandboxed_app_caches() {
note_activity note_activity
fi fi
} }
# Process a single container cache directory (reduces nesting) # Process a single container cache directory (reduces nesting)
process_container_cache() { process_container_cache() {
local container_dir="$1" local container_dir="$1"
[[ -d "$container_dir" ]] || return 0 [[ -d "$container_dir" ]] || return 0
# Extract bundle ID and check protection status early # Extract bundle ID and check protection status early
local bundle_id=$(basename "$container_dir") local bundle_id=$(basename "$container_dir")
if is_critical_system_component "$bundle_id"; then if is_critical_system_component "$bundle_id"; then
@@ -291,11 +231,9 @@ process_container_cache() {
if should_protect_data "$bundle_id" || should_protect_data "$(echo "$bundle_id" | tr '[:upper:]' '[:lower:]')"; then if should_protect_data "$bundle_id" || should_protect_data "$(echo "$bundle_id" | tr '[:upper:]' '[:lower:]')"; then
return 0 return 0
fi fi
local cache_dir="$container_dir/Data/Library/Caches" local cache_dir="$container_dir/Data/Library/Caches"
# Check if dir exists and has content # Check if dir exists and has content
[[ -d "$cache_dir" ]] || return 0 [[ -d "$cache_dir" ]] || return 0
# Fast check if empty using find (more efficient than ls) # Fast check if empty using find (more efficient than ls)
if find "$cache_dir" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then if find "$cache_dir" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then
# Use global variables from caller for tracking # Use global variables from caller for tracking
@@ -303,35 +241,28 @@ process_container_cache() {
((total_size += size)) ((total_size += size))
found_any=true found_any=true
((cleaned_count++)) ((cleaned_count++))
if [[ "$DRY_RUN" != "true" ]]; then if [[ "$DRY_RUN" != "true" ]]; then
# Clean contents safely with local nullglob management # Clean contents safely with local nullglob management
local _ng_state local _ng_state
_ng_state=$(shopt -p nullglob || true) _ng_state=$(shopt -p nullglob || true)
shopt -s nullglob shopt -s nullglob
for item in "$cache_dir"/*; do for item in "$cache_dir"/*; do
[[ -e "$item" ]] || continue [[ -e "$item" ]] || continue
safe_remove "$item" true || true safe_remove "$item" true || true
done done
eval "$_ng_state" eval "$_ng_state"
fi fi
fi fi
} }
# Clean browser caches (Safari, Chrome, Edge, Firefox, etc.) # Clean browser caches (Safari, Chrome, Edge, Firefox, etc.)
clean_browsers() { clean_browsers() {
stop_section_spinner stop_section_spinner
safe_clean ~/Library/Caches/com.apple.Safari/* "Safari cache" safe_clean ~/Library/Caches/com.apple.Safari/* "Safari cache"
# Chrome/Chromium # Chrome/Chromium
safe_clean ~/Library/Caches/Google/Chrome/* "Chrome cache" safe_clean ~/Library/Caches/Google/Chrome/* "Chrome cache"
safe_clean ~/Library/Application\ Support/Google/Chrome/*/Application\ Cache/* "Chrome app cache" safe_clean ~/Library/Application\ Support/Google/Chrome/*/Application\ Cache/* "Chrome app cache"
safe_clean ~/Library/Application\ Support/Google/Chrome/*/GPUCache/* "Chrome GPU cache" safe_clean ~/Library/Application\ Support/Google/Chrome/*/GPUCache/* "Chrome GPU cache"
safe_clean ~/Library/Caches/Chromium/* "Chromium cache" safe_clean ~/Library/Caches/Chromium/* "Chromium cache"
safe_clean ~/Library/Caches/com.microsoft.edgemac/* "Edge cache" safe_clean ~/Library/Caches/com.microsoft.edgemac/* "Edge cache"
safe_clean ~/Library/Caches/company.thebrowser.Browser/* "Arc cache" safe_clean ~/Library/Caches/company.thebrowser.Browser/* "Arc cache"
safe_clean ~/Library/Caches/company.thebrowser.dia/* "Dia cache" safe_clean ~/Library/Caches/company.thebrowser.dia/* "Dia cache"
@@ -344,11 +275,9 @@ clean_browsers() {
safe_clean ~/Library/Caches/zen/* "Zen cache" safe_clean ~/Library/Caches/zen/* "Zen cache"
safe_clean ~/Library/Application\ Support/Firefox/Profiles/*/cache2/* "Firefox profile cache" safe_clean ~/Library/Application\ Support/Firefox/Profiles/*/cache2/* "Firefox profile cache"
} }
# Clean cloud storage app caches # Clean cloud storage app caches
clean_cloud_storage() { clean_cloud_storage() {
stop_section_spinner stop_section_spinner
safe_clean ~/Library/Caches/com.dropbox.* "Dropbox cache" safe_clean ~/Library/Caches/com.dropbox.* "Dropbox cache"
safe_clean ~/Library/Caches/com.getdropbox.dropbox "Dropbox cache" safe_clean ~/Library/Caches/com.getdropbox.dropbox "Dropbox cache"
safe_clean ~/Library/Caches/com.google.GoogleDrive "Google Drive cache" safe_clean ~/Library/Caches/com.google.GoogleDrive "Google Drive cache"
@@ -357,11 +286,9 @@ clean_cloud_storage() {
safe_clean ~/Library/Caches/com.box.desktop "Box cache" safe_clean ~/Library/Caches/com.box.desktop "Box cache"
safe_clean ~/Library/Caches/com.microsoft.OneDrive "OneDrive cache" safe_clean ~/Library/Caches/com.microsoft.OneDrive "OneDrive cache"
} }
# Clean office application caches # Clean office application caches
clean_office_applications() { clean_office_applications() {
stop_section_spinner stop_section_spinner
safe_clean ~/Library/Caches/com.microsoft.Word "Microsoft Word cache" safe_clean ~/Library/Caches/com.microsoft.Word "Microsoft Word cache"
safe_clean ~/Library/Caches/com.microsoft.Excel "Microsoft Excel cache" safe_clean ~/Library/Caches/com.microsoft.Excel "Microsoft Excel cache"
safe_clean ~/Library/Caches/com.microsoft.Powerpoint "Microsoft PowerPoint cache" safe_clean ~/Library/Caches/com.microsoft.Powerpoint "Microsoft PowerPoint cache"
@@ -371,62 +298,48 @@ clean_office_applications() {
safe_clean ~/Library/Caches/org.mozilla.thunderbird/* "Thunderbird cache" safe_clean ~/Library/Caches/org.mozilla.thunderbird/* "Thunderbird cache"
safe_clean ~/Library/Caches/com.apple.mail/* "Apple Mail cache" safe_clean ~/Library/Caches/com.apple.mail/* "Apple Mail cache"
} }
# Clean caches and temp files left behind by virtualization tools
# (VMware Fusion, Parallels, VirtualBox, Vagrant). Glob patterns are
# left unquoted at the `*` so they still expand at the call site.
clean_virtualization_tools() {
    stop_section_spinner
    safe_clean "$HOME/Library/Caches/com.vmware.fusion" "VMware Fusion cache"
    safe_clean "$HOME/Library/Caches/com.parallels."* "Parallels cache"
    safe_clean "$HOME/VirtualBox VMs/.cache" "VirtualBox cache"
    safe_clean "$HOME/.vagrant.d/tmp/"* "Vagrant temporary files"
}
# Clean Application Support logs and caches # Clean Application Support logs and caches
clean_application_support_logs() { clean_application_support_logs() {
stop_section_spinner stop_section_spinner
if [[ ! -d "$HOME/Library/Application Support" ]] || ! ls "$HOME/Library/Application Support" > /dev/null 2>&1; then if [[ ! -d "$HOME/Library/Application Support" ]] || ! ls "$HOME/Library/Application Support" > /dev/null 2>&1; then
note_activity note_activity
echo -e " ${YELLOW}${ICON_WARNING}${NC} Skipped: No permission to access Application Support" echo -e " ${YELLOW}${ICON_WARNING}${NC} Skipped: No permission to access Application Support"
return 0 return 0
fi fi
start_section_spinner "Scanning Application Support..." start_section_spinner "Scanning Application Support..."
local total_size=0 local total_size=0
local cleaned_count=0 local cleaned_count=0
local found_any=false local found_any=false
# Enable nullglob for safe globbing # Enable nullglob for safe globbing
local _ng_state local _ng_state
_ng_state=$(shopt -p nullglob || true) _ng_state=$(shopt -p nullglob || true)
shopt -s nullglob shopt -s nullglob
# Clean log directories and cache patterns # Clean log directories and cache patterns
for app_dir in ~/Library/Application\ Support/*; do for app_dir in ~/Library/Application\ Support/*; do
[[ -d "$app_dir" ]] || continue [[ -d "$app_dir" ]] || continue
local app_name=$(basename "$app_dir") local app_name=$(basename "$app_dir")
local app_name_lower=$(echo "$app_name" | tr '[:upper:]' '[:lower:]') local app_name_lower=$(echo "$app_name" | tr '[:upper:]' '[:lower:]')
local is_protected=false local is_protected=false
if should_protect_data "$app_name"; then if should_protect_data "$app_name"; then
is_protected=true is_protected=true
elif should_protect_data "$app_name_lower"; then elif should_protect_data "$app_name_lower"; then
is_protected=true is_protected=true
fi fi
if [[ "$is_protected" == "true" ]]; then if [[ "$is_protected" == "true" ]]; then
continue continue
fi fi
if is_critical_system_component "$app_name"; then if is_critical_system_component "$app_name"; then
continue continue
fi fi
local -a start_candidates=("$app_dir/log" "$app_dir/logs" "$app_dir/activitylog" "$app_dir/Cache/Cache_Data" "$app_dir/Crashpad/completed") local -a start_candidates=("$app_dir/log" "$app_dir/logs" "$app_dir/activitylog" "$app_dir/Cache/Cache_Data" "$app_dir/Crashpad/completed")
for candidate in "${start_candidates[@]}"; do for candidate in "${start_candidates[@]}"; do
if [[ -d "$candidate" ]]; then if [[ -d "$candidate" ]]; then
if find "$candidate" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then if find "$candidate" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then
@@ -434,7 +347,6 @@ clean_application_support_logs() {
((total_size += size)) ((total_size += size))
((cleaned_count++)) ((cleaned_count++))
found_any=true found_any=true
if [[ "$DRY_RUN" != "true" ]]; then if [[ "$DRY_RUN" != "true" ]]; then
for item in "$candidate"/*; do for item in "$candidate"/*; do
[[ -e "$item" ]] || continue [[ -e "$item" ]] || continue
@@ -445,16 +357,13 @@ clean_application_support_logs() {
fi fi
done done
done done
# Clean Group Containers logs # Clean Group Containers logs
local known_group_containers=( local known_group_containers=(
"group.com.apple.contentdelivery" "group.com.apple.contentdelivery"
) )
for container in "${known_group_containers[@]}"; do for container in "${known_group_containers[@]}"; do
local container_path="$HOME/Library/Group Containers/$container" local container_path="$HOME/Library/Group Containers/$container"
local -a gc_candidates=("$container_path/Logs" "$container_path/Library/Logs") local -a gc_candidates=("$container_path/Logs" "$container_path/Library/Logs")
for candidate in "${gc_candidates[@]}"; do for candidate in "${gc_candidates[@]}"; do
if [[ -d "$candidate" ]]; then if [[ -d "$candidate" ]]; then
if find "$candidate" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then if find "$candidate" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then
@@ -462,7 +371,6 @@ clean_application_support_logs() {
((total_size += size)) ((total_size += size))
((cleaned_count++)) ((cleaned_count++))
found_any=true found_any=true
if [[ "$DRY_RUN" != "true" ]]; then if [[ "$DRY_RUN" != "true" ]]; then
for item in "$candidate"/*; do for item in "$candidate"/*; do
[[ -e "$item" ]] || continue [[ -e "$item" ]] || continue
@@ -473,12 +381,9 @@ clean_application_support_logs() {
fi fi
done done
done done
# Restore nullglob to previous state # Restore nullglob to previous state
eval "$_ng_state" eval "$_ng_state"
stop_section_spinner stop_section_spinner
if [[ "$found_any" == "true" ]]; then if [[ "$found_any" == "true" ]]; then
local size_human=$(bytes_to_human "$((total_size * 1024))") local size_human=$(bytes_to_human "$((total_size * 1024))")
if [[ "$DRY_RUN" == "true" ]]; then if [[ "$DRY_RUN" == "true" ]]; then
@@ -492,7 +397,6 @@ clean_application_support_logs() {
note_activity note_activity
fi fi
} }
# Check and show iOS device backup info # Check and show iOS device backup info
check_ios_device_backups() { check_ios_device_backups() {
local backup_dir="$HOME/Library/Application Support/MobileSync/Backup" local backup_dir="$HOME/Library/Application Support/MobileSync/Backup"
@@ -508,15 +412,14 @@ check_ios_device_backups() {
fi fi
fi fi
fi fi
return 0
} }
# Clean Apple Silicon specific caches
# Env: IS_M_SERIES # Env: IS_M_SERIES
# Clean Apple Silicon specific caches
clean_apple_silicon_caches() { clean_apple_silicon_caches() {
if [[ "${IS_M_SERIES:-false}" != "true" ]]; then if [[ "${IS_M_SERIES:-false}" != "true" ]]; then
return 0 return 0
fi fi
start_section "Apple Silicon updates" start_section "Apple Silicon updates"
safe_clean /Library/Apple/usr/share/rosetta/rosetta_update_bundle "Rosetta 2 cache" safe_clean /Library/Apple/usr/share/rosetta/rosetta_update_bundle "Rosetta 2 cache"
safe_clean ~/Library/Caches/com.apple.rosetta.update "Rosetta 2 user cache" safe_clean ~/Library/Caches/com.apple.rosetta.update "Rosetta 2 user cache"

View File

@@ -30,8 +30,8 @@ readonly NC="${ESC}[0m"
readonly ICON_CONFIRM="◎" readonly ICON_CONFIRM="◎"
readonly ICON_ADMIN="⚙" readonly ICON_ADMIN="⚙"
readonly ICON_SUCCESS="✓" readonly ICON_SUCCESS="✓"
readonly ICON_ERROR="☹︎" readonly ICON_ERROR=""
readonly ICON_WARNING="☺︎" readonly ICON_WARNING=""
readonly ICON_EMPTY="○" readonly ICON_EMPTY="○"
readonly ICON_SOLID="●" readonly ICON_SOLID="●"
readonly ICON_LIST="•" readonly ICON_LIST="•"

View File

@@ -97,10 +97,23 @@ safe_remove() {
debug_log "Removing: $path" debug_log "Removing: $path"
# Perform the deletion # Perform the deletion
if rm -rf "$path" 2> /dev/null; then # SAFE: safe_remove implementation # Use || to capture the exit code so set -e won't abort on rm failures
local error_msg
local rm_exit=0
error_msg=$(rm -rf "$path" 2>&1) || rm_exit=$?
if [[ $rm_exit -eq 0 ]]; then
return 0 return 0
else else
[[ "$silent" != "true" ]] && log_error "Failed to remove: $path" # Check if it's a permission error
if [[ "$error_msg" == *"Permission denied"* ]] || [[ "$error_msg" == *"Operation not permitted"* ]]; then
MOLE_PERMISSION_DENIED_COUNT=${MOLE_PERMISSION_DENIED_COUNT:-0}
MOLE_PERMISSION_DENIED_COUNT=$((MOLE_PERMISSION_DENIED_COUNT + 1))
export MOLE_PERMISSION_DENIED_COUNT
debug_log "Permission denied: $path (may need Full Disk Access)"
else
[[ "$silent" != "true" ]] && log_error "Failed to remove: $path"
fi
return 1 return 1
fi fi
} }
@@ -241,8 +254,10 @@ get_path_size_kb() {
return return
} }
# Direct execution without timeout overhead - critical for performance in loops # Direct execution without timeout overhead - critical for performance in loops
# Use || echo 0 to ensure failure in du (e.g. permission error) doesn't exit script under set -e
# Pipefail would normally cause the pipeline to fail if du fails, but || handle catches it.
local size local size
size=$(command du -sk "$path" 2> /dev/null | awk '{print $1}') size=$(command du -sk "$path" 2> /dev/null | awk '{print $1}' || echo "0")
echo "${size:-0}" echo "${size:-0}"
} }

View File

@@ -81,7 +81,7 @@ log_warning() {
# Log error message # Log error message
log_error() { log_error() {
echo -e "${RED}${ICON_ERROR}${NC} $1" >&2 echo -e "${YELLOW}${ICON_ERROR}${NC} $1" >&2
local timestamp=$(date '+%Y-%m-%d %H:%M:%S') local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
echo "[$timestamp] ERROR: $1" >> "$LOG_FILE" 2> /dev/null || true echo "[$timestamp] ERROR: $1" >> "$LOG_FILE" 2> /dev/null || true
if [[ "${MO_DEBUG:-}" == "1" ]]; then if [[ "${MO_DEBUG:-}" == "1" ]]; then

View File

@@ -249,28 +249,35 @@ show_menu_option() {
# Background spinner implementation # Background spinner implementation
INLINE_SPINNER_PID="" INLINE_SPINNER_PID=""
INLINE_SPINNER_STOP_FILE=""
start_inline_spinner() { start_inline_spinner() {
stop_inline_spinner 2> /dev/null || true stop_inline_spinner 2> /dev/null || true
local message="$1" local message="$1"
if [[ -t 1 ]]; then if [[ -t 1 ]]; then
( # Create unique stop flag file for this spinner instance
# Clean exit handler for spinner subprocess (invoked by trap) INLINE_SPINNER_STOP_FILE="${TMPDIR:-/tmp}/mole_spinner_$$_$RANDOM.stop"
# shellcheck disable=SC2329
cleanup_spinner() { exit 0; }
trap cleanup_spinner TERM INT EXIT
(
local stop_file="$INLINE_SPINNER_STOP_FILE"
local chars local chars
chars="$(mo_spinner_chars)" chars="$(mo_spinner_chars)"
[[ -z "$chars" ]] && chars="|/-\\" [[ -z "$chars" ]] && chars="|/-\\"
local i=0 local i=0
while true; do
# Cooperative exit: check for stop file instead of relying on signals
while [[ ! -f "$stop_file" ]]; do
local c="${chars:$((i % ${#chars})):1}" local c="${chars:$((i % ${#chars})):1}"
# Output to stderr to avoid interfering with stdout # Output to stderr to avoid interfering with stdout
printf "\r${MOLE_SPINNER_PREFIX:-}${BLUE}%s${NC} %s" "$c" "$message" >&2 || exit 0 printf "\r${MOLE_SPINNER_PREFIX:-}${BLUE}%s${NC} %s" "$c" "$message" >&2 || break
((i++)) ((i++))
sleep 0.1 sleep 0.1
done done
# Clean up stop file before exiting
rm -f "$stop_file" 2> /dev/null || true
exit 0
) & ) &
INLINE_SPINNER_PID=$! INLINE_SPINNER_PID=$!
disown 2> /dev/null || true disown 2> /dev/null || true
@@ -281,17 +288,30 @@ start_inline_spinner() {
stop_inline_spinner() { stop_inline_spinner() {
if [[ -n "$INLINE_SPINNER_PID" ]]; then if [[ -n "$INLINE_SPINNER_PID" ]]; then
# Try graceful TERM first, then force KILL if needed # Cooperative stop: create stop file to signal spinner to exit
if kill -0 "$INLINE_SPINNER_PID" 2> /dev/null; then if [[ -n "$INLINE_SPINNER_STOP_FILE" ]]; then
kill -TERM "$INLINE_SPINNER_PID" 2> /dev/null || true touch "$INLINE_SPINNER_STOP_FILE" 2> /dev/null || true
sleep 0.1 2> /dev/null || true
# Force kill if still running
if kill -0 "$INLINE_SPINNER_PID" 2> /dev/null; then
kill -KILL "$INLINE_SPINNER_PID" 2> /dev/null || true
fi
fi fi
# Wait briefly for cooperative exit
local wait_count=0
while kill -0 "$INLINE_SPINNER_PID" 2> /dev/null && [[ $wait_count -lt 5 ]]; do
sleep 0.05 2> /dev/null || true
((wait_count++))
done
# Only use SIGKILL as last resort if process is stuck
if kill -0 "$INLINE_SPINNER_PID" 2> /dev/null; then
kill -KILL "$INLINE_SPINNER_PID" 2> /dev/null || true
fi
wait "$INLINE_SPINNER_PID" 2> /dev/null || true wait "$INLINE_SPINNER_PID" 2> /dev/null || true
# Cleanup
rm -f "$INLINE_SPINNER_STOP_FILE" 2> /dev/null || true
INLINE_SPINNER_PID="" INLINE_SPINNER_PID=""
INLINE_SPINNER_STOP_FILE=""
# Clear the line - use \033[2K to clear entire line, not just to end # Clear the line - use \033[2K to clear entire line, not just to end
[[ -t 1 ]] && printf "\r\033[2K" >&2 || true [[ -t 1 ]] && printf "\r\033[2K" >&2 || true
fi fi
@@ -355,3 +375,60 @@ format_last_used_summary() {
fi fi
echo "$value" echo "$value"
} }
# Check if terminal has Full Disk Access
# Returns 0 if FDA is granted, 1 if denied, 2 if unknown
has_full_disk_access() {
# Cache the result to avoid repeated checks
if [[ -n "${MOLE_HAS_FDA:-}" ]]; then
if [[ "$MOLE_HAS_FDA" == "1" ]]; then
return 0
elif [[ "$MOLE_HAS_FDA" == "unknown" ]]; then
return 2
else
return 1
fi
fi
# Test access to protected directories that require FDA
# Strategy: Try to access directories that are commonly protected
# If ANY of them are accessible, we likely have FDA
# If ALL fail, we definitely don't have FDA
local -a protected_dirs=(
"$HOME/Library/Safari/LocalStorage"
"$HOME/Library/Mail/V10"
"$HOME/Library/Messages/chat.db"
)
local accessible_count=0
local tested_count=0
for test_path in "${protected_dirs[@]}"; do
# Only test when the protected path exists
if [[ -e "$test_path" ]]; then
tested_count=$((tested_count + 1))
# Try to stat the ACTUAL protected path - this requires FDA
if stat "$test_path" > /dev/null 2>&1; then
accessible_count=$((accessible_count + 1))
fi
fi
done
# Three possible outcomes:
# 1. tested_count = 0: Can't determine (test paths don't exist) → unknown
# 2. tested_count > 0 && accessible_count > 0: Has FDA → yes
# 3. tested_count > 0 && accessible_count = 0: No FDA → no
if [[ $tested_count -eq 0 ]]; then
# Can't determine - test paths don't exist, treat as unknown
export MOLE_HAS_FDA="unknown"
return 2
elif [[ $accessible_count -gt 0 ]]; then
# At least one path is accessible → has FDA
export MOLE_HAS_FDA=1
return 0
else
# Tested paths exist but not accessible → no FDA
export MOLE_HAS_FDA=0
return 1
fi
}