Mirror of https://github.com/tw93/Mole.git (synced 2026-02-11 01:19:16 +00:00)
feat: Enhance clean and optimize operations with new configuration constants
@@ -38,7 +38,6 @@ clean_code_editors() {
    safe_clean ~/Library/Application\ Support/Code/Cache/* "VS Code cache"
    safe_clean ~/Library/Application\ Support/Code/CachedExtensions/* "VS Code extension cache"
    safe_clean ~/Library/Application\ Support/Code/CachedData/* "VS Code data cache"
    # safe_clean ~/Library/Caches/JetBrains/* "JetBrains cache"
    safe_clean ~/Library/Caches/com.sublimetext.*/* "Sublime Text cache"
}

@@ -193,55 +193,3 @@ clean_project_caches() {
        [[ -d "$pycache" ]] && safe_clean "$pycache"/* "Python bytecode cache" || true
    done < "$pycache_tmp_file"
}

# Clean Spotlight user caches
clean_spotlight_caches() {
    local cleaned_size=0
    local cleaned_count=0

    # CoreSpotlight user cache (can grow very large, safe to delete)
    local spotlight_cache="$HOME/Library/Metadata/CoreSpotlight"
    if [[ -d "$spotlight_cache" ]]; then
        local size_kb=$(get_path_size_kb "$spotlight_cache")
        if [[ "$size_kb" -gt 0 ]]; then
            if [[ "$DRY_RUN" != "true" ]]; then
                safe_remove "$spotlight_cache" true && {
                    ((cleaned_size += size_kb))
                    ((cleaned_count++))
                    echo -e " ${GREEN}${ICON_SUCCESS}${NC} Spotlight cache ($(bytes_to_human $((size_kb * 1024))))"
                    note_activity
                }
            else
                ((cleaned_size += size_kb))
                echo -e " ${YELLOW}→${NC} Spotlight cache (would clean $(bytes_to_human $((size_kb * 1024))))"
                note_activity
            fi
        fi
    fi

    # Spotlight saved application state
    local spotlight_state="$HOME/Library/Saved Application State/com.apple.spotlight.Spotlight.savedState"
    if [[ -d "$spotlight_state" ]]; then
        local size_kb=$(get_path_size_kb "$spotlight_state")
        if [[ "$size_kb" -gt 0 ]]; then
            if [[ "$DRY_RUN" != "true" ]]; then
                safe_remove "$spotlight_state" true && {
                    ((cleaned_size += size_kb))
                    ((cleaned_count++))
                    echo -e " ${GREEN}${ICON_SUCCESS}${NC} Spotlight state ($(bytes_to_human $((size_kb * 1024))))"
                    note_activity
                }
            else
                ((cleaned_size += size_kb))
                echo -e " ${YELLOW}→${NC} Spotlight state (would clean $(bytes_to_human $((size_kb * 1024))))"
                note_activity
            fi
        fi
    fi

    if [[ $cleaned_size -gt 0 ]]; then
        ((files_cleaned += cleaned_count))
        ((total_size_cleaned += cleaned_size))
        ((total_items++))
    fi
}

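The size accounting above leans on two helpers defined elsewhere in the repo, get_path_size_kb and bytes_to_human. A minimal sketch of what such helpers might look like (an assumption for illustration, not the project's actual implementation):

    # Rough sketches; the repo's own helpers add error handling and formatting polish.
    get_path_size_kb_sketch() {
        local kb
        # du -sk reports the size in 1024-byte blocks; fall back to 0 on failure.
        kb=$(command du -sk "$1" 2> /dev/null | awk '{print $1}' || true)
        echo "${kb:-0}"
    }

    bytes_to_human_sketch() {
        # Integer-only conversion: bytes -> B/KB/MB/GB.
        local bytes=$1 unit
        for unit in B KB MB GB; do
            ((bytes < 1024)) && break
            ((bytes /= 1024))
        done
        echo "${bytes}${unit}"
    }
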
@@ -245,7 +245,6 @@ clean_dev_api_tools() {
# Clean misc dev tools
clean_dev_misc() {
    safe_clean ~/Library/Caches/com.unity3d.*/* "Unity cache"
    # safe_clean ~/Library/Caches/com.jetbrains.toolbox/* "JetBrains Toolbox cache"
    safe_clean ~/Library/Caches/com.mongodb.compass/* "MongoDB Compass cache"
    safe_clean ~/Library/Caches/com.figma.Desktop/* "Figma cache"
    safe_clean ~/Library/Caches/com.github.GitHubDesktop/* "GitHub Desktop cache"
@@ -314,7 +313,7 @@ clean_developer_tools() {
        safe_clean "$lock_dir"/* "Homebrew lock files"
    elif [[ -d "$lock_dir" ]]; then
        # Directory exists but is not writable. Check whether it is empty to avoid noise.
        if [[ -n "$(ls -A "$lock_dir" 2> /dev/null)" ]]; then
        if find "$lock_dir" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then
            # Only try sudo once if it is really needed, or skip entirely to avoid repeated prompts
            # Decision: skip system/root-owned locks to avoid nagging for credentials.
            debug_log "Skipping read-only Homebrew locks in $lock_dir"

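The find line above replaces the earlier ls -A subshell: it stops at the first entry instead of listing the whole directory, which matters for large lock directories. A standalone sketch of the same idiom (the helper name is illustrative, not part of the repo):

    # Return success if a directory contains at least one entry.
    dir_has_entries() {
        local dir="$1"
        [[ -d "$dir" ]] || return 1
        find "$dir" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .
    }

    # Example: dir_has_entries /opt/homebrew/var/homebrew/locks && echo "locks present"
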
@@ -487,7 +487,7 @@ clean_project_artifacts() {
    for root in "${search_roots[@]}"; do
        if [[ "$path" == "$root/"* ]]; then
            # Remove root prefix and get first directory component
            local relative_path="${path#$root/}"
            local relative_path="${path#"$root"/}"
            # Extract first directory name
            echo "$relative_path" | cut -d'/' -f1
            return 0

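The only functional change in this hunk is quoting $root inside the prefix strip: in ${path#pattern} an unquoted expansion is treated as a glob pattern, so a root containing *, ?, or [ may fail to match or match the wrong thing. A small illustration (the paths are made up for the example):

    # Hypothetical paths, chosen only to show the difference.
    root='/Users/demo/[work]'
    path='/Users/demo/[work]/project/build/cache.tmp'

    echo "${path#$root/}"     # unquoted: '[work]' is read as a glob character class, prefix fails to match
    echo "${path#"$root"/}"   # quoted: prefix matched literally -> project/build/cache.tmp
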
@@ -29,14 +29,16 @@ clean_deep_system() {
    # Clean Library Updates safely - skip if SIP is enabled to avoid error messages
    # SIP-protected files in /Library/Updates cannot be deleted even with sudo
    if [[ -d "/Library/Updates" && ! -L "/Library/Updates" ]]; then
        if is_sip_enabled; then
            # SIP is enabled, skip /Library/Updates entirely to avoid error messages
            # These files are system-protected and cannot be removed
            : # No-op, silently skip
        else
        if ! is_sip_enabled; then
            # SIP is disabled, attempt cleanup with restricted flag check
            local updates_cleaned=0
            while IFS= read -r -d '' item; do
                # Validate path format (must be direct child of /Library/Updates)
                if [[ -z "$item" ]] || [[ ! "$item" =~ ^/Library/Updates/[^/]+$ ]]; then
                    debug_log "Skipping malformed path: $item"
                    continue
                fi

                # Skip system-protected files (restricted flag)
                local item_flags
                item_flags=$(command stat -f%Sf "$item" 2> /dev/null || echo "")
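is_sip_enabled is the repo's own helper; the usual way to probe System Integrity Protection is csrutil status, and the stat -f%Sf call above prints BSD file flags so that restricted (SIP-protected) entries can be skipped. A rough sketch of both checks, under the assumption that parsing csrutil output is acceptable:

    # Illustrative only - the project defines its own is_sip_enabled elsewhere.
    is_sip_enabled_sketch() {
        csrutil status 2> /dev/null | grep -q "status: enabled"
    }

    # Skip anything carrying the 'restricted' flag (SIP-protected even for root).
    is_restricted_file() {
        local flags
        flags=$(command stat -f%Sf "$1" 2> /dev/null || echo "")
        [[ "$flags" == *restricted* ]]
    }
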
@@ -81,12 +83,22 @@ clean_deep_system() {
        MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning system caches..."
    fi
    local code_sign_cleaned=0
    local found_count=0

    # Stream processing with progress updates (efficient for large directories)
    # Reduce timeout to 5s for faster completion when no caches exist
    while IFS= read -r -d '' cache_dir; do
        debug_log "Found code sign cache: $cache_dir"
        if safe_remove "$cache_dir" true; then
            ((code_sign_cleaned++))
        fi
    done < <(find /private/var/folders -type d -name "*.code_sign_clone" -path "*/X/*" -print0 2> /dev/null || true)
        ((found_count++))

        # Update spinner every 50 items to show progress
        if [[ -t 1 ]] && ((found_count % 50 == 0)); then
            stop_inline_spinner
            MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning system caches... ($found_count found)"
        fi
    done < <(run_with_timeout 5 command find /private/var/folders -type d -name "*.code_sign_clone" -path "*/X/*" -print0 2> /dev/null || true)

    if [[ -t 1 ]]; then stop_inline_spinner; fi

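run_with_timeout is a helper defined elsewhere in the repo; this hunk only changes the find to run under it with a 5-second budget. For readers without the source at hand, a rough sketch of what such a wrapper can look like (an illustration, not the project's implementation; the lingering sleep after early completion is left unhandled for brevity):

    # Run a command, killing it after N seconds; stdout passes through untouched.
    run_with_timeout_sketch() {
        local secs="$1"; shift
        "$@" &
        local cmd_pid=$!
        (
            sleep "$secs"
            kill "$cmd_pid" 2> /dev/null || true
        ) &
        local watchdog_pid=$!
        local rc=0
        wait "$cmd_pid" || rc=$?
        kill "$watchdog_pid" 2> /dev/null || true
        return "$rc"
    }
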
@@ -103,52 +115,89 @@ clean_deep_system() {
    log_success "Power logs"
}

# Clean Time Machine failed backups
# Clean Time Machine incomplete backups
clean_time_machine_failed_backups() {
    local tm_cleaned=0

    # Check if Time Machine is configured
    if command -v tmutil > /dev/null 2>&1; then
        if tmutil destinationinfo 2>&1 | grep -q "No destinations configured"; then
            echo -e " ${GREEN}${ICON_SUCCESS}${NC} No failed Time Machine backups found"
            return 0
    # Check if tmutil is available
    if ! command -v tmutil > /dev/null 2>&1; then
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
        return 0
    fi

    # Start spinner early (before potentially slow tmutil command)
    if [[ -t 1 ]]; then
        MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking Time Machine configuration..."
    fi
    local spinner_active=true

    # Check if Time Machine is configured (with short timeout for faster response)
    local tm_info
    tm_info=$(run_with_timeout 2 tmutil destinationinfo 2>&1 || echo "failed")
    if [[ "$tm_info" == *"No destinations configured"* || "$tm_info" == "failed" ]]; then
        if [[ "$spinner_active" == "true" && -t 1 ]]; then
            stop_inline_spinner
        fi
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
        return 0
    fi

    if [[ ! -d "/Volumes" ]]; then
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} No failed Time Machine backups found"
        if [[ "$spinner_active" == "true" && -t 1 ]]; then
            stop_inline_spinner
        fi
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
        return 0
    fi

    # Skip if backup is running
    if pgrep -x "backupd" > /dev/null 2>&1; then
        if [[ "$spinner_active" == "true" && -t 1 ]]; then
            stop_inline_spinner
        fi
        echo -e " ${YELLOW}!${NC} Time Machine backup in progress, skipping cleanup"
        return 0
    fi

    # Update spinner message for volume scanning
    if [[ "$spinner_active" == "true" && -t 1 ]]; then
        stop_inline_spinner
        MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking backup volumes..."
    fi

    # Fast pre-scan: check which volumes have Backups.backupdb (avoid expensive tmutil checks)
    local -a backup_volumes=()
    for volume in /Volumes/*; do
        [[ -d "$volume" ]] || continue

        # Skip system and network volumes
        [[ "$volume" == "/Volumes/Macintosh HD" || "$volume" == "/" ]] && continue

        if [[ -t 1 ]]; then
            MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning backup volumes..."
        fi

        # Skip if volume is a symlink (security check)
        [[ -L "$volume" ]] && continue

        # Check if this is a Time Machine destination
        if command -v tmutil > /dev/null 2>&1; then
            if ! tmutil destinationinfo 2> /dev/null | grep -q "$(basename "$volume")"; then
                continue
            fi
        # Quick check: does this volume have backup directories?
        if [[ -d "$volume/Backups.backupdb" ]] || [[ -d "$volume/.MobileBackups" ]]; then
            backup_volumes+=("$volume")
        fi
    done

        local fs_type=$(command df -T "$volume" 2> /dev/null | tail -1 | awk '{print $2}')
    # If no backup volumes found, stop spinner and return
    if [[ ${#backup_volumes[@]} -eq 0 ]]; then
        if [[ "$spinner_active" == "true" && -t 1 ]]; then
            stop_inline_spinner
        fi
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
        return 0
    fi

    # Update spinner message: we have potential backup volumes, now scan them
    if [[ "$spinner_active" == "true" && -t 1 ]]; then
        stop_inline_spinner
        MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning backup volumes..."
    fi
    for volume in "${backup_volumes[@]}"; do
        # Skip network volumes (quick check)
        local fs_type
        fs_type=$(run_with_timeout 1 command df -T "$volume" 2> /dev/null | tail -1 | awk '{print $2}' || echo "unknown")
        case "$fs_type" in
            nfs | smbfs | afpfs | cifs | webdav) continue ;;
            nfs | smbfs | afpfs | cifs | webdav | unknown) continue ;;
        esac

        # HFS+ style backups (Backups.backupdb)
@@ -157,7 +206,7 @@ clean_time_machine_failed_backups() {
            while IFS= read -r inprogress_file; do
                [[ -d "$inprogress_file" ]] || continue

                # Only delete old failed backups (safety window)
                # Only delete old incomplete backups (safety window)
                local file_mtime=$(get_file_mtime "$inprogress_file")
                local current_time=$(date +%s)
                local hours_old=$(((current_time - file_mtime) / 3600))
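get_file_mtime is another repo helper; on macOS the modification time in epoch seconds comes from BSD stat -f %m. A hedged sketch of the helper and the age gate used above (the 24-hour threshold is shown only as an illustration; the actual safety window is defined by the script itself):

    # Illustrative helper: print a file's mtime as epoch seconds (BSD stat on macOS).
    get_file_mtime_sketch() {
        command stat -f %m "$1" 2> /dev/null || echo 0
    }

    # Example age gate: skip anything newer than the safety window.
    mtime=$(get_file_mtime_sketch "$inprogress_file")
    hours_old=$((($(date +%s) - mtime) / 3600))
    ((hours_old < 24)) && echo "backup is recent, keep it"
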
@@ -169,11 +218,17 @@ clean_time_machine_failed_backups() {
                local size_kb=$(get_path_size_kb "$inprogress_file")
                [[ "$size_kb" -le 0 ]] && continue

                # Stop spinner before first output
                if [[ "$spinner_active" == "true" ]]; then
                    if [[ -t 1 ]]; then stop_inline_spinner; fi
                    spinner_active=false
                fi

                local backup_name=$(basename "$inprogress_file")
                local size_human=$(bytes_to_human "$((size_kb * 1024))")

                if [[ "$DRY_RUN" == "true" ]]; then
                    echo -e " ${YELLOW}→${NC} Failed backup: $backup_name ${YELLOW}($size_human dry)${NC}"
                    echo -e " ${YELLOW}→${NC} Incomplete backup: $backup_name ${YELLOW}($size_human dry)${NC}"
                    ((tm_cleaned++))
                    note_activity
                    continue
@@ -186,7 +241,7 @@ clean_time_machine_failed_backups() {
                fi

                if tmutil delete "$inprogress_file" 2> /dev/null; then
                    echo -e " ${GREEN}${ICON_SUCCESS}${NC} Failed backup: $backup_name ${GREEN}($size_human)${NC}"
                    echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete backup: $backup_name ${GREEN}($size_human)${NC}"
                    ((tm_cleaned++))
                    ((files_cleaned++))
                    ((total_size_cleaned += size_kb))
@@ -211,7 +266,7 @@ clean_time_machine_failed_backups() {
            while IFS= read -r inprogress_file; do
                [[ -d "$inprogress_file" ]] || continue

                # Only delete old failed backups (safety window)
                # Only delete old incomplete backups (safety window)
                local file_mtime=$(get_file_mtime "$inprogress_file")
                local current_time=$(date +%s)
                local hours_old=$(((current_time - file_mtime) / 3600))
@@ -223,11 +278,17 @@ clean_time_machine_failed_backups() {
                local size_kb=$(get_path_size_kb "$inprogress_file")
                [[ "$size_kb" -le 0 ]] && continue

                # Stop spinner before first output
                if [[ "$spinner_active" == "true" ]]; then
                    if [[ -t 1 ]]; then stop_inline_spinner; fi
                    spinner_active=false
                fi

                local backup_name=$(basename "$inprogress_file")
                local size_human=$(bytes_to_human "$((size_kb * 1024))")

                if [[ "$DRY_RUN" == "true" ]]; then
                    echo -e " ${YELLOW}→${NC} Failed APFS backup in $bundle_name: $backup_name ${YELLOW}($size_human dry)${NC}"
                    echo -e " ${YELLOW}→${NC} Incomplete APFS backup in $bundle_name: $backup_name ${YELLOW}($size_human dry)${NC}"
                    ((tm_cleaned++))
                    note_activity
                    continue
@@ -239,7 +300,7 @@ clean_time_machine_failed_backups() {
                fi

                if tmutil delete "$inprogress_file" 2> /dev/null; then
                    echo -e " ${GREEN}${ICON_SUCCESS}${NC} Failed APFS backup in $bundle_name: $backup_name ${GREEN}($size_human)${NC}"
                    echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete APFS backup in $bundle_name: $backup_name ${GREEN}($size_human)${NC}"
                    ((tm_cleaned++))
                    ((files_cleaned++))
                    ((total_size_cleaned += size_kb))
@@ -251,11 +312,15 @@ clean_time_machine_failed_backups() {
            done < <(run_with_timeout 15 find "$mounted_path" -maxdepth 3 -type d \( -name "*.inProgress" -o -name "*.inprogress" \) 2> /dev/null || true)
        fi
    done
    if [[ -t 1 ]]; then stop_inline_spinner; fi
    done

    # Stop spinner if still active (no backups found)
    if [[ "$spinner_active" == "true" ]]; then
        if [[ -t 1 ]]; then stop_inline_spinner; fi
    fi

    if [[ $tm_cleaned -eq 0 ]]; then
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} No failed Time Machine backups found"
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
    fi
}

@@ -3,38 +3,90 @@

set -euo pipefail

# Clean user essentials (caches, logs, trash, crash reports)
# Clean user essentials (caches, logs, trash)
clean_user_essentials() {
    safe_clean ~/Library/Caches/* "User app cache"
    safe_clean ~/Library/Logs/* "User app logs"
    safe_clean ~/.Trash/* "Trash"
}

    # Empty trash on mounted volumes
    if [[ -d "/Volumes" && "$DRY_RUN" != "true" ]]; then
        if [[ -t 1 ]]; then
            MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning external volumes..."
        fi
        for volume in /Volumes/*; do
            [[ -d "$volume" && -d "$volume/.Trashes" && -w "$volume" ]] || continue
# Helper: Scan external volumes for cleanup (Trash & DS_Store)
scan_external_volumes() {
    [[ -d "/Volumes" ]] || return 0

            # Skip network volumes
            local fs_type=$(command df -T "$volume" 2> /dev/null | tail -1 | awk '{print $2}')
            case "$fs_type" in
                nfs | smbfs | afpfs | cifs | webdav) continue ;;
            esac
    # Fast pre-check: count non-system external volumes without expensive operations
    local -a candidate_volumes=()
    for volume in /Volumes/*; do
        # Basic checks (directory, writable, not a symlink)
        [[ -d "$volume" && -w "$volume" && ! -L "$volume" ]] || continue

            # Verify volume is mounted and not a symlink
            mount | grep -q "on $volume " || continue
            [[ -L "$volume/.Trashes" ]] && continue
        # Skip system root if it appears in /Volumes
        [[ "$volume" == "/" || "$volume" == "/Volumes/Macintosh HD" ]] && continue

        candidate_volumes+=("$volume")
    done

    # If no external volumes found, return immediately (zero overhead)
    local volume_count=${#candidate_volumes[@]}
    [[ $volume_count -eq 0 ]] && return 0

    # We have external volumes, now perform full scan
    if [[ -t 1 ]]; then
        MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning $volume_count external volume(s)..."
    fi

    for volume in "${candidate_volumes[@]}"; do
        # Skip network volumes with short timeout (reduced from 2s to 1s)
        local fs_type=""
        fs_type=$(run_with_timeout 1 command df -T "$volume" 2> /dev/null | tail -1 | awk '{print $2}' || echo "unknown")
        case "$fs_type" in
            nfs | smbfs | afpfs | cifs | webdav | unknown) continue ;;
        esac

        # Verify volume is actually mounted (reduced timeout from 2s to 1s)
        run_with_timeout 1 mount | grep -q "on $volume " || continue

        # 1. Clean Trash on volume
        if [[ -d "$volume/.Trashes" && "$DRY_RUN" != "true" ]]; then
            # Safely iterate and remove each item
            while IFS= read -r -d '' item; do
                safe_remove "$item" true || true
            done < <(command find "$volume/.Trashes" -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true)
        done
        if [[ -t 1 ]]; then stop_inline_spinner; fi
        fi

        # 2. Clean .DS_Store
        if [[ "$PROTECT_FINDER_METADATA" != "true" ]]; then
            clean_ds_store_tree "$volume" "$(basename "$volume") volume (.DS_Store)"
        fi
    done

    if [[ -t 1 ]]; then stop_inline_spinner; fi
}

# Clean Finder metadata (.DS_Store files)
clean_finder_metadata() {
    if [[ "$PROTECT_FINDER_METADATA" == "true" ]]; then
        note_activity
        echo -e " ${GRAY}⊘${NC} Finder metadata (protected)"
        return
    fi

    clean_ds_store_tree "$HOME" "Home directory (.DS_Store)"
}

# Clean macOS system caches
clean_macos_system_caches() {
    safe_clean ~/Library/Saved\ Application\ State/* "Saved application states"

    # REMOVED: Spotlight cache cleanup can cause system UI issues
    # Spotlight indexes should be managed by macOS automatically
    # safe_clean ~/Library/Caches/com.apple.spotlight "Spotlight cache"

    safe_clean ~/Library/Caches/com.apple.photoanalysisd "Photo analysis cache"
    safe_clean ~/Library/Caches/com.apple.akd "Apple ID cache"
    safe_clean ~/Library/Caches/com.apple.WebKit.Networking/* "WebKit network cache"

    # Extra user items
    safe_clean ~/Library/DiagnosticReports/* "Diagnostic reports"
    safe_clean ~/Library/Caches/com.apple.QuickLook.thumbnailcache "QuickLook thumbnails"
    safe_clean ~/Library/Caches/Quick\ Look/* "QuickLook cache"
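clean_ds_store_tree, called for the home directory and for each external volume above, is defined elsewhere in the repo. As a rough idea of what such a walker does, a hedged sketch, assuming a depth limit and the same dry-run convention as the rest of the script:

    # Illustrative sketch only - the repo's clean_ds_store_tree handles reporting and safety checks.
    clean_ds_store_tree_sketch() {
        local root="$1" label="$2"
        [[ -d "$root" ]] || return 0
        if [[ "$DRY_RUN" == "true" ]]; then
            echo " would scan: $label"
            command find "$root" -maxdepth 4 -name ".DS_Store" -type f -print 2> /dev/null | head -5 || true
        else
            command find "$root" -maxdepth 4 -name ".DS_Store" -type f -delete 2> /dev/null || true
        fi
    }
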
@@ -54,47 +106,6 @@ clean_user_essentials() {
    safe_clean ~/Library/Application\ Support/AddressBook/Sources/*/Photos.cache "Address Book photo cache"
}

# Clean Finder metadata (.DS_Store files)
clean_finder_metadata() {
    if [[ "$PROTECT_FINDER_METADATA" == "true" ]]; then
        note_activity
        echo -e " ${GRAY}${ICON_SUCCESS}${NC} Finder metadata (whitelisted)"
    else
        clean_ds_store_tree "$HOME" "Home directory (.DS_Store)"

        if [[ -d "/Volumes" ]]; then
            for volume in /Volumes/*; do
                [[ -d "$volume" && -w "$volume" ]] || continue

                local fs_type=""
                fs_type=$(command df -T "$volume" 2> /dev/null | tail -1 | awk '{print $2}')
                case "$fs_type" in
                    nfs | smbfs | afpfs | cifs | webdav) continue ;;
                esac

                clean_ds_store_tree "$volume" "$(basename "$volume") volume (.DS_Store)"
            done
        fi
    fi
}

# Clean macOS system caches
clean_macos_system_caches() {
    safe_clean ~/Library/Saved\ Application\ State/* "Saved application states"

    # REMOVED: Spotlight cache cleanup can cause system UI issues
    # Spotlight indexes should be managed by macOS automatically
    # safe_clean ~/Library/Caches/com.apple.spotlight "Spotlight cache"

    safe_clean ~/Library/Caches/com.apple.photoanalysisd "Photo analysis cache"
    safe_clean ~/Library/Caches/com.apple.akd "Apple ID cache"
    safe_clean ~/Library/Caches/com.apple.Safari/Webpage\ Previews/* "Safari webpage previews"
    safe_clean ~/Library/Application\ Support/CloudDocs/session/db/* "iCloud session cache"
    safe_clean ~/Library/Caches/com.apple.Safari/fsCachedData/* "Safari cached data"
    safe_clean ~/Library/Caches/com.apple.WebKit.WebContent/* "WebKit content cache"
    safe_clean ~/Library/Caches/com.apple.WebKit.Networking/* "WebKit network cache"
}

# Clean sandboxed app caches
clean_sandboxed_app_caches() {
    safe_clean ~/Library/Containers/com.apple.wallpaper.agent/Data/Library/Caches/* "Wallpaper agent cache"
@@ -115,44 +126,7 @@ clean_sandboxed_app_caches() {
    local found_any=false

    for container_dir in "$containers_dir"/*; do
        [[ -d "$container_dir" ]] || continue

        # Extract bundle ID and check protection status early
        local bundle_id=$(basename "$container_dir")
        local bundle_id_lower=$(echo "$bundle_id" | tr '[:upper:]' '[:lower:]')

        # Check explicit critical system components (case-insensitive regex)
        if [[ "$bundle_id_lower" =~ backgroundtaskmanagement || "$bundle_id_lower" =~ loginitems || "$bundle_id_lower" =~ systempreferences || "$bundle_id_lower" =~ systemsettings || "$bundle_id_lower" =~ settings || "$bundle_id_lower" =~ preferences || "$bundle_id_lower" =~ controlcenter || "$bundle_id_lower" =~ biometrickit || "$bundle_id_lower" =~ sfl || "$bundle_id_lower" =~ tcc ]]; then
            continue
        fi

        if should_protect_data "$bundle_id"; then
            continue
        elif should_protect_data "$bundle_id_lower"; then
            continue
        fi

        local cache_dir="$container_dir/Data/Library/Caches"
        # Check if dir exists and has content
        if [[ -d "$cache_dir" ]]; then
            # Fast check if empty (avoid expensive size calc on empty dirs)
            if [[ -n "$(ls -A "$cache_dir" 2> /dev/null)" ]]; then
                # Get size
                local size=$(get_path_size_kb "$cache_dir")
                ((total_size += size))
                found_any=true
                ((cleaned_count++))

                if [[ "$DRY_RUN" != "true" ]]; then
                    # Clean contents safely
                    # We know this is a user cache path, so rm -rf is acceptable here
                    # provided we keep the Cache directory itself
                    for item in "${cache_dir:?}"/*; do
                        safe_remove "$item" true || true
                    done
                fi
            fi
        fi
        process_container_cache "$container_dir"
    done

    if [[ -t 1 ]]; then stop_inline_spinner; fi
@@ -172,6 +146,46 @@ clean_sandboxed_app_caches() {
    fi
}

# Process a single container cache directory (reduces nesting)
process_container_cache() {
    local container_dir="$1"
    [[ -d "$container_dir" ]] || return 0

    # Extract bundle ID and check protection status early
    local bundle_id=$(basename "$container_dir")
    local bundle_id_lower=$(echo "$bundle_id" | tr '[:upper:]' '[:lower:]')

    # Check explicit critical system components (case-insensitive regex)
    if [[ "$bundle_id_lower" =~ backgroundtaskmanagement || "$bundle_id_lower" =~ loginitems || "$bundle_id_lower" =~ systempreferences || "$bundle_id_lower" =~ systemsettings || "$bundle_id_lower" =~ settings || "$bundle_id_lower" =~ preferences || "$bundle_id_lower" =~ controlcenter || "$bundle_id_lower" =~ biometrickit || "$bundle_id_lower" =~ sfl || "$bundle_id_lower" =~ tcc ]]; then
        return 0
    fi

    if should_protect_data "$bundle_id" || should_protect_data "$bundle_id_lower"; then
        return 0
    fi

    local cache_dir="$container_dir/Data/Library/Caches"
    # Check if dir exists and has content
    [[ -d "$cache_dir" ]] || return 0

    # Fast check if empty using find (more efficient than ls)
    if find "$cache_dir" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then
        # Use global variables from caller for tracking
        local size=$(get_path_size_kb "$cache_dir")
        ((total_size += size))
        found_any=true
        ((cleaned_count++))

        if [[ "$DRY_RUN" != "true" ]]; then
            # Clean contents safely (rm -rf is restricted by safe_remove)
            for item in "$cache_dir"/*; do
                [[ -e "$item" ]] || continue
                safe_remove "$item" true || true
            done
        fi
    fi
}

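should_protect_data is the project's whitelist check; its real implementation and protected-bundle list live elsewhere in the repo. A hedged sketch of the idea, with a made-up pattern list purely for illustration:

    # Illustrative only - the actual protected-bundle list ships with the repo.
    should_protect_data_sketch() {
        local bundle_id="$1"
        local -a protected_patterns=("com.apple.mail" "com.1password" "com.apple.keychain")
        local pattern
        for pattern in "${protected_patterns[@]}"; do
            [[ "$bundle_id" == *"$pattern"* ]] && return 0
        done
        return 1
    }

    # Example: should_protect_data_sketch "com.1password.1password" && echo "skip this container"
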
# Clean browser caches (Safari, Chrome, Edge, Firefox, etc.)
clean_browsers() {
    safe_clean ~/Library/Caches/com.apple.Safari/* "Safari cache"
@@ -193,9 +207,6 @@ clean_browsers() {
    safe_clean ~/Library/Caches/com.kagi.kagimacOS/* "Orion cache"
    safe_clean ~/Library/Caches/zen/* "Zen cache"
    safe_clean ~/Library/Application\ Support/Firefox/Profiles/*/cache2/* "Firefox profile cache"

    # DISABLED: Service Worker CacheStorage scanning (find can hang on large browser profiles)
    # Browser caches are already cleaned by the safe_clean calls above
}

# Clean cloud storage app caches
@@ -271,14 +282,17 @@ clean_application_support_logs() {

    for candidate in "${start_candidates[@]}"; do
        if [[ -d "$candidate" ]]; then
            if [[ -n "$(ls -A "$candidate" 2> /dev/null)" ]]; then
            if find "$candidate" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then
                local size=$(get_path_size_kb "$candidate")
                ((total_size += size))
                ((cleaned_count++))
                found_any=true

                if [[ "$DRY_RUN" != "true" ]]; then
                    safe_remove "$candidate"/* true > /dev/null 2>&1 || true
                    for item in "$candidate"/*; do
                        [[ -e "$item" ]] || continue
                        safe_remove "$item" true > /dev/null 2>&1 || true
                    done
                fi
            fi
        fi
@@ -296,14 +310,17 @@ clean_application_support_logs() {

    for candidate in "${gc_candidates[@]}"; do
        if [[ -d "$candidate" ]]; then
            if [[ -n "$(ls -A "$candidate" 2> /dev/null)" ]]; then
            if find "$candidate" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then
                local size=$(get_path_size_kb "$candidate")
                ((total_size += size))
                ((cleaned_count++))
                found_any=true

                if [[ "$DRY_RUN" != "true" ]]; then
                    safe_remove "$candidate"/* true > /dev/null 2>&1 || true
                    for item in "$candidate"/*; do
                        [[ -e "$item" ]] || continue
                        safe_remove "$item" true > /dev/null 2>&1 || true
                    done
                fi
            fi
        fi