mirror of https://github.com/tw93/Mole.git synced 2026-02-12 05:54:00 +00:00

Merge branch 'main' into dev

This commit is contained in:
tw93
2026-02-10 14:27:26 +08:00
36 changed files with 2627 additions and 1429 deletions

View File

@@ -21,7 +21,7 @@ jobs:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Cache Homebrew
uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v4
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v4
with:
path: |
~/Library/Caches/Homebrew
@@ -74,7 +74,7 @@ jobs:
ref: ${{ (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository && github.head_ref) || github.ref }}
- name: Cache Homebrew
uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v4
uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v4
with:
path: |
~/Library/Caches/Homebrew

View File

@@ -255,7 +255,22 @@ curl -fsSL https://raw.githubusercontent.com/tw93/Mole/main/scripts/setup-quick-
Adds 5 commands: `clean`, `uninstall`, `optimize`, `analyze`, `status`.
Mole automatically detects your terminal; set `MO_LAUNCHER_APP=<name>` to override. For Raycast users: if this is your first script directory, add it via Raycast Extensions → Add Script Directory, then run "Reload Script Directories".
### Raycast Setup
After running the script above, **complete these steps in Raycast**:
1. Open Raycast Settings (⌘ + ,)
2. Go to **Extensions** → **Script Commands**
3. Click **"Add Script Directory"** (or **"+"**)
4. Add path: `~/Library/Application Support/Raycast/script-commands`
5. Search in Raycast for: **"Reload Script Directories"** and run it
6. Done! Search for `mole`, `clean`, or `optimize` to use the commands
> **Note**: The script creates the commands automatically, but Raycast requires you to manually add the script directory. This is a one-time setup.
### Terminal Detection
Mole automatically detects your terminal app (Warp, Ghostty, Alacritty, Kitty, WezTerm, etc.). To override, set `MO_LAUNCHER_APP=<name>` in your environment.
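For example, a one-line sketch for your shell profile to pin the launcher to a specific terminal (Ghostty below is just an illustration; use whatever app you have installed):

```bash
# Tell Mole's launcher scripts to open in Ghostty instead of the auto-detected terminal
export MO_LAUNCHER_APP="Ghostty"
```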
## Community Love

View File

@@ -970,6 +970,7 @@ perform_cleanup() {
start_section "Uninstalled app data"
clean_orphaned_app_data
clean_orphaned_system_services
clean_orphaned_launch_agents
end_section
# ===== 13. Apple Silicon optimizations =====

View File

@@ -392,7 +392,7 @@ main() {
trap handle_interrupt INT TERM
if [[ -t 1 ]]; then
clear
clear_screen
fi
print_header

View File

@@ -5,6 +5,10 @@
set -euo pipefail
# Preserve user's locale for app display name lookup.
readonly MOLE_UNINSTALL_USER_LC_ALL="${LC_ALL:-}"
readonly MOLE_UNINSTALL_USER_LANG="${LANG:-}"
# Fix locale issues on non-English systems.
export LC_ALL=C
export LANG=C
@@ -27,59 +31,371 @@ total_items=0
files_cleaned=0
total_size_cleaned=0
# Scan applications and collect information.
scan_applications() {
# Cache app scan (24h TTL).
local cache_dir="$HOME/.cache/mole"
local cache_file="$cache_dir/app_scan_cache"
local cache_ttl=86400 # 24 hours
local force_rescan="${1:-false}"
readonly MOLE_UNINSTALL_META_CACHE_DIR="$HOME/.cache/mole"
readonly MOLE_UNINSTALL_META_CACHE_FILE="$MOLE_UNINSTALL_META_CACHE_DIR/uninstall_app_metadata_v1"
readonly MOLE_UNINSTALL_META_CACHE_LOCK="${MOLE_UNINSTALL_META_CACHE_FILE}.lock"
readonly MOLE_UNINSTALL_META_REFRESH_TTL=604800 # 7 days
readonly MOLE_UNINSTALL_SCAN_SPINNER_DELAY_SEC="0.15"
readonly MOLE_UNINSTALL_INLINE_METADATA_LIMIT=4
readonly MOLE_UNINSTALL_INLINE_MDLS_TIMEOUT_SEC="0.08"
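# Cache record layout: app_path|app_mtime|size_kb|last_used_epoch|cache_updated_epoch|bundle_id|display_name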
ensure_user_dir "$cache_dir"
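# Render an epoch as a relative age string (Today, Yesterday, N days/weeks/months/years ago; "..." when unknown).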
uninstall_relative_time_from_epoch() {
local value_epoch="${1:-0}"
local now_epoch="${2:-0}"
if [[ $force_rescan == false && -f "$cache_file" ]]; then
local cache_age=$(($(get_epoch_seconds) - $(get_file_mtime "$cache_file")))
[[ $cache_age -eq $(get_epoch_seconds) ]] && cache_age=86401 # Handle mtime read failure
if [[ $cache_age -lt $cache_ttl ]]; then
if [[ -t 2 ]]; then
echo -e "${GREEN}Loading from cache...${NC}" >&2
sleep 0.3 # Brief pause so user sees the message
fi
echo "$cache_file"
return 0
if [[ ! "$value_epoch" =~ ^[0-9]+$ || $value_epoch -le 0 ]]; then
echo "..."
return 0
fi
local days_ago=$(((now_epoch - value_epoch) / 86400))
if [[ $days_ago -lt 0 ]]; then
days_ago=0
fi
if [[ $days_ago -eq 0 ]]; then
echo "Today"
elif [[ $days_ago -eq 1 ]]; then
echo "Yesterday"
elif [[ $days_ago -lt 7 ]]; then
echo "${days_ago} days ago"
elif [[ $days_ago -lt 30 ]]; then
local weeks_ago=$((days_ago / 7))
[[ $weeks_ago -eq 1 ]] && echo "1 week ago" || echo "${weeks_ago} weeks ago"
elif [[ $days_ago -lt 365 ]]; then
local months_ago=$((days_ago / 30))
[[ $months_ago -eq 1 ]] && echo "1 month ago" || echo "${months_ago} months ago"
else
local years_ago=$((days_ago / 365))
[[ $years_ago -eq 1 ]] && echo "1 year ago" || echo "${years_ago} years ago"
fi
}
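# Pick the best display name: localized mdls name -> CFBundleDisplayName -> CFBundleName -> folder name.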
uninstall_resolve_display_name() {
local app_path="$1"
local app_name="$2"
local display_name="$app_name"
if [[ -f "$app_path/Contents/Info.plist" ]]; then
local md_display_name
if [[ -n "$MOLE_UNINSTALL_USER_LC_ALL" ]]; then
md_display_name=$(run_with_timeout 0.04 env LC_ALL="$MOLE_UNINSTALL_USER_LC_ALL" LANG="$MOLE_UNINSTALL_USER_LANG" mdls -name kMDItemDisplayName -raw "$app_path" 2> /dev/null || echo "")
elif [[ -n "$MOLE_UNINSTALL_USER_LANG" ]]; then
md_display_name=$(run_with_timeout 0.04 env LANG="$MOLE_UNINSTALL_USER_LANG" mdls -name kMDItemDisplayName -raw "$app_path" 2> /dev/null || echo "")
else
md_display_name=$(run_with_timeout 0.04 mdls -name kMDItemDisplayName -raw "$app_path" 2> /dev/null || echo "")
fi
local bundle_display_name
bundle_display_name=$(plutil -extract CFBundleDisplayName raw "$app_path/Contents/Info.plist" 2> /dev/null || echo "")
local bundle_name
bundle_name=$(plutil -extract CFBundleName raw "$app_path/Contents/Info.plist" 2> /dev/null || echo "")
if [[ "$md_display_name" == /* ]]; then
md_display_name=""
fi
md_display_name="${md_display_name//|/-}"
md_display_name="${md_display_name//[$'\t\r\n']/}"
bundle_display_name="${bundle_display_name//|/-}"
bundle_display_name="${bundle_display_name//[$'\t\r\n']/}"
bundle_name="${bundle_name//|/-}"
bundle_name="${bundle_name//[$'\t\r\n']/}"
if [[ -n "$md_display_name" && "$md_display_name" != "(null)" && "$md_display_name" != "$app_name" ]]; then
display_name="$md_display_name"
elif [[ -n "$bundle_display_name" && "$bundle_display_name" != "(null)" ]]; then
display_name="$bundle_display_name"
elif [[ -n "$bundle_name" && "$bundle_name" != "(null)" ]]; then
display_name="$bundle_name"
fi
fi
local inline_loading=false
if [[ -t 1 && -t 2 ]]; then
inline_loading=true
printf "\033[2J\033[H" >&2 # Clear screen for inline loading
if [[ "$display_name" == /* ]]; then
display_name="$app_name"
fi
display_name="${display_name%.app}"
display_name="${display_name//|/-}"
display_name="${display_name//[$'\t\r\n']/}"
echo "$display_name"
}
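# Acquire the metadata cache lock; mkdir is atomic, so the lock directory doubles as a cross-process mutex.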
uninstall_acquire_metadata_lock() {
local lock_dir="$1"
local attempts=0
while ! mkdir "$lock_dir" 2> /dev/null; do
((attempts++))
if [[ $attempts -ge 40 ]]; then
return 1
fi
# Clean stale lock if older than 5 minutes.
if [[ -d "$lock_dir" ]]; then
local lock_mtime
lock_mtime=$(get_file_mtime "$lock_dir")
# Skip stale detection if mtime lookup failed (returns 0).
if [[ "$lock_mtime" =~ ^[0-9]+$ && $lock_mtime -gt 0 ]]; then
local lock_age
lock_age=$(($(get_epoch_seconds) - lock_mtime))
if [[ "$lock_age" =~ ^-?[0-9]+$ && $lock_age -gt 300 ]]; then
rmdir "$lock_dir" 2> /dev/null || true
fi
fi
fi
sleep 0.1 2> /dev/null || sleep 1
done
return 0
}
uninstall_release_metadata_lock() {
local lock_dir="$1"
[[ -d "$lock_dir" ]] && rmdir "$lock_dir" 2> /dev/null || true
}
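# Collect size and last-used epoch for a single app inline, bounded by the short mdls timeout so the first paint stays fast.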
uninstall_collect_inline_metadata() {
local app_path="$1"
local app_mtime="${2:-0}"
local now_epoch="${3:-0}"
local size_kb
size_kb=$(get_path_size_kb "$app_path")
[[ "$size_kb" =~ ^[0-9]+$ ]] || size_kb=0
local last_used_epoch=0
local metadata_date
metadata_date=$(run_with_timeout "$MOLE_UNINSTALL_INLINE_MDLS_TIMEOUT_SEC" mdls -name kMDItemLastUsedDate -raw "$app_path" 2> /dev/null || echo "")
if [[ "$metadata_date" != "(null)" && -n "$metadata_date" ]]; then
last_used_epoch=$(date -j -f "%Y-%m-%d %H:%M:%S %z" "$metadata_date" "+%s" 2> /dev/null || echo "0")
fi
local temp_file
# Fallback to app mtime so first scan does not show "...".
if [[ ! "$last_used_epoch" =~ ^[0-9]+$ || $last_used_epoch -le 0 ]]; then
if [[ "$app_mtime" =~ ^[0-9]+$ && $app_mtime -gt 0 ]]; then
last_used_epoch="$app_mtime"
else
last_used_epoch=0
fi
fi
printf "%s|%s|%s\n" "$size_kb" "$last_used_epoch" "$now_epoch"
}
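# Refresh queued cache rows in a detached background job with a bounded worker pool, then merge results back under the lock.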
start_uninstall_metadata_refresh() {
local refresh_file="$1"
[[ ! -s "$refresh_file" ]] && {
rm -f "$refresh_file" 2> /dev/null || true
return 0
}
(
_refresh_debug() {
if [[ "${MO_DEBUG:-}" == "1" ]]; then
local ts
ts=$(date "+%Y-%m-%d %H:%M:%S" 2> /dev/null || echo "?")
echo "[$ts] DEBUG: [metadata-refresh] $*" >> "${HOME}/.config/mole/mole_debug_session.log" 2> /dev/null || true
fi
}
ensure_user_dir "$MOLE_UNINSTALL_META_CACHE_DIR"
ensure_user_file "$MOLE_UNINSTALL_META_CACHE_FILE"
if [[ ! -r "$MOLE_UNINSTALL_META_CACHE_FILE" ]]; then
if ! : > "$MOLE_UNINSTALL_META_CACHE_FILE" 2> /dev/null; then
_refresh_debug "Cannot create cache file, aborting"
exit 0
fi
fi
if [[ ! -w "$MOLE_UNINSTALL_META_CACHE_FILE" ]]; then
_refresh_debug "Cache file not writable, aborting"
exit 0
fi
local updates_file
updates_file=$(mktemp 2> /dev/null) || {
_refresh_debug "mktemp failed, aborting"
exit 0
}
local now_epoch
now_epoch=$(get_epoch_seconds)
local max_parallel
max_parallel=$(get_optimal_parallel_jobs "io")
if [[ ! "$max_parallel" =~ ^[0-9]+$ || $max_parallel -lt 1 ]]; then
max_parallel=1
elif [[ $max_parallel -gt 4 ]]; then
max_parallel=4
fi
local -a worker_pids=()
local worker_idx=0
while IFS='|' read -r app_path app_mtime bundle_id display_name; do
[[ -n "$app_path" && -d "$app_path" ]] || continue
((worker_idx++))
local worker_output="${updates_file}.${worker_idx}"
(
local last_used_epoch=0
local metadata_date
metadata_date=$(run_with_timeout 0.2 mdls -name kMDItemLastUsedDate -raw "$app_path" 2> /dev/null || echo "")
if [[ "$metadata_date" != "(null)" && -n "$metadata_date" ]]; then
last_used_epoch=$(date -j -f "%Y-%m-%d %H:%M:%S %z" "$metadata_date" "+%s" 2> /dev/null || echo "0")
fi
if [[ ! "$last_used_epoch" =~ ^[0-9]+$ || $last_used_epoch -le 0 ]]; then
last_used_epoch=0
fi
local size_kb
size_kb=$(get_path_size_kb "$app_path")
[[ "$size_kb" =~ ^[0-9]+$ ]] || size_kb=0
printf "%s|%s|%s|%s|%s|%s|%s\n" "$app_path" "${app_mtime:-0}" "$size_kb" "${last_used_epoch:-0}" "$now_epoch" "$bundle_id" "$display_name" > "$worker_output"
) &
worker_pids+=($!)
if ((${#worker_pids[@]} >= max_parallel)); then
wait "${worker_pids[0]}" 2> /dev/null || true
worker_pids=("${worker_pids[@]:1}")
fi
done < "$refresh_file"
local worker_pid
for worker_pid in "${worker_pids[@]}"; do
wait "$worker_pid" 2> /dev/null || true
done
local worker_output
for worker_output in "${updates_file}".*; do
[[ -f "$worker_output" ]] || continue
cat "$worker_output" >> "$updates_file"
rm -f "$worker_output"
done
if [[ ! -s "$updates_file" ]]; then
rm -f "$updates_file"
exit 0
fi
if ! uninstall_acquire_metadata_lock "$MOLE_UNINSTALL_META_CACHE_LOCK"; then
_refresh_debug "Failed to acquire lock, aborting merge"
rm -f "$updates_file"
exit 0
fi
local merged_file
merged_file=$(mktemp 2> /dev/null) || {
_refresh_debug "mktemp for merge failed, aborting"
uninstall_release_metadata_lock "$MOLE_UNINSTALL_META_CACHE_LOCK"
rm -f "$updates_file"
exit 0
}
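# Merge: rows from updates_file replace matching cache rows by app path; all other rows pass through untouched.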
awk -F'|' '
NR == FNR { updates[$1] = $0; next }
!($1 in updates) { print }
END {
for (path in updates) {
print updates[path]
}
}
' "$updates_file" "$MOLE_UNINSTALL_META_CACHE_FILE" > "$merged_file"
mv "$merged_file" "$MOLE_UNINSTALL_META_CACHE_FILE" 2> /dev/null || {
cp "$merged_file" "$MOLE_UNINSTALL_META_CACHE_FILE" 2> /dev/null || true
rm -f "$merged_file"
}
uninstall_release_metadata_lock "$MOLE_UNINSTALL_META_CACHE_LOCK"
rm -f "$updates_file"
rm -f "$refresh_file" 2> /dev/null || true
) > /dev/null 2>&1 &
}
# Scan applications and collect information.
scan_applications() {
local temp_file scan_raw_file merged_file refresh_file cache_snapshot_file
temp_file=$(create_temp_file)
scan_raw_file="${temp_file}.scan"
merged_file="${temp_file}.merged"
refresh_file="${temp_file}.refresh"
cache_snapshot_file="${temp_file}.cache"
: > "$scan_raw_file"
: > "$refresh_file"
: > "$cache_snapshot_file"
ensure_user_dir "$MOLE_UNINSTALL_META_CACHE_DIR"
ensure_user_file "$MOLE_UNINSTALL_META_CACHE_FILE"
local cache_source="$MOLE_UNINSTALL_META_CACHE_FILE"
local cache_source_is_temp=false
if [[ ! -r "$cache_source" ]]; then
cache_source=$(create_temp_file)
: > "$cache_source"
cache_source_is_temp=true
fi
# Fast lookup cache for unchanged apps: path+mtime -> bundle_id/display_name.
local -a cache_paths=()
local -a cache_mtimes=()
local -a cache_bundle_ids=()
local -a cache_display_names=()
local cache_path cache_mtime _cache_size _cache_epoch _cache_updated cache_bundle cache_display
while IFS='|' read -r cache_path cache_mtime _cache_size _cache_epoch _cache_updated cache_bundle cache_display; do
[[ -n "$cache_path" ]] || continue
cache_paths+=("$cache_path")
cache_mtimes+=("${cache_mtime:-0}")
cache_bundle_ids+=("${cache_bundle:-}")
cache_display_names+=("${cache_display:-}")
done < "$cache_source"
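# Look up cached identity by exact path + mtime; emits "bundle_id|display_name", or "|" on a miss so identity is re-resolved.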
lookup_cached_identity() {
local target_path="$1"
local target_mtime="$2"
local idx
for ((idx = 0; idx < ${#cache_paths[@]}; idx++)); do
if [[ "${cache_paths[idx]}" == "$target_path" ]]; then
if [[ "${cache_mtimes[idx]:-0}" == "${target_mtime:-0}" ]]; then
echo "${cache_bundle_ids[idx]:-}|${cache_display_names[idx]:-}"
else
echo "|"
fi
return 0
fi
done
echo "|"
}
# Local spinner_pid for cleanup
local spinner_pid=""
local spinner_shown_file="${temp_file}.spinner_shown"
local previous_int_trap=""
previous_int_trap=$(trap -p INT || true)
restore_scan_int_trap() {
if [[ -n "$previous_int_trap" ]]; then
eval "$previous_int_trap"
else
trap - INT
fi
}
# Trap to handle Ctrl+C during scan
local scan_interrupted=false
# shellcheck disable=SC2329 # Function invoked indirectly via trap
trap_scan_cleanup() {
scan_interrupted=true
if [[ -n "$spinner_pid" ]]; then
kill -TERM "$spinner_pid" 2> /dev/null || true
wait "$spinner_pid" 2> /dev/null || true
fi
printf "\r\033[K" >&2
rm -f "$temp_file" "${temp_file}.sorted" "${temp_file}.progress" 2> /dev/null || true
if [[ -f "$spinner_shown_file" ]]; then
printf "\r\033[K" >&2
fi
rm -f "$temp_file" "$scan_raw_file" "$merged_file" "$refresh_file" "$cache_snapshot_file" "${temp_file}.sorted" "${temp_file}.progress" "$spinner_shown_file" 2> /dev/null || true
exit 130
}
trap trap_scan_cleanup INT
local current_epoch
current_epoch=$(get_epoch_seconds)
# Pass 1: collect app paths and bundle IDs (no mdls).
local -a app_data_tuples=()
local -a app_dirs=(
@@ -122,28 +438,45 @@ scan_applications() {
continue
fi
# Bundle ID from plist (fast path).
local bundle_id="unknown"
if [[ -f "$app_path/Contents/Info.plist" ]]; then
bundle_id=$(defaults read "$app_path/Contents/Info.plist" CFBundleIdentifier 2> /dev/null || echo "unknown")
if [[ -L "$app_path" ]]; then
local link_target
link_target=$(readlink "$app_path" 2> /dev/null)
if [[ -n "$link_target" ]]; then
local resolved_target="$link_target"
if [[ "$link_target" != /* ]]; then
local link_dir
link_dir=$(dirname "$app_path")
# Resolve a relative symlink target to an absolute path (empty on failure).
if resolved_target=$(cd "$link_dir" 2> /dev/null && cd "$(dirname "$link_target")" 2> /dev/null && pwd); then
resolved_target="$resolved_target/$(basename "$link_target")"
else
resolved_target=""
fi
fi
case "$resolved_target" in
/System/* | /usr/bin/* | /usr/lib/* | /bin/* | /sbin/* | /private/etc/*)
continue
;;
esac
fi
fi
if should_protect_from_uninstall "$bundle_id"; then
continue
fi
local app_mtime
app_mtime=$(get_file_mtime "$app_path")
# Store tuple for pass 2 (metadata + size).
app_data_tuples+=("${app_path}|${app_name}|${bundle_id}")
local cached_identity cached_bundle_id cached_display_name
cached_identity=$(lookup_cached_identity "$app_path" "$app_mtime")
IFS='|' read -r cached_bundle_id cached_display_name <<< "$cached_identity"
# Store tuple for pass 2 (bundle + display resolution, then cache merge).
app_data_tuples+=("${app_path}|${app_name}|${app_mtime}|${cached_bundle_id}|${cached_display_name}")
done < <(command find "$app_dir" -name "*.app" -maxdepth 3 -print0 2> /dev/null)
done
if [[ ${#app_data_tuples[@]} -eq 0 ]]; then
rm -f "$temp_file"
rm -f "$temp_file" "$scan_raw_file" "$merged_file" "$refresh_file" "$cache_snapshot_file" "${temp_file}.sorted" "${temp_file}.progress" "$spinner_shown_file" 2> /dev/null || true
[[ $cache_source_is_temp == true ]] && rm -f "$cache_source" 2> /dev/null || true
restore_scan_int_trap
printf "\r\033[K" >&2
echo "No applications found to uninstall." >&2
return 1
fi
# Pass 2: metadata + size in parallel (mdls is slow).
# Pass 2: resolve display names in parallel.
local app_count=0
local total_apps=${#app_data_tuples[@]}
local max_parallel
@@ -158,97 +491,33 @@ scan_applications() {
process_app_metadata() {
local app_data_tuple="$1"
local output_file="$2"
local current_epoch="$3"
IFS='|' read -r app_path app_name bundle_id <<< "$app_data_tuple"
IFS='|' read -r app_path app_name app_mtime cached_bundle_id cached_display_name <<< "$app_data_tuple"
# Display name priority: mdls display name → bundle display → bundle name → folder.
local display_name="$app_name"
if [[ -f "$app_path/Contents/Info.plist" ]]; then
local md_display_name
md_display_name=$(run_with_timeout 0.05 mdls -name kMDItemDisplayName -raw "$app_path" 2> /dev/null || echo "")
local bundle_display_name
bundle_display_name=$(plutil -extract CFBundleDisplayName raw "$app_path/Contents/Info.plist" 2> /dev/null)
local bundle_name
bundle_name=$(plutil -extract CFBundleName raw "$app_path/Contents/Info.plist" 2> /dev/null)
if [[ "$md_display_name" == /* ]]; then md_display_name=""; fi
md_display_name="${md_display_name//|/-}"
md_display_name="${md_display_name//[$'\t\r\n']/}"
bundle_display_name="${bundle_display_name//|/-}"
bundle_display_name="${bundle_display_name//[$'\t\r\n']/}"
bundle_name="${bundle_name//|/-}"
bundle_name="${bundle_name//[$'\t\r\n']/}"
if [[ -n "$md_display_name" && "$md_display_name" != "(null)" && "$md_display_name" != "$app_name" ]]; then
display_name="$md_display_name"
elif [[ -n "$bundle_display_name" && "$bundle_display_name" != "(null)" ]]; then
display_name="$bundle_display_name"
elif [[ -n "$bundle_name" && "$bundle_name" != "(null)" ]]; then
display_name="$bundle_name"
local bundle_id="${cached_bundle_id:-}"
if [[ -z "$bundle_id" ]]; then
bundle_id="unknown"
if [[ -f "$app_path/Contents/Info.plist" ]]; then
bundle_id=$(defaults read "$app_path/Contents/Info.plist" CFBundleIdentifier 2> /dev/null || echo "unknown")
fi
fi
if [[ "$display_name" == /* ]]; then
display_name="$app_name"
if should_protect_from_uninstall "$bundle_id"; then
return 0
fi
local display_name="${cached_display_name:-}"
if [[ -z "$display_name" ]]; then
display_name=$(uninstall_resolve_display_name "$app_path" "$app_name")
fi
display_name="${display_name%.app}"
display_name="${display_name//|/-}"
display_name="${display_name//[$'\t\r\n']/}"
# App size (KB → human).
local app_size="N/A"
local app_size_kb="0"
if [[ -d "$app_path" ]]; then
app_size_kb=$(get_path_size_kb "$app_path")
app_size=$(bytes_to_human "$((app_size_kb * 1024))")
fi
# Last used: mdls (fast timeout) → mtime.
local last_used="Never"
local last_used_epoch=0
if [[ -d "$app_path" ]]; then
local metadata_date
metadata_date=$(run_with_timeout 0.1 mdls -name kMDItemLastUsedDate -raw "$app_path" 2> /dev/null || echo "")
if [[ "$metadata_date" != "(null)" && -n "$metadata_date" ]]; then
last_used_epoch=$(date -j -f "%Y-%m-%d %H:%M:%S %z" "$metadata_date" "+%s" 2> /dev/null || echo "0")
fi
if [[ "$last_used_epoch" -eq 0 ]]; then
last_used_epoch=$(get_file_mtime "$app_path")
fi
if [[ $last_used_epoch -gt 0 ]]; then
local days_ago=$(((current_epoch - last_used_epoch) / 86400))
if [[ $days_ago -eq 0 ]]; then
last_used="Today"
elif [[ $days_ago -eq 1 ]]; then
last_used="Yesterday"
elif [[ $days_ago -lt 7 ]]; then
last_used="${days_ago} days ago"
elif [[ $days_ago -lt 30 ]]; then
local weeks_ago=$((days_ago / 7))
[[ $weeks_ago -eq 1 ]] && last_used="1 week ago" || last_used="${weeks_ago} weeks ago"
elif [[ $days_ago -lt 365 ]]; then
local months_ago=$((days_ago / 30))
[[ $months_ago -eq 1 ]] && last_used="1 month ago" || last_used="${months_ago} months ago"
else
local years_ago=$((days_ago / 365))
[[ $years_ago -eq 1 ]] && last_used="1 year ago" || last_used="${years_ago} years ago"
fi
fi
fi
echo "${last_used_epoch}|${app_path}|${display_name}|${bundle_id}|${app_size}|${last_used}|${app_size_kb}" >> "$output_file"
echo "${app_path}|${display_name}|${bundle_id}|${app_mtime}" >> "$output_file"
}
export -f process_app_metadata
local progress_file="${temp_file}.progress"
echo "0" > "$progress_file"
@@ -256,16 +525,15 @@ scan_applications() {
# shellcheck disable=SC2329 # Function invoked indirectly via trap
cleanup_spinner() { exit 0; }
trap cleanup_spinner TERM INT EXIT
sleep "$MOLE_UNINSTALL_SCAN_SPINNER_DELAY_SEC" 2> /dev/null || sleep 1
[[ -f "$progress_file" ]] || exit 0
local spinner_chars="|/-\\"
local i=0
: > "$spinner_shown_file"
while true; do
local completed=$(cat "$progress_file" 2> /dev/null || echo 0)
local c="${spinner_chars:$((i % 4)):1}"
if [[ $inline_loading == true ]]; then
printf "\033[H\033[2K%s Scanning applications... %d/%d\n" "$c" "$completed" "$total_apps" >&2
else
printf "\r\033[K%s Scanning applications... %d/%d" "$c" "$completed" "$total_apps" >&2
fi
printf "\r\033[K%s Scanning applications... %d/%d" "$c" "$completed" "$total_apps" >&2
((i++))
sleep 0.1 2> /dev/null || sleep 1
done
@@ -274,7 +542,7 @@ scan_applications() {
for app_data_tuple in "${app_data_tuples[@]}"; do
((app_count++))
process_app_metadata "$app_data_tuple" "$temp_file" "$current_epoch" &
process_app_metadata "$app_data_tuple" "$scan_raw_file" &
pids+=($!)
echo "$app_count" > "$progress_file"
@@ -292,47 +560,146 @@ scan_applications() {
kill -TERM "$spinner_pid" 2> /dev/null || true
wait "$spinner_pid" 2> /dev/null || true
fi
if [[ $inline_loading == true ]]; then
printf "\033[H\033[2K" >&2
else
if [[ -f "$spinner_shown_file" ]]; then
echo -ne "\r\033[K" >&2
fi
rm -f "$progress_file"
rm -f "$progress_file" "$spinner_shown_file"
if [[ ! -s "$temp_file" ]]; then
if [[ ! -s "$scan_raw_file" ]]; then
echo "No applications found to uninstall" >&2
rm -f "$temp_file"
rm -f "$temp_file" "$scan_raw_file" "$merged_file" "$refresh_file" "$cache_snapshot_file" "${temp_file}.sorted" "${temp_file}.progress" "$spinner_shown_file" 2> /dev/null || true
[[ $cache_source_is_temp == true ]] && rm -f "$cache_source" 2> /dev/null || true
restore_scan_int_trap
return 1
fi
if [[ $total_apps -gt 50 ]]; then
if [[ $inline_loading == true ]]; then
printf "\033[H\033[2KProcessing %d applications...\n" "$total_apps" >&2
printf "\rProcessing %d applications... " "$total_apps" >&2
fi
awk -F'|' '
NR == FNR {
cache_mtime[$1] = $2
cache_size[$1] = $3
cache_epoch[$1] = $4
cache_updated[$1] = $5
cache_bundle[$1] = $6
cache_display[$1] = $7
next
}
{
print $0 "|" cache_mtime[$1] "|" cache_size[$1] "|" cache_epoch[$1] "|" cache_updated[$1] "|" cache_bundle[$1] "|" cache_display[$1]
}
' "$cache_source" "$scan_raw_file" > "$merged_file"
if [[ ! -s "$merged_file" && -s "$scan_raw_file" ]]; then
awk '{print $0 "||||||"}' "$scan_raw_file" > "$merged_file"
fi
local current_epoch
current_epoch=$(get_epoch_seconds)
local inline_metadata_count=0
while IFS='|' read -r app_path display_name bundle_id app_mtime cached_mtime cached_size_kb cached_epoch cached_updated_epoch cached_bundle_id cached_display_name; do
[[ -n "$app_path" && -e "$app_path" ]] || continue
local cache_match=false
if [[ -n "$cached_mtime" && -n "$app_mtime" && "$cached_mtime" == "$app_mtime" ]]; then
cache_match=true
fi
local final_epoch=0
if [[ "$cached_epoch" =~ ^[0-9]+$ && $cached_epoch -gt 0 ]]; then
final_epoch="$cached_epoch"
fi
local final_size_kb=0
local final_size="..."
if [[ "$cached_size_kb" =~ ^[0-9]+$ && $cached_size_kb -gt 0 ]]; then
final_size_kb="$cached_size_kb"
final_size=$(bytes_to_human "$((cached_size_kb * 1024))")
fi
local final_last_used
final_last_used=$(uninstall_relative_time_from_epoch "$final_epoch" "$current_epoch")
local needs_refresh=false
if [[ $cache_match == false ]]; then
needs_refresh=true
elif [[ ! "$cached_size_kb" =~ ^[0-9]+$ || $cached_size_kb -le 0 ]]; then
needs_refresh=true
elif [[ ! "$cached_epoch" =~ ^[0-9]+$ || $cached_epoch -le 0 ]]; then
needs_refresh=true
elif [[ ! "$cached_updated_epoch" =~ ^[0-9]+$ ]]; then
needs_refresh=true
elif [[ -z "$cached_bundle_id" || -z "$cached_display_name" ]]; then
needs_refresh=true
else
printf "\rProcessing %d applications... " "$total_apps" >&2
local cache_age=$((current_epoch - cached_updated_epoch))
if [[ $cache_age -gt $MOLE_UNINSTALL_META_REFRESH_TTL ]]; then
needs_refresh=true
fi
fi
if [[ $needs_refresh == true ]]; then
if [[ $inline_metadata_count -lt $MOLE_UNINSTALL_INLINE_METADATA_LIMIT ]]; then
local inline_metadata inline_size_kb inline_epoch inline_updated_epoch
inline_metadata=$(uninstall_collect_inline_metadata "$app_path" "${app_mtime:-0}" "$current_epoch")
IFS='|' read -r inline_size_kb inline_epoch inline_updated_epoch <<< "$inline_metadata"
((inline_metadata_count++))
if [[ "$inline_size_kb" =~ ^[0-9]+$ && $inline_size_kb -gt 0 ]]; then
final_size_kb="$inline_size_kb"
final_size=$(bytes_to_human "$((inline_size_kb * 1024))")
fi
if [[ "$inline_epoch" =~ ^[0-9]+$ && $inline_epoch -gt 0 ]]; then
final_epoch="$inline_epoch"
final_last_used=$(uninstall_relative_time_from_epoch "$final_epoch" "$current_epoch")
fi
if [[ "$inline_updated_epoch" =~ ^[0-9]+$ && $inline_updated_epoch -gt 0 ]]; then
cached_updated_epoch="$inline_updated_epoch"
fi
fi
printf "%s|%s|%s|%s\n" "$app_path" "${app_mtime:-0}" "$bundle_id" "$display_name" >> "$refresh_file"
fi
local persist_updated_epoch=0
if [[ "$cached_updated_epoch" =~ ^[0-9]+$ && $cached_updated_epoch -gt 0 ]]; then
persist_updated_epoch="$cached_updated_epoch"
fi
printf "%s|%s|%s|%s|%s|%s|%s\n" "$app_path" "${app_mtime:-0}" "${final_size_kb:-0}" "${final_epoch:-0}" "${persist_updated_epoch:-0}" "$bundle_id" "$display_name" >> "$cache_snapshot_file"
echo "${final_epoch}|${app_path}|${display_name}|${bundle_id}|${final_size}|${final_last_used}|${final_size_kb}" >> "$temp_file"
done < "$merged_file"
if [[ -s "$cache_snapshot_file" ]]; then
if uninstall_acquire_metadata_lock "$MOLE_UNINSTALL_META_CACHE_LOCK"; then
mv "$cache_snapshot_file" "$MOLE_UNINSTALL_META_CACHE_FILE" 2> /dev/null || {
cp "$cache_snapshot_file" "$MOLE_UNINSTALL_META_CACHE_FILE" 2> /dev/null || true
rm -f "$cache_snapshot_file"
}
uninstall_release_metadata_lock "$MOLE_UNINSTALL_META_CACHE_LOCK"
fi
fi
sort -t'|' -k1,1n "$temp_file" > "${temp_file}.sorted" || {
rm -f "$temp_file"
rm -f "$temp_file" "$scan_raw_file" "$merged_file" "$refresh_file" "$cache_snapshot_file"
[[ $cache_source_is_temp == true ]] && rm -f "$cache_source" 2> /dev/null || true
restore_scan_int_trap
return 1
}
rm -f "$temp_file"
rm -f "$temp_file" "$scan_raw_file" "$merged_file" "$cache_snapshot_file"
[[ $cache_source_is_temp == true ]] && rm -f "$cache_source" 2> /dev/null || true
if [[ $total_apps -gt 50 ]]; then
if [[ $inline_loading == true ]]; then
printf "\033[H\033[2K" >&2
else
printf "\r\033[K" >&2
fi
fi
[[ $total_apps -gt 50 ]] && printf "\r\033[K" >&2
ensure_user_file "$cache_file"
cp "${temp_file}.sorted" "$cache_file" 2> /dev/null || true
start_uninstall_metadata_refresh "$refresh_file"
if [[ -f "${temp_file}.sorted" ]]; then
restore_scan_int_trap
echo "${temp_file}.sorted"
return 0
else
restore_scan_int_trap
return 1
fi
}
@@ -387,7 +754,6 @@ main() {
export MOLE_CURRENT_COMMAND="uninstall"
log_operation_session_start "uninstall"
local force_rescan=false
# Global flags
for arg in "$@"; do
case "$arg" in
@@ -397,68 +763,21 @@ main() {
esac
done
local use_inline_loading=false
if [[ -t 1 && -t 2 ]]; then
use_inline_loading=true
fi
hide_cursor
while true; do
local needs_scanning=true
local cache_file="$HOME/.cache/mole/app_scan_cache"
if [[ $force_rescan == false && -f "$cache_file" ]]; then
local cache_age=$(($(get_epoch_seconds) - $(get_file_mtime "$cache_file")))
[[ $cache_age -eq $(get_epoch_seconds) ]] && cache_age=86401
[[ $cache_age -lt 86400 ]] && needs_scanning=false
fi
if [[ $needs_scanning == true && $use_inline_loading == true ]]; then
if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" != "1" ]]; then
enter_alt_screen
export MOLE_ALT_SCREEN_ACTIVE=1
export MOLE_INLINE_LOADING=1
export MOLE_MANAGED_ALT_SCREEN=1
fi
printf "\033[2J\033[H" >&2
else
unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN MOLE_ALT_SCREEN_ACTIVE
if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
leave_alt_screen
unset MOLE_ALT_SCREEN_ACTIVE
fi
fi
unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
local apps_file=""
if ! apps_file=$(scan_applications "$force_rescan"); then
if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
printf "\033[2J\033[H" >&2
leave_alt_screen
unset MOLE_ALT_SCREEN_ACTIVE
unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
fi
if ! apps_file=$(scan_applications); then
return 1
fi
if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
printf "\033[2J\033[H" >&2
fi
if [[ ! -f "$apps_file" ]]; then
if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
leave_alt_screen
unset MOLE_ALT_SCREEN_ACTIVE
unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
fi
return 1
fi
if ! load_applications "$apps_file"; then
if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
leave_alt_screen
unset MOLE_ALT_SCREEN_ACTIVE
unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
fi
rm -f "$apps_file"
return 1
fi
@@ -469,30 +788,14 @@ main() {
set -e
if [[ $exit_code -ne 0 ]]; then
if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
leave_alt_screen
unset MOLE_ALT_SCREEN_ACTIVE
unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
fi
show_cursor
clear_screen
printf '\033[2J\033[H' >&2
rm -f "$apps_file"
if [[ $exit_code -eq 10 ]]; then
force_rescan=true
continue
fi
return 0
fi
if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
leave_alt_screen
unset MOLE_ALT_SCREEN_ACTIVE
unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
fi
show_cursor
clear_screen
printf '\033[2J\033[H' >&2
@@ -512,13 +815,15 @@ main() {
local name_width=$(get_display_width "$app_name")
[[ $name_width -gt $max_name_display_width ]] && max_name_display_width=$name_width
local size_display="$size"
[[ -z "$size_display" || "$size_display" == "0" || "$size_display" == "N/A" ]] && size_display="Unknown"
[[ -z "$size_display" || "$size_display" == "0" || "$size_display" == "N/A" || "$size_display" == "Unknown" ]] && size_display="..."
[[ ${#size_display} -gt $max_size_width ]] && max_size_width=${#size_display}
local last_display=$(format_last_used_summary "$last_used")
[[ -z "$last_display" || "$last_display" == "Unknown" || "$last_display" == "Never" ]] && last_display="..."
[[ ${#last_display} -gt $max_last_width ]] && max_last_width=${#last_display}
done
((max_size_width < 5)) && max_size_width=5
((max_last_width < 5)) && max_last_width=5
((max_name_display_width < 16)) && max_name_display_width=16
local term_width=$(tput cols 2> /dev/null || echo 100)
local available_for_name=$((term_width - 17 - max_size_width - max_last_width))
@@ -550,12 +855,13 @@ main() {
[[ $current_width -gt $max_name_display_width ]] && max_name_display_width=$current_width
local size_display="$size"
if [[ -z "$size_display" || "$size_display" == "0" || "$size_display" == "N/A" ]]; then
size_display="Unknown"
if [[ -z "$size_display" || "$size_display" == "0" || "$size_display" == "N/A" || "$size_display" == "Unknown" ]]; then
size_display="..."
fi
local last_display
last_display=$(format_last_used_summary "$last_used")
[[ -z "$last_display" || "$last_display" == "Unknown" || "$last_display" == "Never" ]] && last_display="..."
summary_rows+=("$display_name|$size_display|$last_display")
done
@@ -591,7 +897,6 @@ main() {
return 0
fi
force_rescan=false
done
}

View File

@@ -1,666 +0,0 @@
#!/bin/bash
# Mole - Uninstall Module
# Interactive application uninstaller with keyboard navigation
#
# Usage:
# uninstall.sh # Launch interactive uninstaller
# uninstall.sh --force-rescan # Rescan apps and refresh cache
set -euo pipefail
# Fix locale issues (avoid Perl warnings on non-English systems)
export LC_ALL=C
export LANG=C
# Get script directory and source common functions
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/../lib/core/common.sh"
source "$SCRIPT_DIR/../lib/ui/menu_paginated.sh"
source "$SCRIPT_DIR/../lib/ui/app_selector.sh"
source "$SCRIPT_DIR/../lib/uninstall/batch.sh"
# Note: Bundle preservation logic is now in lib/core/common.sh
# Initialize global variables
selected_apps=() # Global array for app selection
declare -a apps_data=()
declare -a selection_state=()
total_items=0
files_cleaned=0
total_size_cleaned=0
# Compact the "last used" descriptor for aligned summaries
format_last_used_summary() {
local value="$1"
case "$value" in
"" | "Unknown")
echo "Unknown"
return 0
;;
"Never" | "Recent" | "Today" | "Yesterday" | "This year" | "Old")
echo "$value"
return 0
;;
esac
if [[ $value =~ ^([0-9]+)[[:space:]]+days?\ ago$ ]]; then
echo "${BASH_REMATCH[1]}d ago"
return 0
fi
if [[ $value =~ ^([0-9]+)[[:space:]]+weeks?\ ago$ ]]; then
echo "${BASH_REMATCH[1]}w ago"
return 0
fi
if [[ $value =~ ^([0-9]+)[[:space:]]+months?\ ago$ ]]; then
echo "${BASH_REMATCH[1]}m ago"
return 0
fi
if [[ $value =~ ^([0-9]+)[[:space:]]+month\(s\)\ ago$ ]]; then
echo "${BASH_REMATCH[1]}m ago"
return 0
fi
if [[ $value =~ ^([0-9]+)[[:space:]]+years?\ ago$ ]]; then
echo "${BASH_REMATCH[1]}y ago"
return 0
fi
echo "$value"
}
# Scan applications and collect information
scan_applications() {
# Simplified cache: only check timestamp (24h TTL)
local cache_dir="$HOME/.cache/mole"
local cache_file="$cache_dir/app_scan_cache"
local cache_ttl=86400 # 24 hours
local force_rescan="${1:-false}"
ensure_user_dir "$cache_dir"
# Check if cache exists and is fresh
if [[ $force_rescan == false && -f "$cache_file" ]]; then
local cache_age=$(($(get_epoch_seconds) - $(get_file_mtime "$cache_file")))
[[ $cache_age -eq $(get_epoch_seconds) ]] && cache_age=86401 # Handle missing file
if [[ $cache_age -lt $cache_ttl ]]; then
# Cache hit - return immediately
# Show brief flash of cache usage if in interactive mode
if [[ -t 2 ]]; then
echo -e "${GREEN}Loading from cache...${NC}" >&2
# Small sleep to let user see it (optional, but good for "feeling" the speed vs glitch)
sleep 0.3
fi
echo "$cache_file"
return 0
fi
fi
# Cache miss - prepare for scanning
local inline_loading=false
if [[ -t 1 && -t 2 ]]; then
inline_loading=true
# Clear screen for inline loading
printf "\033[2J\033[H" >&2
fi
local temp_file
temp_file=$(create_temp_file)
# Pre-cache current epoch to avoid repeated calls
local current_epoch
current_epoch=$(get_epoch_seconds)
# First pass: quickly collect all valid app paths and bundle IDs (NO mdls calls)
local -a app_data_tuples=()
local -a app_dirs=(
"/Applications"
"$HOME/Applications"
)
local vol_app_dir
local nullglob_was_set=0
shopt -q nullglob && nullglob_was_set=1
shopt -s nullglob
for vol_app_dir in /Volumes/*/Applications; do
[[ -d "$vol_app_dir" && -r "$vol_app_dir" ]] || continue
if [[ -d "/Applications" && "$vol_app_dir" -ef "/Applications" ]]; then
continue
fi
if [[ -d "$HOME/Applications" && "$vol_app_dir" -ef "$HOME/Applications" ]]; then
continue
fi
app_dirs+=("$vol_app_dir")
done
if [[ $nullglob_was_set -eq 0 ]]; then
shopt -u nullglob
fi
for app_dir in "${app_dirs[@]}"; do
if [[ ! -d "$app_dir" ]]; then continue; fi
while IFS= read -r -d '' app_path; do
if [[ ! -e "$app_path" ]]; then continue; fi
local app_name
app_name=$(basename "$app_path" .app)
# Skip nested apps (e.g. inside Wrapper/ or Frameworks/ of another app)
# Check if parent path component ends in .app (e.g. /Foo.app/Bar.app or /Foo.app/Contents/Bar.app)
# This prevents false positives like /Old.apps/Target.app
local parent_dir
parent_dir=$(dirname "$app_path")
if [[ "$parent_dir" == *".app" || "$parent_dir" == *".app/"* ]]; then
continue
fi
# Get bundle ID only (fast, no mdls calls in first pass)
local bundle_id="unknown"
if [[ -f "$app_path/Contents/Info.plist" ]]; then
bundle_id=$(defaults read "$app_path/Contents/Info.plist" CFBundleIdentifier 2> /dev/null || echo "unknown")
fi
# Skip system critical apps (input methods, system components)
if should_protect_from_uninstall "$bundle_id"; then
continue
fi
# Store tuple: app_path|app_name|bundle_id (display_name will be resolved in parallel later)
app_data_tuples+=("${app_path}|${app_name}|${bundle_id}")
done < <(command find "$app_dir" -name "*.app" -maxdepth 3 -print0 2> /dev/null)
done
# Second pass: process each app with parallel size calculation
local app_count=0
local total_apps=${#app_data_tuples[@]}
# Bound parallelism - for metadata queries, can go higher since it's mostly waiting
local max_parallel
max_parallel=$(get_optimal_parallel_jobs "io")
if [[ $max_parallel -lt 8 ]]; then
max_parallel=8
elif [[ $max_parallel -gt 32 ]]; then
max_parallel=32
fi
local pids=()
# inline_loading variable already set above (line ~92)
# Process app metadata extraction function
process_app_metadata() {
local app_data_tuple="$1"
local output_file="$2"
local current_epoch="$3"
IFS='|' read -r app_path app_name bundle_id <<< "$app_data_tuple"
# Get localized display name (moved from first pass for better performance)
local display_name="$app_name"
if [[ -f "$app_path/Contents/Info.plist" ]]; then
# Try to get localized name from system metadata (best for i18n)
local md_display_name
md_display_name=$(run_with_timeout 0.05 mdls -name kMDItemDisplayName -raw "$app_path" 2> /dev/null || echo "")
# Get bundle names
local bundle_display_name
bundle_display_name=$(plutil -extract CFBundleDisplayName raw "$app_path/Contents/Info.plist" 2> /dev/null)
local bundle_name
bundle_name=$(plutil -extract CFBundleName raw "$app_path/Contents/Info.plist" 2> /dev/null)
# Priority order for name selection (prefer localized names):
# 1. System metadata display name (kMDItemDisplayName) - respects system language
# 2. CFBundleDisplayName - usually localized
# 3. CFBundleName - fallback
# 4. App folder name - last resort
if [[ -n "$md_display_name" && "$md_display_name" != "(null)" && "$md_display_name" != "$app_name" ]]; then
display_name="$md_display_name"
elif [[ -n "$bundle_display_name" && "$bundle_display_name" != "(null)" ]]; then
display_name="$bundle_display_name"
elif [[ -n "$bundle_name" && "$bundle_name" != "(null)" ]]; then
display_name="$bundle_name"
fi
fi
# Parallel size calculation
local app_size="N/A"
local app_size_kb="0"
if [[ -d "$app_path" ]]; then
# Get size in KB, then format for display
app_size_kb=$(get_path_size_kb "$app_path")
app_size=$(bytes_to_human "$((app_size_kb * 1024))")
fi
# Get last used date
local last_used="Never"
local last_used_epoch=0
if [[ -d "$app_path" ]]; then
# Try mdls first with short timeout (0.1s) for accuracy, fallback to mtime for speed
local metadata_date
metadata_date=$(run_with_timeout 0.1 mdls -name kMDItemLastUsedDate -raw "$app_path" 2> /dev/null || echo "")
if [[ "$metadata_date" != "(null)" && -n "$metadata_date" ]]; then
last_used_epoch=$(date -j -f "%Y-%m-%d %H:%M:%S %z" "$metadata_date" "+%s" 2> /dev/null || echo "0")
fi
# Fallback if mdls failed or returned nothing
if [[ "$last_used_epoch" -eq 0 ]]; then
last_used_epoch=$(get_file_mtime "$app_path")
fi
if [[ $last_used_epoch -gt 0 ]]; then
local days_ago=$(((current_epoch - last_used_epoch) / 86400))
if [[ $days_ago -eq 0 ]]; then
last_used="Today"
elif [[ $days_ago -eq 1 ]]; then
last_used="Yesterday"
elif [[ $days_ago -lt 7 ]]; then
last_used="${days_ago} days ago"
elif [[ $days_ago -lt 30 ]]; then
local weeks_ago=$((days_ago / 7))
[[ $weeks_ago -eq 1 ]] && last_used="1 week ago" || last_used="${weeks_ago} weeks ago"
elif [[ $days_ago -lt 365 ]]; then
local months_ago=$((days_ago / 30))
[[ $months_ago -eq 1 ]] && last_used="1 month ago" || last_used="${months_ago} months ago"
else
local years_ago=$((days_ago / 365))
[[ $years_ago -eq 1 ]] && last_used="1 year ago" || last_used="${years_ago} years ago"
fi
fi
fi
# Write to output file atomically
# Fields: epoch|app_path|display_name|bundle_id|size_human|last_used|size_kb
echo "${last_used_epoch}|${app_path}|${display_name}|${bundle_id}|${app_size}|${last_used}|${app_size_kb}" >> "$output_file"
}
export -f process_app_metadata
# Create a temporary file to track progress
local progress_file="${temp_file}.progress"
echo "0" > "$progress_file"
# Start a background spinner that reads progress from file
local spinner_pid=""
(
# shellcheck disable=SC2329 # Function invoked indirectly via trap
cleanup_spinner() { exit 0; }
trap cleanup_spinner TERM INT EXIT
local spinner_chars="|/-\\"
local i=0
while true; do
local completed=$(cat "$progress_file" 2> /dev/null || echo 0)
local c="${spinner_chars:$((i % 4)):1}"
if [[ $inline_loading == true ]]; then
printf "\033[H\033[2K%s Scanning applications... %d/%d\n" "$c" "$completed" "$total_apps" >&2
else
printf "\r\033[K%s Scanning applications... %d/%d" "$c" "$completed" "$total_apps" >&2
fi
((i++))
sleep 0.1 2> /dev/null || sleep 1
done
) &
spinner_pid=$!
# Process apps in parallel batches
for app_data_tuple in "${app_data_tuples[@]}"; do
((app_count++))
# Launch background process
process_app_metadata "$app_data_tuple" "$temp_file" "$current_epoch" &
pids+=($!)
# Update progress to show scanning progress (use app_count as it increments smoothly)
echo "$app_count" > "$progress_file"
# Wait if we've hit max parallel limit
if ((${#pids[@]} >= max_parallel)); then
wait "${pids[0]}" 2> /dev/null
pids=("${pids[@]:1}") # Remove first pid
fi
done
# Wait for remaining background processes
for pid in "${pids[@]}"; do
wait "$pid" 2> /dev/null
done
# Stop the spinner and clear the line
if [[ -n "$spinner_pid" ]]; then
kill -TERM "$spinner_pid" 2> /dev/null || true
wait "$spinner_pid" 2> /dev/null || true
fi
if [[ $inline_loading == true ]]; then
printf "\033[H\033[2K" >&2
else
echo -ne "\r\033[K" >&2
fi
rm -f "$progress_file"
# Check if we found any applications
if [[ ! -s "$temp_file" ]]; then
echo "No applications found to uninstall" >&2
rm -f "$temp_file"
return 1
fi
# Sort by last used (oldest first) and cache the result
# Show brief processing message for large app lists
if [[ $total_apps -gt 50 ]]; then
if [[ $inline_loading == true ]]; then
printf "\033[H\033[2KProcessing %d applications...\n" "$total_apps" >&2
else
printf "\rProcessing %d applications... " "$total_apps" >&2
fi
fi
sort -t'|' -k1,1n "$temp_file" > "${temp_file}.sorted" || {
rm -f "$temp_file"
return 1
}
rm -f "$temp_file"
# Clear processing message
if [[ $total_apps -gt 50 ]]; then
if [[ $inline_loading == true ]]; then
printf "\033[H\033[2K" >&2
else
printf "\r\033[K" >&2
fi
fi
# Save to cache (simplified - no metadata)
ensure_user_file "$cache_file"
cp "${temp_file}.sorted" "$cache_file" 2> /dev/null || true
# Return sorted file
if [[ -f "${temp_file}.sorted" ]]; then
echo "${temp_file}.sorted"
else
return 1
fi
}
load_applications() {
local apps_file="$1"
if [[ ! -f "$apps_file" || ! -s "$apps_file" ]]; then
log_warning "No applications found for uninstallation"
return 1
fi
# Clear arrays
apps_data=()
selection_state=()
# Read apps into array, skip non-existent apps
while IFS='|' read -r epoch app_path app_name bundle_id size last_used size_kb; do
# Skip if app path no longer exists
[[ ! -e "$app_path" ]] && continue
apps_data+=("$epoch|$app_path|$app_name|$bundle_id|$size|$last_used|${size_kb:-0}")
selection_state+=(false)
done < "$apps_file"
if [[ ${#apps_data[@]} -eq 0 ]]; then
log_warning "No applications available for uninstallation"
return 1
fi
return 0
}
# Cleanup function - restore cursor and clean up
cleanup() {
# Restore cursor using common function
if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
leave_alt_screen
unset MOLE_ALT_SCREEN_ACTIVE
fi
if [[ -n "${sudo_keepalive_pid:-}" ]]; then
kill "$sudo_keepalive_pid" 2> /dev/null || true
wait "$sudo_keepalive_pid" 2> /dev/null || true
sudo_keepalive_pid=""
fi
show_cursor
exit "${1:-0}"
}
# Set trap for cleanup on exit
trap cleanup EXIT INT TERM
main() {
local force_rescan=false
for arg in "$@"; do
case "$arg" in
"--debug")
export MO_DEBUG=1
;;
"--force-rescan")
force_rescan=true
;;
esac
done
local use_inline_loading=false
if [[ -t 1 && -t 2 ]]; then
use_inline_loading=true
fi
# Hide cursor during operation
hide_cursor
# Main interaction loop
while true; do
# Simplified: always check if we need alt screen for scanning
# (scan_applications handles cache internally)
local needs_scanning=true
local cache_file="$HOME/.cache/mole/app_scan_cache"
if [[ $force_rescan == false && -f "$cache_file" ]]; then
local cache_age=$(($(get_epoch_seconds) - $(get_file_mtime "$cache_file")))
[[ $cache_age -eq $(get_epoch_seconds) ]] && cache_age=86401 # Handle missing file
[[ $cache_age -lt 86400 ]] && needs_scanning=false
fi
# Only enter alt screen if we need scanning (shows progress)
if [[ $needs_scanning == true && $use_inline_loading == true ]]; then
# Only enter if not already active
if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" != "1" ]]; then
enter_alt_screen
export MOLE_ALT_SCREEN_ACTIVE=1
export MOLE_INLINE_LOADING=1
export MOLE_MANAGED_ALT_SCREEN=1
fi
printf "\033[2J\033[H" >&2
else
# If we don't need scanning but have alt screen from previous iteration, keep it?
# Actually, scan_applications might output to stderr.
# Let's just unset the flags if we don't need scanning, but keep alt screen if it was active?
# No, select_apps_for_uninstall will handle its own screen management.
unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN MOLE_ALT_SCREEN_ACTIVE
if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
leave_alt_screen
unset MOLE_ALT_SCREEN_ACTIVE
fi
fi
# Scan applications
local apps_file=""
if ! apps_file=$(scan_applications "$force_rescan"); then
if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
printf "\033[2J\033[H" >&2
leave_alt_screen
unset MOLE_ALT_SCREEN_ACTIVE
unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
fi
return 1
fi
if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
printf "\033[2J\033[H" >&2
fi
if [[ ! -f "$apps_file" ]]; then
# Error message already shown by scan_applications
if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
leave_alt_screen
unset MOLE_ALT_SCREEN_ACTIVE
unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
fi
return 1
fi
# Load applications
if ! load_applications "$apps_file"; then
if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
leave_alt_screen
unset MOLE_ALT_SCREEN_ACTIVE
unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
fi
rm -f "$apps_file"
return 1
fi
# Interactive selection using paginated menu
set +e
select_apps_for_uninstall
local exit_code=$?
set -e
if [[ $exit_code -ne 0 ]]; then
if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
leave_alt_screen
unset MOLE_ALT_SCREEN_ACTIVE
unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
fi
show_cursor
clear_screen
printf '\033[2J\033[H' >&2 # Also clear stderr
rm -f "$apps_file"
# Handle Refresh (code 10)
if [[ $exit_code -eq 10 ]]; then
force_rescan=true
continue
fi
# User cancelled selection, exit the loop
return 0
fi
# Always clear on exit from selection, regardless of alt screen state
if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
leave_alt_screen
unset MOLE_ALT_SCREEN_ACTIVE
unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
fi
# Restore cursor and clear screen (output to both stdout and stderr for reliability)
show_cursor
clear_screen
printf '\033[2J\033[H' >&2 # Also clear stderr in case of mixed output
local selection_count=${#selected_apps[@]}
if [[ $selection_count -eq 0 ]]; then
echo "No apps selected"
rm -f "$apps_file"
# Loop back or exit? If select_apps_for_uninstall returns 0 but empty selection,
# it technically shouldn't happen based on that function's logic.
continue
fi
# Show selected apps with clean alignment
echo -e "${BLUE}${ICON_CONFIRM}${NC} Selected ${selection_count} apps:"
local -a summary_rows=()
local max_name_width=0
local max_size_width=0
local max_last_width=0
# First pass: get actual max widths for all columns
for selected_app in "${selected_apps[@]}"; do
IFS='|' read -r _ _ app_name _ size last_used _ <<< "$selected_app"
[[ ${#app_name} -gt $max_name_width ]] && max_name_width=${#app_name}
local size_display="$size"
[[ -z "$size_display" || "$size_display" == "0" || "$size_display" == "N/A" ]] && size_display="Unknown"
[[ ${#size_display} -gt $max_size_width ]] && max_size_width=${#size_display}
local last_display=$(format_last_used_summary "$last_used")
[[ ${#last_display} -gt $max_last_width ]] && max_last_width=${#last_display}
done
((max_size_width < 5)) && max_size_width=5
((max_last_width < 5)) && max_last_width=5
# Calculate name width: use actual max, but constrain by terminal width
# Fixed elements: "99. " (4) + " " (2) + " | Last: " (11) = 17
local term_width=$(tput cols 2> /dev/null || echo 100)
local available_for_name=$((term_width - 17 - max_size_width - max_last_width))
# Dynamic minimum for better spacing on wide terminals
local min_name_width=24
if [[ $term_width -ge 120 ]]; then
min_name_width=50
elif [[ $term_width -ge 100 ]]; then
min_name_width=42
elif [[ $term_width -ge 80 ]]; then
min_name_width=30
fi
# Constrain name width: dynamic min, max min(actual_max, available, 60)
local name_trunc_limit=$max_name_width
[[ $name_trunc_limit -lt $min_name_width ]] && name_trunc_limit=$min_name_width
[[ $name_trunc_limit -gt $available_for_name ]] && name_trunc_limit=$available_for_name
[[ $name_trunc_limit -gt 60 ]] && name_trunc_limit=60
# Reset for second pass
max_name_width=0
for selected_app in "${selected_apps[@]}"; do
IFS='|' read -r epoch app_path app_name bundle_id size last_used size_kb <<< "$selected_app"
local display_name="$app_name"
if [[ ${#display_name} -gt $name_trunc_limit ]]; then
display_name="${display_name:0:$((name_trunc_limit - 3))}..."
fi
[[ ${#display_name} -gt $max_name_width ]] && max_name_width=${#display_name}
local size_display="$size"
if [[ -z "$size_display" || "$size_display" == "0" || "$size_display" == "N/A" ]]; then
size_display="Unknown"
fi
local last_display
last_display=$(format_last_used_summary "$last_used")
summary_rows+=("$display_name|$size_display|$last_display")
done
((max_name_width < 16)) && max_name_width=16
local index=1
for row in "${summary_rows[@]}"; do
IFS='|' read -r name_cell size_cell last_cell <<< "$row"
printf "%d. %-*s %*s | Last: %s\n" "$index" "$max_name_width" "$name_cell" "$max_size_width" "$size_cell" "$last_cell"
((index++))
done
# Execute batch uninstallation (handles confirmation)
batch_uninstall_applications
# Cleanup current apps file
rm -f "$apps_file"
# Pause before looping back
echo -e "${GRAY}Press Enter to return to application list, ESC to exit...${NC}"
local key
IFS= read -r -s -n1 key || key=""
drain_pending_input # Clean up any escape sequence remnants
case "$key" in
$'\e' | q | Q)
show_cursor
return 0
;;
*)
# Continue loop
;;
esac
# Reset force_rescan to false for subsequent loops,
# but relying on batch_uninstall's cache deletion for actual update
force_rescan=false
done
}
# Run main function
main "$@"

View File

@@ -2,6 +2,7 @@ package main
import (
"encoding/gob"
"fmt"
"os"
"path/filepath"
"strings"
@@ -414,6 +415,221 @@ func TestLoadCacheExpiresWhenDirectoryChanges(t *testing.T) {
}
}
func TestLoadCacheReusesRecentEntryAfterDirectoryChanges(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
target := filepath.Join(home, "recent-change-target")
if err := os.MkdirAll(target, 0o755); err != nil {
t.Fatalf("create target: %v", err)
}
result := scanResult{TotalSize: 5, TotalFiles: 1}
if err := saveCacheToDisk(target, result); err != nil {
t.Fatalf("saveCacheToDisk: %v", err)
}
cachePath, err := getCachePath(target)
if err != nil {
t.Fatalf("getCachePath: %v", err)
}
file, err := os.Open(cachePath)
if err != nil {
t.Fatalf("open cache: %v", err)
}
var entry cacheEntry
if err := gob.NewDecoder(file).Decode(&entry); err != nil {
t.Fatalf("decode cache: %v", err)
}
_ = file.Close()
// Make cache entry look recently scanned, but older than mod time grace.
entry.ModTime = time.Now().Add(-2 * time.Hour)
entry.ScanTime = time.Now().Add(-1 * time.Hour)
tmp := cachePath + ".tmp"
f, err := os.Create(tmp)
if err != nil {
t.Fatalf("create tmp cache: %v", err)
}
if err := gob.NewEncoder(f).Encode(&entry); err != nil {
t.Fatalf("encode tmp cache: %v", err)
}
_ = f.Close()
if err := os.Rename(tmp, cachePath); err != nil {
t.Fatalf("rename tmp cache: %v", err)
}
if err := os.Chtimes(target, time.Now(), time.Now()); err != nil {
t.Fatalf("chtimes target: %v", err)
}
if _, err := loadCacheFromDisk(target); err != nil {
t.Fatalf("expected recent cache to be reused, got error: %v", err)
}
}
func TestLoadCacheExpiresWhenModifiedAndReuseWindowPassed(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
target := filepath.Join(home, "reuse-window-target")
if err := os.MkdirAll(target, 0o755); err != nil {
t.Fatalf("create target: %v", err)
}
result := scanResult{TotalSize: 5, TotalFiles: 1}
if err := saveCacheToDisk(target, result); err != nil {
t.Fatalf("saveCacheToDisk: %v", err)
}
cachePath, err := getCachePath(target)
if err != nil {
t.Fatalf("getCachePath: %v", err)
}
file, err := os.Open(cachePath)
if err != nil {
t.Fatalf("open cache: %v", err)
}
var entry cacheEntry
if err := gob.NewDecoder(file).Decode(&entry); err != nil {
t.Fatalf("decode cache: %v", err)
}
_ = file.Close()
// Within overall 7-day TTL but beyond reuse window.
entry.ModTime = time.Now().Add(-48 * time.Hour)
entry.ScanTime = time.Now().Add(-(cacheReuseWindow + time.Hour))
tmp := cachePath + ".tmp"
f, err := os.Create(tmp)
if err != nil {
t.Fatalf("create tmp cache: %v", err)
}
if err := gob.NewEncoder(f).Encode(&entry); err != nil {
t.Fatalf("encode tmp cache: %v", err)
}
_ = f.Close()
if err := os.Rename(tmp, cachePath); err != nil {
t.Fatalf("rename tmp cache: %v", err)
}
if err := os.Chtimes(target, time.Now(), time.Now()); err != nil {
t.Fatalf("chtimes target: %v", err)
}
if _, err := loadCacheFromDisk(target); err == nil {
t.Fatalf("expected cache load to fail after reuse window passes")
}
}
func TestLoadStaleCacheFromDiskAllowsRecentExpiredCache(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
target := filepath.Join(home, "stale-cache-target")
if err := os.MkdirAll(target, 0o755); err != nil {
t.Fatalf("create target: %v", err)
}
result := scanResult{TotalSize: 7, TotalFiles: 2}
if err := saveCacheToDisk(target, result); err != nil {
t.Fatalf("saveCacheToDisk: %v", err)
}
cachePath, err := getCachePath(target)
if err != nil {
t.Fatalf("getCachePath: %v", err)
}
file, err := os.Open(cachePath)
if err != nil {
t.Fatalf("open cache: %v", err)
}
var entry cacheEntry
if err := gob.NewDecoder(file).Decode(&entry); err != nil {
t.Fatalf("decode cache: %v", err)
}
_ = file.Close()
// Expired for normal cache validation but still inside stale fallback window.
entry.ModTime = time.Now().Add(-48 * time.Hour)
entry.ScanTime = time.Now().Add(-48 * time.Hour)
tmp := cachePath + ".tmp"
f, err := os.Create(tmp)
if err != nil {
t.Fatalf("create tmp cache: %v", err)
}
if err := gob.NewEncoder(f).Encode(&entry); err != nil {
t.Fatalf("encode tmp cache: %v", err)
}
_ = f.Close()
if err := os.Rename(tmp, cachePath); err != nil {
t.Fatalf("rename tmp cache: %v", err)
}
if err := os.Chtimes(target, time.Now(), time.Now()); err != nil {
t.Fatalf("chtimes target: %v", err)
}
if _, err := loadCacheFromDisk(target); err == nil {
t.Fatalf("expected normal cache load to fail")
}
if _, err := loadStaleCacheFromDisk(target); err != nil {
t.Fatalf("expected stale cache load to succeed, got error: %v", err)
}
}
func TestLoadStaleCacheFromDiskExpiresByStaleTTL(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
target := filepath.Join(home, "stale-cache-expired-target")
if err := os.MkdirAll(target, 0o755); err != nil {
t.Fatalf("create target: %v", err)
}
result := scanResult{TotalSize: 9, TotalFiles: 3}
if err := saveCacheToDisk(target, result); err != nil {
t.Fatalf("saveCacheToDisk: %v", err)
}
cachePath, err := getCachePath(target)
if err != nil {
t.Fatalf("getCachePath: %v", err)
}
file, err := os.Open(cachePath)
if err != nil {
t.Fatalf("open cache: %v", err)
}
var entry cacheEntry
if err := gob.NewDecoder(file).Decode(&entry); err != nil {
t.Fatalf("decode cache: %v", err)
}
_ = file.Close()
entry.ScanTime = time.Now().Add(-(staleCacheTTL + time.Hour))
tmp := cachePath + ".tmp"
f, err := os.Create(tmp)
if err != nil {
t.Fatalf("create tmp cache: %v", err)
}
if err := gob.NewEncoder(f).Encode(&entry); err != nil {
t.Fatalf("encode tmp cache: %v", err)
}
_ = f.Close()
if err := os.Rename(tmp, cachePath); err != nil {
t.Fatalf("rename tmp cache: %v", err)
}
if _, err := loadStaleCacheFromDisk(target); err == nil {
t.Fatalf("expected stale cache load to fail after stale TTL")
}
}
func TestScanPathPermissionError(t *testing.T) {
root := t.TempDir()
lockedDir := filepath.Join(root, "locked")
@@ -448,3 +664,40 @@ func TestScanPathPermissionError(t *testing.T) {
t.Logf("unexpected error type: %v", err)
}
}
func TestCalculateDirSizeFastHighFanoutCompletes(t *testing.T) {
root := t.TempDir()
	// Reproduce the high fan-out nested directory pattern that previously risked semaphore deadlock.
const fanout = 256
for i := 0; i < fanout; i++ {
nested := filepath.Join(root, fmt.Sprintf("dir-%03d", i), "nested")
if err := os.MkdirAll(nested, 0o755); err != nil {
t.Fatalf("create nested dir: %v", err)
}
if err := os.WriteFile(filepath.Join(nested, "data.bin"), []byte("x"), 0o644); err != nil {
t.Fatalf("write nested file: %v", err)
}
}
var files, dirs, bytes int64
current := &atomic.Value{}
current.Store("")
done := make(chan int64, 1)
go func() {
done <- calculateDirSizeFast(root, &files, &dirs, &bytes, current)
}()
select {
case total := <-done:
if total <= 0 {
t.Fatalf("expected positive total size, got %d", total)
}
if got := atomic.LoadInt64(&files); got < fanout {
t.Fatalf("expected at least %d files scanned, got %d", fanout, got)
}
case <-time.After(5 * time.Second):
t.Fatalf("calculateDirSizeFast did not complete under high fan-out")
}
}
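
For context, a minimal sketch of the pattern this test exercises (illustrative only; the package, type, and function names here are hypothetical, not from the Mole sources): a bounded semaphore acquired with a plain blocking send at every recursion level can deadlock once all slots are held by ancestors waiting on their children, whereas a non-blocking acquire with a synchronous fallback always makes progress.

package scansketch // hypothetical package, illustrative only

import "sync"

type node struct{ children []*node }

// walkBounded shows the select/default idiom the fix adopts. Replacing the
// select with a plain blocking send (sem <- struct{}{}) risks deadlock under
// high fan-out: every slot ends up held by a parent goroutine that is itself
// blocked waiting on children that can never acquire a slot.
func walkBounded(n *node, sem chan struct{}, wg *sync.WaitGroup) {
	for _, c := range n.children {
		select {
		case sem <- struct{}{}: // slot free: recurse concurrently
			wg.Add(1)
			go func(c *node) {
				defer wg.Done()
				defer func() { <-sem }()
				walkBounded(c, sem, wg)
			}(c)
		default: // pool exhausted: recurse in the current goroutine instead of blocking
			walkBounded(c, sem, wg)
		}
	}
}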

View File

@@ -182,7 +182,7 @@ func getCachePath(path string) (string, error) {
return filepath.Join(cacheDir, filename), nil
}
func loadCacheFromDisk(path string) (*cacheEntry, error) {
func loadRawCacheFromDisk(path string) (*cacheEntry, error) {
cachePath, err := getCachePath(path)
if err != nil {
return nil, err
@@ -200,23 +200,56 @@ func loadCacheFromDisk(path string) (*cacheEntry, error) {
return nil, err
}
return &entry, nil
}
func loadCacheFromDisk(path string) (*cacheEntry, error) {
entry, err := loadRawCacheFromDisk(path)
if err != nil {
return nil, err
}
info, err := os.Stat(path)
if err != nil {
return nil, err
}
if info.ModTime().After(entry.ModTime) {
// Allow grace window.
if cacheModTimeGrace <= 0 || info.ModTime().Sub(entry.ModTime) > cacheModTimeGrace {
return nil, fmt.Errorf("cache expired: directory modified")
}
}
if time.Since(entry.ScanTime) > 7*24*time.Hour {
scanAge := time.Since(entry.ScanTime)
if scanAge > 7*24*time.Hour {
return nil, fmt.Errorf("cache expired: too old")
}
return &entry, nil
if info.ModTime().After(entry.ModTime) {
// Allow grace window.
if cacheModTimeGrace <= 0 || info.ModTime().Sub(entry.ModTime) > cacheModTimeGrace {
// Directory mod time is noisy on macOS; reuse recent cache to avoid
// frequent full rescans while still forcing refresh for older entries.
if cacheReuseWindow <= 0 || scanAge > cacheReuseWindow {
return nil, fmt.Errorf("cache expired: directory modified")
}
}
}
return entry, nil
}
// loadStaleCacheFromDisk loads cache without strict freshness checks.
// It is used for fast first paint before triggering a background refresh.
func loadStaleCacheFromDisk(path string) (*cacheEntry, error) {
entry, err := loadRawCacheFromDisk(path)
if err != nil {
return nil, err
}
if _, err := os.Stat(path); err != nil {
return nil, err
}
if time.Since(entry.ScanTime) > staleCacheTTL {
return nil, fmt.Errorf("stale cache expired")
}
return entry, nil
}
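
Putting this together with the constants introduced later in this diff (cacheModTimeGrace = 30m, cacheReuseWindow = 24h, staleCacheTTL = 72h, plus the 7-day hard TTL above), the effective lookup order works out roughly as follows (a summary sketch, not normative):

// Per entry, with age = time.Since(entry.ScanTime):
//   age > 7d                        -> loadCacheFromDisk fails: "too old"
//   dir mtime within 30m grace      -> served fresh
//   dir modified beyond the grace:
//     age <= 24h (reuse window)     -> still served fresh
//     age >  24h                    -> fails: "directory modified"
//   when loadCacheFromDisk fails:
//     age <= 72h                    -> loadStaleCacheFromDisk serves it for
//                                      first paint, then a rescan runs
//     age >  72h                    -> fails: "stale cache expired", full rescan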
func saveCacheToDisk(path string, result scanResult) error {

View File

@@ -16,6 +16,8 @@ const (
maxConcurrentOverview = 8
batchUpdateSize = 100
cacheModTimeGrace = 30 * time.Minute
cacheReuseWindow = 24 * time.Hour
staleCacheTTL = 3 * 24 * time.Hour
// Worker pool limits.
minWorkers = 16
@@ -187,6 +189,17 @@ var defaultSkipDirs = map[string]bool{
"nfs": true,
"PHD": true,
"Permissions": true,
// Virtualization/Container mounts (NFS, network filesystems).
"OrbStack": true, // OrbStack NFS mounts
"Colima": true, // Colima VM mounts
"Parallels": true, // Parallels Desktop VMs
"VMware Fusion": true, // VMware Fusion VMs
"VirtualBox VMs": true, // VirtualBox VMs
"Rancher Desktop": true, // Rancher Desktop mounts
".lima": true, // Lima VM mounts
".colima": true, // Colima config/mounts
".orbstack": true, // OrbStack config/mounts
}
var skipExtensions = map[string]bool{

View File

@@ -63,8 +63,10 @@ type historyEntry struct {
}
type scanResultMsg struct {
path string
result scanResult
err error
stale bool
}
type overviewSizeMsg struct {
@@ -369,9 +371,19 @@ func (m model) scanCmd(path string) tea.Cmd {
Entries: cached.Entries,
LargeFiles: cached.LargeFiles,
TotalSize: cached.TotalSize,
TotalFiles: 0, // Cache doesn't store file count currently, minor UI limitation
TotalFiles: cached.TotalFiles,
}
return scanResultMsg{result: result, err: nil}
return scanResultMsg{path: path, result: result, err: nil}
}
if stale, err := loadStaleCacheFromDisk(path); err == nil {
result := scanResult{
Entries: stale.Entries,
LargeFiles: stale.LargeFiles,
TotalSize: stale.TotalSize,
TotalFiles: stale.TotalFiles,
}
return scanResultMsg{path: path, result: result, err: nil, stale: true}
}
v, err, _ := scanGroup.Do(path, func() (any, error) {
@@ -379,7 +391,7 @@ func (m model) scanCmd(path string) tea.Cmd {
})
if err != nil {
return scanResultMsg{err: err}
return scanResultMsg{path: path, err: err}
}
result := v.(scanResult)
@@ -390,7 +402,28 @@ func (m model) scanCmd(path string) tea.Cmd {
}
}(path, result)
return scanResultMsg{result: result, err: nil}
return scanResultMsg{path: path, result: result, err: nil}
}
}
func (m model) scanFreshCmd(path string) tea.Cmd {
return func() tea.Msg {
v, err, _ := scanGroup.Do(path, func() (any, error) {
return scanPathConcurrent(path, m.filesScanned, m.dirsScanned, m.bytesScanned, m.currentPath)
})
if err != nil {
return scanResultMsg{path: path, err: err}
}
result := v.(scanResult)
go func(p string, r scanResult) {
if err := saveCacheToDisk(p, r); err != nil {
_ = err
}
}(path, result)
return scanResultMsg{path: path, result: result}
}
}
@@ -442,6 +475,9 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
}
return m, nil
case scanResultMsg:
if msg.path != "" && msg.path != m.path {
return m, nil
}
m.scanning = false
if msg.err != nil {
m.status = fmt.Sprintf("Scan failed: %v", msg.err)
@@ -457,7 +493,6 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
m.largeFiles = msg.result.LargeFiles
m.totalSize = msg.result.TotalSize
m.totalFiles = msg.result.TotalFiles
m.status = fmt.Sprintf("Scanned %s", humanizeBytes(m.totalSize))
m.clampEntrySelection()
m.clampLargeSelection()
m.cache[m.path] = cacheSnapshot(m)
@@ -470,6 +505,23 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
_ = storeOverviewSize(path, size)
}(m.path, m.totalSize)
}
if msg.stale {
m.status = fmt.Sprintf("Loaded cached data for %s, refreshing...", displayPath(m.path))
m.scanning = true
if m.totalFiles > 0 {
m.lastTotalFiles = m.totalFiles
}
atomic.StoreInt64(m.filesScanned, 0)
atomic.StoreInt64(m.dirsScanned, 0)
atomic.StoreInt64(m.bytesScanned, 0)
if m.currentPath != nil {
m.currentPath.Store("")
}
return m, tea.Batch(m.scanFreshCmd(m.path), tickCmd())
}
m.status = fmt.Sprintf("Scanned %s", humanizeBytes(m.totalSize))
return m, nil
case overviewSizeMsg:
delete(m.overviewScanningSet, msg.Path)

View File

@@ -23,6 +23,21 @@ import (
var scanGroup singleflight.Group
// trySend attempts to send an item to a channel with a timeout.
// Returns true if the item was sent, false if the timeout was reached.
func trySend[T any](ch chan<- T, item T, timeout time.Duration) bool {
timer := time.NewTimer(timeout)
select {
case ch <- item:
if !timer.Stop() {
<-timer.C
}
return true
case <-timer.C:
return false
}
}
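
A minimal call-site sketch (hypothetical values; dirEntry fields as shown elsewhere in this diff) — the Stop/drain pair in trySend ensures a timer that fired concurrently with a successful send is fully consumed before the timer is released:

// Hypothetical usage: give a slow consumer up to 100ms, then move on.
entries := make(chan dirEntry, 64)
ok := trySend(entries, dirEntry{Name: "big.bin", Size: 1 << 30}, 100*time.Millisecond)
if !ok {
	// Channel stayed full for the whole timeout; the entry is dropped
	// and the scan keeps going instead of blocking forever.
}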
func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *int64, currentPath *atomic.Value) (scanResult, error) {
children, err := os.ReadDir(root)
if err != nil {
@@ -119,42 +134,13 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
size := getActualFileSize(fullPath, info)
atomic.AddInt64(&total, size)
// Reuse timer to reduce GC pressure
timer := time.NewTimer(0)
// Ensure timer is drained immediately since we start with 0
if !timer.Stop() {
select {
case <-timer.C:
default:
}
}
select {
case entryChan <- dirEntry{
trySend(entryChan, dirEntry{
Name: child.Name() + " →",
Path: fullPath,
Size: size,
IsDir: isDir,
LastAccess: getLastAccessTimeFromInfo(info),
}:
default:
// If channel is full, use timer to wait with timeout
timer.Reset(100 * time.Millisecond)
select {
case entryChan <- dirEntry{
Name: child.Name() + " →",
Path: fullPath,
Size: size,
IsDir: isDir,
LastAccess: getLastAccessTimeFromInfo(info),
}:
if !timer.Stop() {
<-timer.C
}
case <-timer.C:
// Skip if channel is blocked
}
}
}, 100*time.Millisecond)
continue
}
@@ -188,20 +174,13 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
atomic.AddInt64(&total, size)
atomic.AddInt64(dirsScanned, 1)
timer := time.NewTimer(100 * time.Millisecond)
select {
case entryChan <- dirEntry{
trySend(entryChan, dirEntry{
Name: name,
Path: path,
Size: size,
IsDir: true,
LastAccess: time.Time{},
}:
if !timer.Stop() {
<-timer.C
}
case <-timer.C:
}
}, 100*time.Millisecond)
}(child.Name(), fullPath)
continue
}
@@ -225,20 +204,13 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
atomic.AddInt64(&total, size)
atomic.AddInt64(dirsScanned, 1)
timer := time.NewTimer(100 * time.Millisecond)
select {
case entryChan <- dirEntry{
trySend(entryChan, dirEntry{
Name: name,
Path: path,
Size: size,
IsDir: true,
LastAccess: time.Time{},
}:
if !timer.Stop() {
<-timer.C
}
case <-timer.C:
}
}, 100*time.Millisecond)
}(child.Name(), fullPath)
continue
}
@@ -253,20 +225,13 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
atomic.AddInt64(&total, size)
atomic.AddInt64(dirsScanned, 1)
timer := time.NewTimer(100 * time.Millisecond)
select {
case entryChan <- dirEntry{
trySend(entryChan, dirEntry{
Name: name,
Path: path,
Size: size,
IsDir: true,
LastAccess: time.Time{},
}:
if !timer.Stop() {
<-timer.C
}
case <-timer.C:
}
}, 100*time.Millisecond)
}(child.Name(), fullPath)
continue
}
@@ -281,35 +246,19 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
atomic.AddInt64(filesScanned, 1)
atomic.AddInt64(bytesScanned, size)
// Single-use timer for main loop (less pressure than tight loop above)
// But let's be consistent and optimized
timer := time.NewTimer(100 * time.Millisecond)
select {
case entryChan <- dirEntry{
trySend(entryChan, dirEntry{
Name: child.Name(),
Path: fullPath,
Size: size,
IsDir: false,
LastAccess: getLastAccessTimeFromInfo(info),
}:
if !timer.Stop() {
<-timer.C
}
case <-timer.C:
}
}, 100*time.Millisecond)
// Track large files only.
if !shouldSkipFileForLargeTracking(fullPath) {
minSize := atomic.LoadInt64(&largeFileMinSize)
if size >= minSize {
timer.Reset(100 * time.Millisecond)
select {
case largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}:
if !timer.Stop() {
<-timer.C
}
case <-timer.C:
}
trySend(largeFileChan, fileEntry{Name: child.Name(), Path: fullPath, Size: size}, 100*time.Millisecond)
}
}
}
@@ -402,14 +351,20 @@ func calculateDirSizeFast(root string, filesScanned, dirsScanned, bytesScanned *
for _, entry := range entries {
if entry.IsDir() {
subDir := filepath.Join(dirPath, entry.Name())
sem <- struct{}{}
wg.Add(1)
go func(p string) {
defer wg.Done()
defer func() { <-sem }()
walk(p)
}(subDir)
atomic.AddInt64(dirsScanned, 1)
select {
case sem <- struct{}{}:
wg.Add(1)
go func(p string) {
defer wg.Done()
defer func() { <-sem }()
walk(p)
}(subDir)
default:
// Fallback to synchronous traversal to avoid semaphore deadlock under high fan-out.
walk(subDir)
}
} else {
info, err := entry.Info()
if err == nil {
@@ -519,15 +474,6 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, lar
maxConcurrent := min(runtime.NumCPU()*2, maxDirWorkers)
sem := make(chan struct{}, maxConcurrent)
// Reuse timer for large file sends
timer := time.NewTimer(0)
if !timer.Stop() {
select {
case <-timer.C:
default:
}
}
for _, child := range children {
fullPath := filepath.Join(root, child.Name())
@@ -593,14 +539,7 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, lar
if !shouldSkipFileForLargeTracking(fullPath) && largeFileMinSize != nil {
minSize := atomic.LoadInt64(largeFileMinSize)
if size >= minSize {
timer.Reset(100 * time.Millisecond)
select {
case largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}:
if !timer.Stop() {
<-timer.C
}
case <-timer.C:
}
trySend(largeFileChan, fileEntry{Name: child.Name(), Path: fullPath, Size: size}, 100*time.Millisecond)
}
}

View File

@@ -160,7 +160,7 @@ const NetworkHistorySize = 120 // Increased history size for wider graph
type ProxyStatus struct {
Enabled bool
Type string // HTTP, SOCKS, System
Type string // HTTP, HTTPS, SOCKS, PAC, WPAD, TUN
Host string
}

View File

@@ -2,9 +2,11 @@ package main
import (
"context"
"net/url"
"os"
"runtime"
"sort"
"strconv"
"strings"
"time"
@@ -114,23 +116,8 @@ func isNoiseInterface(name string) bool {
}
func collectProxy() ProxyStatus {
// Check environment variables first.
for _, env := range []string{"https_proxy", "HTTPS_PROXY", "http_proxy", "HTTP_PROXY"} {
if val := os.Getenv(env); val != "" {
proxyType := "HTTP"
if strings.HasPrefix(val, "socks") {
proxyType = "SOCKS"
}
// Extract host.
host := val
if strings.Contains(host, "://") {
host = strings.SplitN(host, "://", 2)[1]
}
if idx := strings.Index(host, "@"); idx >= 0 {
host = host[idx+1:]
}
return ProxyStatus{Enabled: true, Type: proxyType, Host: host}
}
if proxy := collectProxyFromEnv(os.Getenv); proxy.Enabled {
return proxy
}
// macOS: check system proxy via scutil.
@@ -139,14 +126,166 @@ func collectProxy() ProxyStatus {
defer cancel()
out, err := runCmd(ctx, "scutil", "--proxy")
if err == nil {
if strings.Contains(out, "HTTPEnable : 1") || strings.Contains(out, "HTTPSEnable : 1") {
return ProxyStatus{Enabled: true, Type: "System", Host: "System Proxy"}
}
if strings.Contains(out, "SOCKSEnable : 1") {
return ProxyStatus{Enabled: true, Type: "SOCKS", Host: "System Proxy"}
if proxy := collectProxyFromScutilOutput(out); proxy.Enabled {
return proxy
}
}
if proxy := collectProxyFromTunInterfaces(); proxy.Enabled {
return proxy
}
}
return ProxyStatus{Enabled: false}
}
func collectProxyFromEnv(getenv func(string) string) ProxyStatus {
// Include ALL_PROXY for users running proxy tools that only export a single variable.
envKeys := []string{
"https_proxy", "HTTPS_PROXY",
"http_proxy", "HTTP_PROXY",
"all_proxy", "ALL_PROXY",
}
for _, key := range envKeys {
val := strings.TrimSpace(getenv(key))
if val == "" {
continue
}
proxyType := "HTTP"
lower := strings.ToLower(val)
if strings.HasPrefix(lower, "socks") {
proxyType = "SOCKS"
}
host := parseProxyHost(val)
if host == "" {
host = val
}
return ProxyStatus{Enabled: true, Type: proxyType, Host: host}
}
return ProxyStatus{Enabled: false}
}
func collectProxyFromScutilOutput(out string) ProxyStatus {
if out == "" {
return ProxyStatus{Enabled: false}
}
if scutilProxyEnabled(out, "SOCKSEnable") {
host := joinHostPort(scutilProxyValue(out, "SOCKSProxy"), scutilProxyValue(out, "SOCKSPort"))
if host == "" {
host = "System Proxy"
}
return ProxyStatus{Enabled: true, Type: "SOCKS", Host: host}
}
if scutilProxyEnabled(out, "HTTPSEnable") {
host := joinHostPort(scutilProxyValue(out, "HTTPSProxy"), scutilProxyValue(out, "HTTPSPort"))
if host == "" {
host = "System Proxy"
}
return ProxyStatus{Enabled: true, Type: "HTTPS", Host: host}
}
if scutilProxyEnabled(out, "HTTPEnable") {
host := joinHostPort(scutilProxyValue(out, "HTTPProxy"), scutilProxyValue(out, "HTTPPort"))
if host == "" {
host = "System Proxy"
}
return ProxyStatus{Enabled: true, Type: "HTTP", Host: host}
}
if scutilProxyEnabled(out, "ProxyAutoConfigEnable") {
pacURL := scutilProxyValue(out, "ProxyAutoConfigURLString")
host := parseProxyHost(pacURL)
if host == "" {
host = "PAC"
}
return ProxyStatus{Enabled: true, Type: "PAC", Host: host}
}
if scutilProxyEnabled(out, "ProxyAutoDiscoveryEnable") {
return ProxyStatus{Enabled: true, Type: "WPAD", Host: "Auto Discovery"}
}
return ProxyStatus{Enabled: false}
}
func collectProxyFromTunInterfaces() ProxyStatus {
stats, err := net.IOCounters(true)
if err != nil {
return ProxyStatus{Enabled: false}
}
var activeTun []string
for _, s := range stats {
lower := strings.ToLower(s.Name)
if strings.HasPrefix(lower, "utun") || strings.HasPrefix(lower, "tun") {
if s.BytesRecv+s.BytesSent > 0 {
activeTun = append(activeTun, s.Name)
}
}
}
if len(activeTun) == 0 {
return ProxyStatus{Enabled: false}
}
sort.Strings(activeTun)
host := activeTun[0]
if len(activeTun) > 1 {
host = activeTun[0] + "+"
}
return ProxyStatus{Enabled: true, Type: "TUN", Host: host}
}
func scutilProxyEnabled(out, key string) bool {
return scutilProxyValue(out, key) == "1"
}
func scutilProxyValue(out, key string) string {
prefix := key + " :"
for _, line := range strings.Split(out, "\n") {
line = strings.TrimSpace(line)
if strings.HasPrefix(line, prefix) {
return strings.TrimSpace(strings.TrimPrefix(line, prefix))
}
}
return ""
}
func parseProxyHost(raw string) string {
raw = strings.TrimSpace(raw)
if raw == "" {
return ""
}
target := raw
if !strings.Contains(target, "://") {
target = "http://" + target
}
parsed, err := url.Parse(target)
if err != nil {
return ""
}
host := parsed.Host
if host == "" {
return ""
}
return strings.TrimPrefix(host, "@")
}
func joinHostPort(host, port string) string {
host = strings.TrimSpace(host)
port = strings.TrimSpace(port)
if host == "" {
return ""
}
if port == "" {
return host
}
if _, err := strconv.Atoi(port); err != nil {
return host
}
return host + ":" + port
}
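
Illustrative behavior of the two helpers above (outputs inferred from net/url semantics and the code as written; the first and third cases match the tests below):

// parseProxyHost("socks5://user:pass@127.0.0.1:7890") -> "127.0.0.1:7890" (userinfo dropped by url.Parse)
// parseProxyHost("127.0.0.1:7890")                    -> "127.0.0.1:7890" (http:// scheme assumed)
// parseProxyHost("http://127.0.0.1:6152/proxy.pac")   -> "127.0.0.1:6152" (path ignored)
// joinHostPort("127.0.0.1", "7890")                   -> "127.0.0.1:7890"
// joinHostPort("127.0.0.1", "bad-port")               -> "127.0.0.1"      (non-numeric port dropped)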

View File

@@ -0,0 +1,60 @@
package main
import "testing"
func TestCollectProxyFromEnvSupportsAllProxy(t *testing.T) {
env := map[string]string{
"ALL_PROXY": "socks5://127.0.0.1:7890",
}
getenv := func(key string) string {
return env[key]
}
got := collectProxyFromEnv(getenv)
if !got.Enabled {
t.Fatalf("expected proxy enabled")
}
if got.Type != "SOCKS" {
t.Fatalf("expected SOCKS type, got %s", got.Type)
}
if got.Host != "127.0.0.1:7890" {
t.Fatalf("unexpected host: %s", got.Host)
}
}
func TestCollectProxyFromScutilOutputPAC(t *testing.T) {
out := `
<dictionary> {
ProxyAutoConfigEnable : 1
ProxyAutoConfigURLString : http://127.0.0.1:6152/proxy.pac
}`
got := collectProxyFromScutilOutput(out)
if !got.Enabled {
t.Fatalf("expected proxy enabled")
}
if got.Type != "PAC" {
t.Fatalf("expected PAC type, got %s", got.Type)
}
if got.Host != "127.0.0.1:6152" {
t.Fatalf("unexpected host: %s", got.Host)
}
}
func TestCollectProxyFromScutilOutputHTTPHostPort(t *testing.T) {
out := `
<dictionary> {
HTTPEnable : 1
HTTPProxy : 127.0.0.1
HTTPPort : 7890
}`
got := collectProxyFromScutilOutput(out)
if !got.Enabled {
t.Fatalf("expected proxy enabled")
}
if got.Type != "HTTP" {
t.Fatalf("expected HTTP type, got %s", got.Type)
}
if got.Host != "127.0.0.1:7890" {
t.Fatalf("unexpected host: %s", got.Host)
}
}

2
go.mod
View File

@@ -8,7 +8,7 @@ require (
github.com/cespare/xxhash/v2 v2.3.0
github.com/charmbracelet/bubbletea v1.3.10
github.com/charmbracelet/lipgloss v1.1.0
github.com/shirou/gopsutil/v4 v4.25.12
github.com/shirou/gopsutil/v4 v4.26.1
golang.org/x/sync v0.19.0
)

4
go.sum
View File

@@ -53,8 +53,8 @@ github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/shirou/gopsutil/v4 v4.25.12 h1:e7PvW/0RmJ8p8vPGJH4jvNkOyLmbkXgXW4m6ZPic6CY=
github.com/shirou/gopsutil/v4 v4.25.12/go.mod h1:EivAfP5x2EhLp2ovdpKSozecVXn1TmuG7SMzs/Wh4PU=
github.com/shirou/gopsutil/v4 v4.26.1 h1:TOkEyriIXk2HX9d4isZJtbjXbEjf5qyKPAzbzY0JWSo=
github.com/shirou/gopsutil/v4 v4.26.1/go.mod h1:medLI9/UNAb0dOI9Q3/7yWSqKkj00u+1tgY8nvv41pc=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA=

View File

@@ -1,6 +1,8 @@
#!/bin/bash
# Application Data Cleanup Module
set -euo pipefail
readonly ORPHAN_AGE_THRESHOLD=${ORPHAN_AGE_THRESHOLD:-${MOLE_ORPHAN_AGE_DAYS:-60}}
# Args: $1=target_dir, $2=label
clean_ds_store_tree() {
local target="$1"
@@ -282,9 +284,21 @@ clean_orphaned_app_data() {
file_patterns+=("$base_path/$pat")
done
if [[ ${#file_patterns[@]} -gt 0 ]]; then
local _nullglob_state
_nullglob_state=$(shopt -p nullglob || true)
shopt -s nullglob
for item_path in "${file_patterns[@]}"; do
local iteration_count=0
for match in $item_path; do
local old_ifs=$IFS
IFS=$'\n'
local -a matches=()
# shellcheck disable=SC2206
matches=($item_path)
IFS=$old_ifs
if [[ ${#matches[@]} -eq 0 ]]; then
continue
fi
for match in "${matches[@]}"; do
[[ -e "$match" ]] || continue
((iteration_count++))
if [[ $iteration_count -gt $MOLE_MAX_ORPHAN_ITERATIONS ]]; then
@@ -299,12 +313,14 @@ clean_orphaned_app_data() {
if [[ -z "$size_kb" || "$size_kb" == "0" ]]; then
continue
fi
safe_clean "$match" "Orphaned $label: $bundle_id"
((orphaned_count++))
((total_orphaned_kb += size_kb))
if safe_clean "$match" "Orphaned $label: $bundle_id"; then
((orphaned_count++))
((total_orphaned_kb += size_kb))
fi
fi
done
done
eval "$_nullglob_state"
fi
done
stop_section_spinner
@@ -517,3 +533,197 @@ clean_orphaned_system_services() {
fi
}
# ============================================================================
# Orphaned LaunchAgent/LaunchDaemon Cleanup (Generic Detection)
# ============================================================================
# Extract program path from plist (supports both ProgramArguments and Program)
_extract_program_path() {
local plist="$1"
local program=""
program=$(plutil -extract ProgramArguments.0 raw "$plist" 2> /dev/null)
if [[ -z "$program" ]]; then
program=$(plutil -extract Program raw "$plist" 2> /dev/null)
fi
echo "$program"
}
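
For example, against a hypothetical agent plist the helper resolves the executable like so (the path and bundle id are illustrative, not from a real install):

# Given ~/Library/LaunchAgents/com.example.agent.plist containing:
#   <key>ProgramArguments</key>
#   <array><string>/Applications/Example.app/Contents/MacOS/agent</string>...</array>
# then:
#   _extract_program_path ~/Library/LaunchAgents/com.example.agent.plist
#   -> /Applications/Example.app/Contents/MacOS/agent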
# Extract associated bundle identifier from plist
_extract_associated_bundle() {
local plist="$1"
local associated=""
# Try array format first
associated=$(plutil -extract AssociatedBundleIdentifiers.0 raw "$plist" 2> /dev/null)
if [[ -z "$associated" ]] || [[ "$associated" == "1" ]]; then
# Try string format
associated=$(plutil -extract AssociatedBundleIdentifiers raw "$plist" 2> /dev/null)
# Filter out dict/array markers
if [[ "$associated" == "{"* ]] || [[ "$associated" == "["* ]]; then
associated=""
fi
fi
echo "$associated"
}
# Check if a LaunchAgent/LaunchDaemon is orphaned using multi-layer verification
# Returns 0 if orphaned, 1 if not orphaned
is_launch_item_orphaned() {
local plist="$1"
# Layer 1: Check if program path exists
local program=$(_extract_program_path "$plist")
# No program path - skip (not a standard launch item)
[[ -z "$program" ]] && return 1
# Program exists -> not orphaned
[[ -e "$program" ]] && return 1
# Layer 2: Check AssociatedBundleIdentifiers
local associated=$(_extract_associated_bundle "$plist")
if [[ -n "$associated" ]]; then
# Check if associated app exists via mdfind
if run_with_timeout 2 mdfind "kMDItemCFBundleIdentifier == '$associated'" 2> /dev/null | head -1 | grep -q .; then
return 1 # Associated app found -> not orphaned
fi
# Extract vendor name from bundle ID (com.vendor.app -> vendor)
local vendor=$(echo "$associated" | cut -d'.' -f2)
if [[ -n "$vendor" ]] && [[ ${#vendor} -ge 3 ]]; then
# Check if any app from this vendor exists
if find /Applications ~/Applications -maxdepth 2 -iname "*${vendor}*" -type d 2> /dev/null | grep -iq "\.app"; then
return 1 # Vendor app exists -> not orphaned
fi
fi
fi
# Layer 3: Check Application Support directory activity
if [[ "$program" =~ /Library/Application\ Support/([^/]+)/ ]]; then
local app_support_name="${BASH_REMATCH[1]}"
# Check both user and system Application Support
for base in "$HOME/Library/Application Support" "/Library/Application Support"; do
local support_path="$base/$app_support_name"
if [[ -d "$support_path" ]]; then
# Check for files modified in the last 7 days (active usage)
local recent_file=$(find "$support_path" -type f -mtime -7 2> /dev/null | head -1)
if [[ -n "$recent_file" ]]; then
return 1 # Active Application Support -> not orphaned
fi
fi
done
fi
# Layer 4: Check if app name from program path exists
if [[ "$program" =~ /Applications/([^/]+)\.app/ ]]; then
local app_name="${BASH_REMATCH[1]}"
# Look for apps with similar names (case-insensitive)
if find /Applications ~/Applications -maxdepth 2 -iname "*${app_name}*" -type d 2> /dev/null | grep -iq "\.app"; then
return 1 # Similar app exists -> not orphaned
fi
fi
# Layer 5: PrivilegedHelper special handling
if [[ "$program" =~ ^/Library/PrivilegedHelperTools/ ]]; then
local filename=$(basename "$plist")
local bundle_id="${filename%.plist}"
# Extract app hint from bundle ID (com.vendor.app.helper -> vendor)
local app_hint=$(echo "$bundle_id" | sed 's/com\.//; s/\..*helper.*//')
if [[ -n "$app_hint" ]] && [[ ${#app_hint} -ge 3 ]]; then
# Look for main app
if find /Applications ~/Applications -maxdepth 2 -iname "*${app_hint}*" -type d 2> /dev/null | grep -iq "\.app"; then
return 1 # Helper's main app exists -> not orphaned
fi
fi
fi
# All checks failed -> likely orphaned
return 0
}
# Clean orphaned user-level LaunchAgents
# Only processes ~/Library/LaunchAgents (safer than system-level)
clean_orphaned_launch_agents() {
local launch_agents_dir="$HOME/Library/LaunchAgents"
[[ ! -d "$launch_agents_dir" ]] && return 0
start_section_spinner "Scanning orphaned launch agents..."
local -a orphaned_items=()
local total_orphaned_kb=0
# Scan user LaunchAgents
while IFS= read -r -d '' plist; do
local filename=$(basename "$plist")
# Skip Apple's LaunchAgents
[[ "$filename" == com.apple.* ]] && continue
local bundle_id="${filename%.plist}"
# Check if orphaned using multi-layer verification
if is_launch_item_orphaned "$plist"; then
local size_kb=$(get_path_size_kb "$plist")
orphaned_items+=("$bundle_id|$plist")
((total_orphaned_kb += size_kb))
fi
done < <(find "$launch_agents_dir" -maxdepth 1 -name "*.plist" -print0 2> /dev/null)
stop_section_spinner
local orphaned_count=${#orphaned_items[@]}
if [[ $orphaned_count -eq 0 ]]; then
return 0
fi
# Clean the orphaned items automatically
local removed_count=0
local dry_run_count=0
local is_dry_run=false
if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
is_dry_run=true
fi
for item in "${orphaned_items[@]}"; do
IFS='|' read -r bundle_id plist_path <<< "$item"
if [[ "$is_dry_run" == "true" ]]; then
((dry_run_count++))
log_operation "clean" "DRY_RUN" "$plist_path" "orphaned launch agent"
continue
fi
# Try to unload first (if currently loaded)
launchctl unload "$plist_path" 2> /dev/null || true
# Remove the plist file
if safe_remove "$plist_path" false; then
((removed_count++))
log_operation "clean" "REMOVED" "$plist_path" "orphaned launch agent"
else
log_operation "clean" "FAILED" "$plist_path" "permission denied"
fi
done
if [[ "$is_dry_run" == "true" ]]; then
if [[ $dry_run_count -gt 0 ]]; then
local cleaned_mb=$(echo "$total_orphaned_kb" | awk '{printf "%.1f", $1/1024}')
echo " ${YELLOW}${ICON_DRY_RUN}${NC} Would remove $dry_run_count orphaned launch agent(s), ${cleaned_mb}MB"
note_activity
fi
else
if [[ $removed_count -gt 0 ]]; then
local cleaned_mb=$(echo "$total_orphaned_kb" | awk '{printf "%.1f", $1/1024}')
echo " ${GREEN}${ICON_SUCCESS}${NC} Removed $removed_count orphaned launch agent(s), ${cleaned_mb}MB"
note_activity
fi
fi
}
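
A safe way to preview what this picks up, relying only on the MOLE_DRY_RUN branch shown above (assumes the module is sourced so the function is in scope):

# Sketch: list would-be removals without unloading or deleting any plist.
MOLE_DRY_RUN=1 clean_orphaned_launch_agents
# Expected output shape: "Would remove N orphaned launch agent(s), X.XMB"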

View File

@@ -178,15 +178,41 @@ clean_dev_mobile() {
if command -v xcrun > /dev/null 2>&1; then
debug_log "Checking for unavailable Xcode simulators"
local unavailable_before=0
local unavailable_after=0
local removed_unavailable=0
unavailable_before=$(xcrun simctl list devices unavailable 2> /dev/null | command awk '/\(unavailable/ { count++ } END { print count+0 }' || echo "0")
[[ "$unavailable_before" =~ ^[0-9]+$ ]] || unavailable_before=0
if [[ "$DRY_RUN" == "true" ]]; then
clean_tool_cache "Xcode unavailable simulators" xcrun simctl delete unavailable
if ((unavailable_before > 0)); then
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Xcode unavailable simulators · would clean ${unavailable_before}"
else
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Xcode unavailable simulators · already clean"
fi
else
start_section_spinner "Checking unavailable simulators..."
if xcrun simctl delete unavailable > /dev/null 2>&1; then
stop_section_spinner
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Xcode unavailable simulators"
unavailable_after=$(xcrun simctl list devices unavailable 2> /dev/null | command awk '/\(unavailable/ { count++ } END { print count+0 }' || echo "0")
[[ "$unavailable_after" =~ ^[0-9]+$ ]] || unavailable_after=0
removed_unavailable=$((unavailable_before - unavailable_after))
if ((removed_unavailable < 0)); then
removed_unavailable=0
fi
if ((unavailable_before == 0)); then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Xcode unavailable simulators · already clean"
elif ((removed_unavailable > 0)); then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Xcode unavailable simulators · removed ${removed_unavailable}"
else
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Xcode unavailable simulators · cleanup completed"
fi
else
stop_section_spinner
echo -e " ${GRAY}${ICON_WARNING}${NC} Xcode unavailable simulators cleanup failed"
fi
fi
note_activity
@@ -207,9 +233,8 @@ clean_dev_mobile() {
safe_clean ~/.cache/swift-package-manager/* "Swift package manager cache"
}
# JVM ecosystem caches.
# Gradle excluded (default whitelist, like Maven). Remove via: mo clean --whitelist
clean_dev_jvm() {
safe_clean ~/.gradle/caches/* "Gradle caches"
safe_clean ~/.gradle/daemon/* "Gradle daemon logs"
safe_clean ~/.sbt/* "SBT cache"
safe_clean ~/.ivy2/cache/* "Ivy cache"
}
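
Since Gradle now ships in the default whitelist (see the DEFAULT_WHITELIST_PATTERNS hunk later in this diff), opting back in means editing the whitelist rather than this function:

# Re-enable Gradle cache cleaning via the whitelist editor referenced in the
# comment above, then drop the two Gradle entries:
#   mo clean --whitelist
#   (remove "$HOME/.gradle/caches/*" and "$HOME/.gradle/daemon/*")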

View File

@@ -44,8 +44,8 @@ readonly PURGE_TARGETS=(
# Minimum age in days before considering for cleanup.
readonly MIN_AGE_DAYS=7
# Scan depth defaults (relative to search root).
readonly PURGE_MIN_DEPTH_DEFAULT=2
readonly PURGE_MAX_DEPTH_DEFAULT=4
readonly PURGE_MIN_DEPTH_DEFAULT=1
readonly PURGE_MAX_DEPTH_DEFAULT=6
# Search paths (default, can be overridden via config file).
readonly DEFAULT_PURGE_SEARCH_PATHS=(
"$HOME/www"
@@ -352,7 +352,36 @@ scan_purge_targets() {
local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"
echo "$search_path" > "$stats_dir/purge_scanning" 2> /dev/null || true
if command -v fd > /dev/null 2>&1; then
# Helper to process raw results
process_scan_results() {
local input_file="$1"
if [[ -f "$input_file" ]]; then
while IFS= read -r item; do
# Check if we should abort (scanning file removed by Ctrl+C)
if [[ ! -f "$stats_dir/purge_scanning" ]]; then
return
fi
if [[ -n "$item" ]] && is_safe_project_artifact "$item" "$search_path"; then
echo "$item"
# Update scanning path to show current project directory
local project_dir=$(dirname "$item")
echo "$project_dir" > "$stats_dir/purge_scanning" 2> /dev/null || true
fi
done < "$input_file" | filter_nested_artifacts | filter_protected_artifacts > "$output_file"
rm -f "$input_file"
else
touch "$output_file"
fi
}
local use_find=true
# Allow forcing find via MO_USE_FIND environment variable
if [[ "${MO_USE_FIND:-0}" == "1" ]]; then
debug_log "MO_USE_FIND=1: Forcing find instead of fd"
use_find=true
elif command -v fd > /dev/null 2>&1; then
# Escape regex special characters in target names for fd patterns
local escaped_targets=()
for target in "${PURGE_TARGETS[@]}"; do
@@ -375,66 +404,49 @@ scan_purge_targets() {
"--exclude" ".Trash"
"--exclude" "Applications"
)
# Write to temp file first, then filter - more efficient than piping
fd "${fd_args[@]}" "$pattern" "$search_path" 2> /dev/null > "$output_file.raw" || true
# Single pass: safe + nested + protected
if [[ -f "$output_file.raw" ]]; then
while IFS= read -r item; do
# Check if we should abort (scanning file removed by Ctrl+C)
if [[ ! -f "$stats_dir/purge_scanning" ]]; then
return
fi
if [[ -n "$item" ]] && is_safe_project_artifact "$item" "$search_path"; then
echo "$item"
# Update scanning path to show current project directory
local project_dir=$(dirname "$item")
echo "$project_dir" > "$stats_dir/purge_scanning" 2> /dev/null || true
fi
done < "$output_file.raw" | filter_nested_artifacts | filter_protected_artifacts > "$output_file"
rm -f "$output_file.raw"
else
touch "$output_file"
fi
else
# Pruned find avoids descending into heavy directories.
local find_expr=()
local prune_dirs=(".git" "Library" ".Trash" "Applications")
for dir in "${prune_dirs[@]}"; do
find_expr+=("-name" "$dir" "-prune" "-o")
done
local i=0
for target in "${PURGE_TARGETS[@]}"; do
find_expr+=("-name" "$target" "-print" "-prune")
if [[ $i -lt $((${#PURGE_TARGETS[@]} - 1)) ]]; then
find_expr+=("-o")
# Try running fd. If it succeeds (exit code 0), use it.
# If it fails (e.g. a bad flag, permissions, or a broken binary), fall back to find.
if fd "${fd_args[@]}" "$pattern" "$search_path" 2> /dev/null > "$output_file.raw"; then
# Check if fd actually found anything - if empty, fall back to find
if [[ -s "$output_file.raw" ]]; then
debug_log "Using fd for scanning (found results)"
use_find=false
process_scan_results "$output_file.raw"
else
debug_log "fd returned empty results, falling back to find"
rm -f "$output_file.raw"
fi
((i++))
done
command find "$search_path" -mindepth "$min_depth" -maxdepth "$max_depth" -type d \
\( "${find_expr[@]}" \) 2> /dev/null > "$output_file.raw" || true
# Single pass: safe + nested + protected
if [[ -f "$output_file.raw" ]]; then
while IFS= read -r item; do
# Check if we should abort (scanning file removed by Ctrl+C)
if [[ ! -f "$stats_dir/purge_scanning" ]]; then
return
fi
if [[ -n "$item" ]] && is_safe_project_artifact "$item" "$search_path"; then
echo "$item"
# Update scanning path to show current project directory
local project_dir=$(dirname "$item")
echo "$project_dir" > "$stats_dir/purge_scanning" 2> /dev/null || true
fi
done < "$output_file.raw" | filter_nested_artifacts | filter_protected_artifacts > "$output_file"
rm -f "$output_file.raw"
else
touch "$output_file"
debug_log "fd command failed, falling back to find"
fi
fi
if [[ "$use_find" == "true" ]]; then
debug_log "Using find for scanning"
# Pruned find avoids descending into heavy directories.
local prune_dirs=(".git" "Library" ".Trash" "Applications")
local purge_targets=("${PURGE_TARGETS[@]}")
local prune_expr=()
for i in "${!prune_dirs[@]}"; do
prune_expr+=(-name "${prune_dirs[$i]}")
[[ $i -lt $((${#prune_dirs[@]} - 1)) ]] && prune_expr+=(-o)
done
local target_expr=()
for i in "${!purge_targets[@]}"; do
target_expr+=(-name "${purge_targets[$i]}")
[[ $i -lt $((${#purge_targets[@]} - 1)) ]] && target_expr+=(-o)
done
command find "$search_path" -mindepth "$min_depth" -maxdepth "$max_depth" -type d \
\( "${prune_expr[@]}" \) -prune -o \
\( "${target_expr[@]}" \) -print -prune \
2> /dev/null > "$output_file.raw" || true
process_scan_results "$output_file.raw"
fi
}
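
Two escape hatches fall out of this structure, both visible in the code above: forcing the pruned-find path, and the automatic fallback when fd errors out or returns nothing. A sketch (the invocation wrapper is whatever entry point calls scan_purge_targets; MO_DEBUG=1 appears to be the debug switch used elsewhere in this diff):

# Force the find-based scan even when fd is installed:
#   MO_USE_FIND=1 <purge invocation>
# With debug logging enabled, the chosen path is reported via debug_log:
#   "Using fd for scanning (found results)" / "Using find for scanning"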
# Filter out nested artifacts (e.g. node_modules inside node_modules, .build inside build).
# Optimized: Sort paths to put parents before children, then filter in single pass.
@@ -996,11 +1008,12 @@ clean_project_artifacts() {
local size_str="$3"
# Terminal width for alignment
local terminal_width=$(tput cols 2> /dev/null || echo 80)
local fixed_width=28 # Reserve for size and artifact type (9 + 3 + 16)
local fixed_width=32 # Reserve for size and artifact type (9 + 3 + 20)
local available_width=$((terminal_width - fixed_width))
# Bounds: 30-50 chars for project path (increased to accommodate full paths)
# Bounds: 30 chars min, but cap at 70% of terminal width to preserve aesthetics
local max_aesthetic_width=$((terminal_width * 70 / 100))
[[ $available_width -gt $max_aesthetic_width ]] && available_width=$max_aesthetic_width
[[ $available_width -lt 30 ]] && available_width=30
[[ $available_width -gt 50 ]] && available_width=50
# Truncate project path if needed
local truncated_path=$(truncate_by_display_width "$project_path" "$available_width")
local current_width=$(get_display_width "$truncated_path")
@@ -1008,7 +1021,7 @@ clean_project_artifacts() {
local padding=$((available_width - current_width))
local printf_width=$((char_count + padding))
# Format: "project_path size | artifact_type"
printf "%-*s %9s | %-13s" "$printf_width" "$truncated_path" "$size_str" "$artifact_type"
printf "%-*s %9s | %-17s" "$printf_width" "$truncated_path" "$size_str" "$artifact_type"
}
# Build menu options - one line per artifact
for item in "${safe_to_clean[@]}"; do

View File

@@ -5,19 +5,47 @@ set -euo pipefail
clean_deep_system() {
stop_section_spinner
local cache_cleaned=0
safe_sudo_find_delete "/Library/Caches" "*.cache" "$MOLE_TEMP_FILE_AGE_DAYS" "f" && cache_cleaned=1 || true
safe_sudo_find_delete "/Library/Caches" "*.tmp" "$MOLE_TEMP_FILE_AGE_DAYS" "f" && cache_cleaned=1 || true
safe_sudo_find_delete "/Library/Caches" "*.log" "$MOLE_LOG_AGE_DAYS" "f" && cache_cleaned=1 || true
# Optimized: Single pass for /Library/Caches (3 patterns in 1 scan)
if sudo test -d "/Library/Caches" 2> /dev/null; then
while IFS= read -r -d '' file; do
if should_protect_path "$file"; then
continue
fi
if safe_sudo_remove "$file"; then
cache_cleaned=1
fi
done < <(sudo find "/Library/Caches" -maxdepth 5 -type f \( \
\( -name "*.cache" -mtime "+$MOLE_TEMP_FILE_AGE_DAYS" \) -o \
\( -name "*.tmp" -mtime "+$MOLE_TEMP_FILE_AGE_DAYS" \) -o \
\( -name "*.log" -mtime "+$MOLE_LOG_AGE_DAYS" \) \
\) -print0 2> /dev/null || true)
fi
[[ $cache_cleaned -eq 1 ]] && log_success "System caches"
start_section_spinner "Cleaning system temporary files..."
local tmp_cleaned=0
safe_sudo_find_delete "/private/tmp" "*" "${MOLE_TEMP_FILE_AGE_DAYS}" "f" && tmp_cleaned=1 || true
safe_sudo_find_delete "/private/var/tmp" "*" "${MOLE_TEMP_FILE_AGE_DAYS}" "f" && tmp_cleaned=1 || true
stop_section_spinner
[[ $tmp_cleaned -eq 1 ]] && log_success "System temp files"
start_section_spinner "Cleaning system crash reports..."
safe_sudo_find_delete "/Library/Logs/DiagnosticReports" "*" "$MOLE_CRASH_REPORT_AGE_DAYS" "f" || true
stop_section_spinner
log_success "System crash reports"
safe_sudo_find_delete "/private/var/log" "*.log" "$MOLE_LOG_AGE_DAYS" "f" || true
safe_sudo_find_delete "/private/var/log" "*.gz" "$MOLE_LOG_AGE_DAYS" "f" || true
start_section_spinner "Cleaning system logs..."
# Optimized: Single pass for /private/var/log (2 patterns in 1 scan)
if sudo test -d "/private/var/log" 2> /dev/null; then
while IFS= read -r -d '' file; do
if should_protect_path "$file"; then
continue
fi
safe_sudo_remove "$file" || true
done < <(sudo find "/private/var/log" -maxdepth 5 -type f \( \
-name "*.log" -o -name "*.gz" \
\) -mtime "+$MOLE_LOG_AGE_DAYS" -print0 2> /dev/null || true)
fi
stop_section_spinner
log_success "System logs"
start_section_spinner "Scanning system library updates..."
if [[ -d "/Library/Updates" && ! -L "/Library/Updates" ]]; then
local updates_cleaned=0
while IFS= read -r -d '' item; do
@@ -34,13 +62,17 @@ clean_deep_system() {
((updates_cleaned++))
fi
done < <(find /Library/Updates -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true)
stop_section_spinner
[[ $updates_cleaned -gt 0 ]] && log_success "System library updates"
else
stop_section_spinner
fi
start_section_spinner "Scanning macOS installer files..."
if [[ -d "/macOS Install Data" ]]; then
local mtime=$(get_file_mtime "/macOS Install Data")
local age_days=$((($(get_epoch_seconds) - mtime) / 86400))
debug_log "Found macOS Install Data, age ${age_days} days"
if [[ $age_days -ge 30 ]]; then
if [[ $age_days -ge 14 ]]; then
local size_kb=$(get_path_size_kb "/macOS Install Data")
if [[ -n "$size_kb" && "$size_kb" -gt 0 ]]; then
local size_human=$(bytes_to_human "$((size_kb * 1024))")
@@ -50,9 +82,39 @@ clean_deep_system() {
fi
fi
else
debug_log "Keeping macOS Install Data, only ${age_days} days old, needs 30+"
debug_log "Keeping macOS Install Data, only ${age_days} days old, needs 14+"
fi
fi
# Clean macOS installer apps (e.g., "Install macOS Sequoia.app")
# Only remove installers older than 14 days and not currently running
local installer_cleaned=0
for installer_app in /Applications/Install\ macOS*.app; do
[[ -d "$installer_app" ]] || continue
local app_name=$(basename "$installer_app")
# Skip if installer is currently running
if pgrep -f "$installer_app" > /dev/null 2>&1; then
debug_log "Skipping $app_name: currently running"
continue
fi
# Check age (same 14-day threshold as /macOS Install Data)
local mtime=$(get_file_mtime "$installer_app")
local age_days=$((($(get_epoch_seconds) - mtime) / 86400))
if [[ $age_days -lt 14 ]]; then
debug_log "Keeping $app_name: only ${age_days} days old, needs 14+"
continue
fi
local size_kb=$(get_path_size_kb "$installer_app")
if [[ -n "$size_kb" && "$size_kb" -gt 0 ]]; then
local size_human=$(bytes_to_human "$((size_kb * 1024))")
debug_log "Cleaning macOS installer: $app_name, $size_human, ${age_days} days old"
if safe_sudo_remove "$installer_app"; then
log_success "$app_name, $size_human"
((installer_cleaned++))
fi
fi
done
stop_section_spinner
[[ $installer_cleaned -gt 0 ]] && debug_log "Cleaned $installer_cleaned macOS installer(s)"
start_section_spinner "Scanning system caches..."
local code_sign_cleaned=0
local found_count=0
@@ -78,23 +140,54 @@ clean_deep_system() {
stop_section_spinner
[[ $code_sign_cleaned -gt 0 ]] && log_success "Browser code signature caches, $code_sign_cleaned items"
start_section_spinner "Cleaning system diagnostic logs..."
local diag_cleaned=0
safe_sudo_find_delete "/private/var/db/diagnostics/Special" "*" "$MOLE_LOG_AGE_DAYS" "f" && diag_cleaned=1 || true
safe_sudo_find_delete "/private/var/db/diagnostics/Persist" "*" "$MOLE_LOG_AGE_DAYS" "f" && diag_cleaned=1 || true
safe_sudo_find_delete "/private/var/db/DiagnosticPipeline" "*" "$MOLE_LOG_AGE_DAYS" "f" && diag_cleaned=1 || true
safe_sudo_find_delete "/private/var/db/powerlog" "*" "$MOLE_LOG_AGE_DAYS" "f" && diag_cleaned=1 || true
safe_sudo_find_delete "/private/var/db/reportmemoryexception/MemoryLimitViolations" "*" "30" "f" && diag_cleaned=1 || true
stop_section_spinner
# Optimized: Single pass for diagnostics directory (Special + Persist + tracev3)
# Replaces 4 separate find operations with 1 combined operation
local diag_base="/private/var/db/diagnostics"
if sudo test -d "$diag_base" 2> /dev/null; then
while IFS= read -r -d '' file; do
if should_protect_path "$file"; then
continue
fi
safe_sudo_remove "$file" || true
done < <(sudo find "$diag_base" -maxdepth 5 -type f \( \
\( -mtime "+$MOLE_LOG_AGE_DAYS" \) -o \
\( -name "*.tracev3" -mtime +30 \) \
\) -print0 2> /dev/null || true)
fi
safe_sudo_find_delete "/private/var/db/DiagnosticPipeline" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
log_success "System diagnostic logs"
safe_sudo_find_delete "/private/var/db/powerlog" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
log_success "Power logs"
start_section_spinner "Cleaning memory exception reports..."
local mem_reports_dir="/private/var/db/reportmemoryexception/MemoryLimitViolations"
if sudo test -d "$mem_reports_dir" 2> /dev/null; then
# Count and size old files before deletion
local file_count=0
local total_size_kb=0
while IFS= read -r -d '' file; do
((file_count++))
local file_size
file_size=$(sudo stat -f%z "$file" 2> /dev/null || echo "0")
((total_size_kb += file_size / 1024))
done < <(sudo find "$mem_reports_dir" -type f -mtime +30 -print0 2> /dev/null || true)
[[ $diag_cleaned -eq 1 ]] && log_success "System diagnostic logs"
start_section_spinner "Cleaning diagnostic trace logs..."
local trace_cleaned=0
safe_sudo_find_delete "/private/var/db/diagnostics/Persist" "*.tracev3" "30" "f" && trace_cleaned=1 || true
safe_sudo_find_delete "/private/var/db/diagnostics/Special" "*.tracev3" "30" "f" && trace_cleaned=1 || true
# For directories with many files, use find -delete for performance
if [[ "$file_count" -gt 0 ]]; then
if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then
sudo find "$mem_reports_dir" -type f -mtime +30 -delete 2> /dev/null || true
# Log summary to operations.log
if oplog_enabled && [[ "$total_size_kb" -gt 0 ]]; then
local size_human
size_human=$(bytes_to_human "$((total_size_kb * 1024))")
log_operation "clean" "REMOVED" "$mem_reports_dir" "$file_count files, $size_human"
fi
else
log_info "[DRY-RUN] Would remove $file_count old memory exception reports ($total_size_kb KB)"
fi
fi
fi
stop_section_spinner
[[ $trace_cleaned -eq 1 ]] && log_success "System diagnostic trace logs"
log_success "Memory exception reports"
}
# Incomplete Time Machine backups.
clean_time_machine_failed_backups() {
@@ -275,15 +368,18 @@ clean_local_snapshots() {
return 0
fi
start_section_spinner "Checking Time Machine status..."
local rc_running=0
tm_is_running || rc_running=$?
if [[ $rc_running -eq 2 ]]; then
stop_section_spinner
echo -e " ${YELLOW}!${NC} Could not determine Time Machine status; skipping snapshot check"
return 0
fi
if [[ $rc_running -eq 0 ]]; then
stop_section_spinner
echo -e " ${YELLOW}!${NC} Time Machine is active; skipping snapshot check"
return 0
fi

View File

@@ -20,9 +20,16 @@ clean_user_essentials() {
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Trash · emptied, $trash_count items"
note_activity
else
local cleaned_count=0
while IFS= read -r -d '' item; do
safe_remove "$item" true || true
if safe_remove "$item" true; then
((cleaned_count++))
fi
done < <(command find "$HOME/.Trash" -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true)
if [[ $cleaned_count -gt 0 ]]; then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Trash · emptied, $cleaned_count items"
note_activity
fi
fi
else
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Trash · already empty"
@@ -450,15 +457,18 @@ process_container_cache() {
found_any=true
((cleaned_count++))
if [[ "$DRY_RUN" != "true" ]]; then
# Clean contents safely with local nullglob.
local _ng_state
_ng_state=$(shopt -p nullglob || true)
shopt -s nullglob
for item in "$cache_dir"/*; do
[[ -e "$item" ]] || continue
safe_remove "$item" true || true
done
eval "$_ng_state"
# For directories with many files, use find -delete for performance
if ! find "$cache_dir" -mindepth 1 -delete 2> /dev/null; then
# Fallback: try item-by-item if find fails
local _ng_state
_ng_state=$(shopt -p nullglob || true)
shopt -s nullglob
for item in "$cache_dir"/*; do
[[ -e "$item" ]] || continue
safe_remove "$item" true || true
done
eval "$_ng_state"
fi
fi
fi
}
@@ -573,10 +583,15 @@ clean_application_support_logs() {
((cleaned_count++))
found_any=true
if [[ "$DRY_RUN" != "true" ]]; then
for item in "$candidate"/*; do
[[ -e "$item" ]] || continue
safe_remove "$item" true > /dev/null 2>&1 || true
done
# For directories with many files, use find -delete for performance
# This avoids shell expansion and individual safe_remove calls
if ! find "$candidate" -mindepth 1 -delete 2> /dev/null; then
# Fallback: try item-by-item if find fails
for item in "$candidate"/*; do
[[ -e "$item" ]] || continue
safe_remove "$item" true > /dev/null 2>&1 || true
done
fi
fi
fi
fi
@@ -597,10 +612,15 @@ clean_application_support_logs() {
((cleaned_count++))
found_any=true
if [[ "$DRY_RUN" != "true" ]]; then
for item in "$candidate"/*; do
[[ -e "$item" ]] || continue
safe_remove "$item" true > /dev/null 2>&1 || true
done
# For directories with many files, use find -delete for performance
# This avoids shell expansion and individual safe_remove calls
if ! find "$candidate" -mindepth 1 -delete 2> /dev/null; then
# Fallback: try item-by-item if find fails
for item in "$candidate"/*; do
[[ -e "$item" ]] || continue
safe_remove "$item" true > /dev/null 2>&1 || true
done
fi
fi
fi
fi

View File

@@ -280,9 +280,18 @@ readonly DATA_PROTECTED_BUNDLES=(
"com.telerik.Fiddler"
"com.usebruno.app"
# Network Proxy & VPN Tools
"*clash*"
"*Clash*"
# Network Proxy & VPN Tools (Clash variants - use specific patterns to avoid false positives)
"com.clash.*"
"ClashX*"
"clash-*"
"Clash-*"
"*-clash"
"*-Clash"
"clash.*"
"Clash.*"
"clash_*"
"clashverge*"
"ClashVerge*"
"com.nssurge.surge-mac"
"*surge*"
"*Surge*"
@@ -678,7 +687,14 @@ should_protect_data() {
com.sublimetext.* | com.sublimehq.* | Cursor | Claude | ChatGPT | Ollama)
return 0
;;
com.nssurge.* | com.v2ray.* | ClashX* | Surge* | Shadowrocket* | Quantumult*)
# Specific match to avoid ShellCheck redundancy warning with com.clash.*
com.clash.app)
return 0
;;
com.nssurge.* | com.v2ray.* | com.clash.* | ClashX* | Surge* | Shadowrocket* | Quantumult*)
return 0
;;
clash-* | Clash-* | *-clash | *-Clash | clash.* | Clash.* | clash_* | clashverge* | ClashVerge*)
return 0
;;
com.docker.* | com.getpostman.* | com.insomnia.*)
@@ -695,7 +711,13 @@ should_protect_data() {
;;
esac
# Most apps won't match, return early
# Fallback: check against the full DATA_PROTECTED_BUNDLES list
for pattern in "${DATA_PROTECTED_BUNDLES[@]}"; do
if bundle_matches_pattern "$bundle_id" "$pattern"; then
return 0
fi
done
return 1
}
@@ -760,7 +782,8 @@ should_protect_path() {
# Matches: .../Library/Group Containers/group.id/...
if [[ "$path" =~ /Library/Containers/([^/]+) ]] || [[ "$path" =~ /Library/Group\ Containers/([^/]+) ]]; then
local bundle_id="${BASH_REMATCH[1]}"
if should_protect_data "$bundle_id"; then
# In uninstall mode, only system components are protected; skip data protection
if [[ "${MOLE_UNINSTALL_MODE:-0}" != "1" ]] && should_protect_data "$bundle_id"; then
return 0
fi
fi
@@ -978,6 +1001,14 @@ find_app_files() {
)
fi
# Issue #422: Zed channel builds can leave data under another channel bundle id.
# Example: uninstalling dev.zed.Zed-Nightly should also detect dev.zed.Zed-Preview leftovers.
if [[ "$bundle_id" =~ ^dev\.zed\.Zed- ]] && [[ -d "$HOME/Library/HTTPStorages" ]]; then
while IFS= read -r -d '' zed_http_storage; do
files_to_clean+=("$zed_http_storage")
done < <(command find "$HOME/Library/HTTPStorages" -maxdepth 1 -name "dev.zed.Zed-*" -print0 2> /dev/null)
fi
# Process standard patterns
for p in "${user_patterns[@]}"; do
local expanded_path="${p/#\~/$HOME}"
@@ -1076,9 +1107,48 @@ find_app_files() {
[[ "$app_name" =~ Godot|godot ]] && [[ -d ~/Library/Application\ Support/Godot ]] && files_to_clean+=("$HOME/Library/Application Support/Godot")
# 6. Tools
[[ "$bundle_id" =~ microsoft.*vscode ]] && [[ -d ~/.vscode ]] && files_to_clean+=("$HOME/.vscode")
if [[ "$bundle_id" =~ microsoft.*[vV][sS][cC]ode ]]; then
[[ -d "$HOME/.vscode" ]] && files_to_clean+=("$HOME/.vscode")
[[ -d "$HOME/Library/Caches/com.microsoft.VSCode.ShipIt" ]] && files_to_clean+=("$HOME/Library/Caches/com.microsoft.VSCode.ShipIt")
[[ -d "$HOME/Library/Caches/com.microsoft.VSCodeInsiders.ShipIt" ]] && files_to_clean+=("$HOME/Library/Caches/com.microsoft.VSCodeInsiders.ShipIt")
fi
[[ "$app_name" =~ Docker ]] && [[ -d ~/.docker ]] && files_to_clean+=("$HOME/.docker")
# 6.1 Maestro Studio
if [[ "$bundle_id" == "com.maestro.studio" ]] || [[ "$lowercase_name" =~ maestro[[:space:]]*studio ]]; then
[[ -d ~/.mobiledev ]] && files_to_clean+=("$HOME/.mobiledev")
fi
# 7. Raycast
if [[ "$bundle_id" == "com.raycast.macos" ]]; then
# Standard user directories
local raycast_dirs=(
"$HOME/Library/Application Support"
"$HOME/Library/Application Scripts"
"$HOME/Library/Containers"
)
for dir in "${raycast_dirs[@]}"; do
[[ -d "$dir" ]] && while IFS= read -r -d '' p; do
files_to_clean+=("$p")
done < <(command find "$dir" -maxdepth 1 -type d -iname "*raycast*" -print0 2> /dev/null)
done
# Explicit Raycast container directories (hardcoded leftovers)
[[ -d "$HOME/Library/Containers/com.raycast.macos.BrowserExtension" ]] && files_to_clean+=("$HOME/Library/Containers/com.raycast.macos.BrowserExtension")
[[ -d "$HOME/Library/Containers/com.raycast.macos.RaycastAppIntents" ]] && files_to_clean+=("$HOME/Library/Containers/com.raycast.macos.RaycastAppIntents")
# Cache (deeper search)
[[ -d "$HOME/Library/Caches" ]] && while IFS= read -r -d '' p; do
files_to_clean+=("$p")
done < <(command find "$HOME/Library/Caches" -maxdepth 2 -type d -iname "*raycast*" -print0 2> /dev/null)
# VSCode extension storage
local vscode_global="$HOME/Library/Application Support/Code/User/globalStorage"
[[ -d "$vscode_global" ]] && while IFS= read -r -d '' p; do
files_to_clean+=("$p")
done < <(command find "$vscode_global" -maxdepth 1 -type d -iname "*raycast*" -print0 2> /dev/null)
fi
# Output results
if [[ ${#files_to_clean[@]} -gt 0 ]]; then
printf '%s\n' "${files_to_clean[@]}"
@@ -1172,6 +1242,13 @@ find_app_system_files() {
done < <(command find /private/var/db/receipts -maxdepth 1 \( -name "*$bundle_id*" \) -print0 2> /dev/null)
fi
# Raycast system-level files
if [[ "$bundle_id" == "com.raycast.macos" ]]; then
[[ -d "/Library/Application Support" ]] && while IFS= read -r -d '' p; do
system_files+=("$p")
done < <(command find "/Library/Application Support" -maxdepth 1 -type d -iname "*raycast*" -print0 2> /dev/null)
fi
local receipt_files=""
receipt_files=$(find_app_receipt_files "$bundle_id")

View File

@@ -63,6 +63,8 @@ declare -a DEFAULT_WHITELIST_PATTERNS=(
"$HOME/Library/Caches/ms-playwright*"
"$HOME/.cache/huggingface*"
"$HOME/.m2/repository/*"
"$HOME/.gradle/caches/*"
"$HOME/.gradle/daemon/*"
"$HOME/.ollama/models/*"
"$HOME/Library/Caches/com.nssurge.surge-mac/*"
"$HOME/Library/Application Support/com.nssurge.surge-mac/*"
@@ -626,12 +628,12 @@ start_section_spinner() {
# Stop spinner and clear the line
# Usage: stop_section_spinner
stop_section_spinner() {
# Only clear line if spinner was actually running
if [[ -n "${INLINE_SPINNER_PID:-}" ]]; then
stop_inline_spinner 2> /dev/null || true
if [[ -t 1 ]]; then
echo -ne "\r\033[2K" >&2 || true
fi
# Always try to stop spinner (function handles empty PID gracefully)
stop_inline_spinner 2> /dev/null || true
# Always clear line to handle edge cases where spinner output remains
# (e.g., spinner was stopped elsewhere but line not cleared)
if [[ -t 1 ]]; then
printf "\r\033[2K" >&2 || true
fi
}

View File

@@ -10,6 +10,11 @@ if [[ -n "${MOLE_FILE_OPS_LOADED:-}" ]]; then
fi
readonly MOLE_FILE_OPS_LOADED=1
# Error codes for removal operations
readonly MOLE_ERR_SIP_PROTECTED=10
readonly MOLE_ERR_AUTH_FAILED=11
readonly MOLE_ERR_READONLY_FS=12
# Ensure dependencies are loaded
_MOLE_CORE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [[ -z "${MOLE_BASE_LOADED:-}" ]]; then
@@ -25,6 +30,35 @@ if [[ -z "${MOLE_TIMEOUT_LOADED:-}" ]]; then
source "$_MOLE_CORE_DIR/timeout.sh"
fi
# ============================================================================
# Utility Functions
# ============================================================================
# Format a duration in seconds as a human-readable string (e.g., "5 days", "2 months")
format_duration_human() {
local seconds="${1:-0}"
[[ ! "$seconds" =~ ^[0-9]+$ ]] && seconds=0
local days=$((seconds / 86400))
if [[ $days -eq 0 ]]; then
echo "today"
elif [[ $days -eq 1 ]]; then
echo "1 day"
elif [[ $days -lt 7 ]]; then
echo "${days} days"
elif [[ $days -lt 30 ]]; then
local weeks=$((days / 7))
[[ $weeks -eq 1 ]] && echo "1 week" || echo "${weeks} weeks"
elif [[ $days -lt 365 ]]; then
local months=$((days / 30))
[[ $months -eq 1 ]] && echo "1 month" || echo "${months} months"
else
local years=$((days / 365))
[[ $years -eq 1 ]] && echo "1 year" || echo "${years} years"
fi
}
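
Expected outputs for a few inputs, derived directly from the thresholds above:

# format_duration_human 0        -> today
# format_duration_human 86400    -> 1 day      (1 day)
# format_duration_human 1209600  -> 2 weeks    (14 days)
# format_duration_human 5184000  -> 2 months   (60 days)
# format_duration_human 40000000 -> 1 year     (462 days)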
# ============================================================================
# Path Validation
# ============================================================================
@@ -235,28 +269,54 @@ safe_remove() {
fi
}
# Safe symlink removal (for pre-validated symlinks only)
safe_remove_symlink() {
local path="$1"
local use_sudo="${2:-false}"
if [[ ! -L "$path" ]]; then
return 1
fi
if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
debug_log "[DRY RUN] Would remove symlink: $path"
return 0
fi
local rm_exit=0
if [[ "$use_sudo" == "true" ]]; then
sudo rm "$path" 2> /dev/null || rm_exit=$?
else
rm "$path" 2> /dev/null || rm_exit=$?
fi
if [[ $rm_exit -eq 0 ]]; then
log_operation "${MOLE_CURRENT_COMMAND:-clean}" "REMOVED" "$path" "symlink"
return 0
else
log_operation "${MOLE_CURRENT_COMMAND:-clean}" "FAILED" "$path" "symlink removal failed"
return 1
fi
}
# Safe sudo removal with symlink protection
safe_sudo_remove() {
local path="$1"
# Validate path
if ! validate_path_for_deletion "$path"; then
log_error "Path validation failed for sudo remove: $path"
return 1
fi
# Check if path exists
if [[ ! -e "$path" ]]; then
return 0
fi
# Additional check: reject symlinks for sudo operations
if [[ -L "$path" ]]; then
log_error "Refusing to sudo remove symlink: $path"
return 1
fi
# Dry-run mode: log but don't delete
if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
if [[ "${MO_DEBUG:-}" == "1" ]]; then
local file_type="file"
@@ -278,21 +338,21 @@ safe_sudo_remove() {
local now
now=$(date +%s 2> /dev/null || echo "0")
if [[ "$mod_time" -gt 0 && "$now" -gt 0 ]]; then
file_age=$(((now - mod_time) / 86400))
local age_seconds=$((now - mod_time))
file_age=$(format_duration_human "$age_seconds")
fi
fi
fi
debug_file_action "[DRY RUN] Would remove, sudo" "$path" "$file_size" "$file_age"
log_info "[DRY-RUN] Would sudo remove: $file_type $path"
[[ -n "$file_size" ]] && log_info " Size: $file_size"
[[ -n "$file_age" ]] && log_info " Age: $file_age"
else
debug_log "[DRY RUN] Would remove, sudo: $path"
log_info "[DRY-RUN] Would sudo remove: $path"
fi
return 0
fi
debug_log "Removing, sudo: $path"
# Calculate size before deletion for logging
local size_kb=0
local size_human=""
if oplog_enabled; then
@@ -304,15 +364,34 @@ safe_sudo_remove() {
fi
fi
# Perform the deletion
if sudo rm -rf "$path" 2> /dev/null; then # SAFE: safe_sudo_remove implementation
local output
local ret=0
output=$(sudo rm -rf "$path" 2>&1) || ret=$? # safe_remove
if [[ $ret -eq 0 ]]; then
log_operation "${MOLE_CURRENT_COMMAND:-clean}" "REMOVED" "$path" "$size_human"
return 0
else
log_error "Failed to remove, sudo: $path"
log_operation "${MOLE_CURRENT_COMMAND:-clean}" "FAILED" "$path" "sudo error"
return 1
fi
case "$output" in
*"Operation not permitted"*)
log_operation "${MOLE_CURRENT_COMMAND:-clean}" "FAILED" "$path" "sip/mdm protected"
return "$MOLE_ERR_SIP_PROTECTED"
;;
*"Read-only file system"*)
log_operation "${MOLE_CURRENT_COMMAND:-clean}" "FAILED" "$path" "readonly filesystem"
return "$MOLE_ERR_READONLY_FS"
;;
*"Sorry, try again"* | *"incorrect passphrase"* | *"incorrect credentials"*)
log_operation "${MOLE_CURRENT_COMMAND:-clean}" "FAILED" "$path" "auth failed"
return "$MOLE_ERR_AUTH_FAILED"
;;
*)
log_error "Failed to remove, sudo: $path"
log_operation "${MOLE_CURRENT_COMMAND:-clean}" "FAILED" "$path" "sudo error"
return 1
;;
esac
}
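A caller-side sketch showing how these structured exit codes can be consumed (the app path is hypothetical):
ret=0
safe_sudo_remove "/Applications/Example.app" || ret=$?
case "$ret" in
  0) echo "removed" ;;
  "$MOLE_ERR_SIP_PROTECTED") echo "skipped: protected by SIP/MDM" ;;
  "$MOLE_ERR_AUTH_FAILED")   echo "sudo authentication failed" ;;
  "$MOLE_ERR_READONLY_FS")   echo "read-only filesystem" ;;
  *) echo "removal failed" ;;
esac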
# ============================================================================
@@ -387,7 +466,12 @@ safe_sudo_find_delete() {
debug_log "Finding, sudo, in $base_dir: $pattern, age: ${age_days}d, type: $type_filter"
local find_args=("-maxdepth" "5" "-name" "$pattern" "-type" "$type_filter")
local find_args=("-maxdepth" "5")
# Skip -name when pattern is "*" (it matches everything anyway, so the test only adds overhead)
if [[ "$pattern" != "*" ]]; then
find_args+=("-name" "$pattern")
fi
find_args+=("-type" "$type_filter")
if [[ "$age_days" -gt 0 ]]; then
find_args+=("-mtime" "+$age_days")
fi
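For pattern "*" the -name test is dropped entirely, so the assembled command reduces to something like this (an illustrative expansion for age_days=30 and type f; the exact action flags depend on the caller):
sudo find "$base_dir" -maxdepth 5 -type f -mtime +30 -delete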
@@ -414,16 +498,13 @@ get_path_size_kb() {
echo "0"
return
}
# Direct execution without timeout overhead - critical for performance in loops
# Use || true so a du failure (e.g. permission error) doesn't exit the script under set -e.
# With pipefail the pipeline would otherwise fail when du fails; the || fallback catches it.
local size
size=$(command du -skP "$path" 2> /dev/null | awk 'NR==1 {print $1; exit}' || true)
# Ensure size is a valid number (fix for non-numeric du output)
if [[ "$size" =~ ^[0-9]+$ ]]; then
echo "$size"
else
[[ "${MO_DEBUG:-}" == "1" ]] && debug_log "get_path_size_kb: Failed to get size for $path (returned: $size)"
echo "0"
fi
}
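Typical use of the size helper (path illustrative):
size_kb=$(get_path_size_kb "$HOME/Library/Caches/com.example.app")
echo "cache size: $((size_kb / 1024)) MB"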
@@ -443,3 +524,40 @@ calculate_total_size() {
echo "$total_kb"
}
diagnose_removal_failure() {
local exit_code="$1"
local app_name="${2:-application}"
local reason=""
local suggestion=""
local touchid_file="/etc/pam.d/sudo"
case "$exit_code" in
"$MOLE_ERR_SIP_PROTECTED")
reason="protected by macOS (SIP/MDM)"
;;
"$MOLE_ERR_AUTH_FAILED")
reason="authentication failed"
if [[ -f "$touchid_file" ]] && grep -q "pam_tid.so" "$touchid_file" 2> /dev/null; then
suggestion="Check your credentials or restart Terminal"
else
suggestion="Try 'mole touchid' to enable fingerprint auth"
fi
;;
"$MOLE_ERR_READONLY_FS")
reason="filesystem is read-only"
suggestion="Check if disk needs repair"
;;
*)
reason="permission denied"
if [[ -f "$touchid_file" ]] && grep -q "pam_tid.so" "$touchid_file" 2> /dev/null; then
suggestion="Try running again or check file ownership"
else
suggestion="Try 'mole touchid' or check with 'ls -l'"
fi
;;
esac
echo "$reason|$suggestion"
}
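And the matching consumer, splitting the pipe-delimited pair (the uninstall path later in this diff does the same):
diagnosis=$(diagnose_removal_failure "$ret" "$app_name")
IFS='|' read -r reason suggestion <<< "$diagnosis"
echo "Failed: $reason"
[[ -n "$suggestion" ]] && echo "Hint: $suggestion"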

View File

@@ -170,25 +170,27 @@ read_key() {
case "$key" in
$'\n' | $'\r') echo "ENTER" ;;
$'\x7f' | $'\x08') echo "DELETE" ;;
$'\x15') echo "CLEAR_LINE" ;; # Ctrl+U (often mapped from Cmd+Delete in terminals)
$'\x1b')
# Check if this is an escape sequence (arrow keys) or ESC key
if IFS= read -r -s -n 1 -t 0.1 rest 2> /dev/null; then
if IFS= read -r -s -n 1 -t 1 rest 2> /dev/null; then
if [[ "$rest" == "[" ]]; then
if IFS= read -r -s -n 1 -t 0.1 rest2 2> /dev/null; then
if IFS= read -r -s -n 1 -t 1 rest2 2> /dev/null; then
case "$rest2" in
"A") echo "UP" ;;
"B") echo "DOWN" ;;
"C") echo "RIGHT" ;;
"D") echo "LEFT" ;;
"3")
IFS= read -r -s -n 1 -t 0.1 rest3 2> /dev/null
IFS= read -r -s -n 1 -t 1 rest3 2> /dev/null
[[ "$rest3" == "~" ]] && echo "DELETE" || echo "OTHER"
;;
*) echo "OTHER" ;;
esac
else echo "QUIT"; fi
else
echo "QUIT"
fi
elif [[ "$rest" == "O" ]]; then
if IFS= read -r -s -n 1 -t 0.1 rest2 2> /dev/null; then
if IFS= read -r -s -n 1 -t 1 rest2 2> /dev/null; then
case "$rest2" in
"A") echo "UP" ;;
"B") echo "DOWN" ;;
@@ -198,11 +200,9 @@ read_key() {
esac
else echo "OTHER"; fi
else
# Not an escape sequence, it's ESC key
echo "QUIT"
fi
else
# No following characters, it's ESC key
echo "QUIT"
fi
;;
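The longer -t timeouts above implement the classic ESC-versus-escape-sequence disambiguation; a minimal standalone sketch (simplified, not the full key map):
IFS= read -r -s -n 1 key
if [[ "$key" == $'\x1b' ]]; then
  # Arrow keys arrive as ESC [ A..D almost instantly; a lone ESC has no
  # follow-up byte, so a timed read on the next byte tells them apart.
  if IFS= read -r -s -n 1 -t 1 rest && [[ "$rest" == "[" ]]; then
    IFS= read -r -s -n 1 -t 1 code
    case "$code" in
      A) echo "UP" ;; B) echo "DOWN" ;; C) echo "RIGHT" ;; D) echo "LEFT" ;;
    esac
  else
    echo "QUIT"  # bare ESC
  fi
fi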
@@ -231,6 +231,7 @@ read_key() {
'l' | 'L') echo "RIGHT" ;;
$'\x03') echo "QUIT" ;;
$'\x7f' | $'\x08') echo "DELETE" ;;
$'\x15') echo "CLEAR_LINE" ;; # Ctrl+U
$'\x1b')
if IFS= read -r -s -n 1 -t 1 rest 2> /dev/null; then
if [[ "$rest" == "[" ]]; then

View File

@@ -12,9 +12,12 @@ format_app_display() {
# Use common function from ui.sh to format last used time
local compact_last_used
compact_last_used=$(format_last_used_summary "$last_used")
if [[ -z "$compact_last_used" || "$compact_last_used" == "Unknown" || "$compact_last_used" == "Never" ]]; then
compact_last_used="..."
fi
# Format size
local size_str="Unknown"
local size_str="..."
[[ "$size" != "0" && "$size" != "" && "$size" != "Unknown" ]] && size_str="$size"
# Calculate available width for app name based on terminal width
@@ -111,15 +114,17 @@ select_apps_for_uninstall() {
[[ $max_name_width -gt 60 ]] && max_name_width=60
local -a menu_options=()
# Prepare metadata (comma-separated) for sorting/filtering inside the menu
local epochs_csv=""
local sizekb_csv=""
local -a names_arr=()
local has_epoch_metadata=false
local has_size_metadata=false
local idx=0
for app_data in "${apps_data[@]}"; do
# Keep extended field 7 (size_kb) if present
IFS='|' read -r epoch _ display_name _ size last_used size_kb <<< "$app_data"
menu_options+=("$(format_app_display "$display_name" "$size" "$last_used" "$terminal_width" "$max_name_width")")
# Build csv lists (avoid trailing commas)
[[ "${epoch:-0}" =~ ^[0-9]+$ && "${epoch:-0}" -gt 0 ]] && has_epoch_metadata=true
[[ "${size_kb:-0}" =~ ^[0-9]+$ && "${size_kb:-0}" -gt 0 ]] && has_size_metadata=true
if [[ $idx -eq 0 ]]; then
epochs_csv="${epoch:-0}"
sizekb_csv="${size_kb:-0}"
@@ -127,8 +132,12 @@ select_apps_for_uninstall() {
epochs_csv+=",${epoch:-0}"
sizekb_csv+=",${size_kb:-0}"
fi
names_arr+=("$display_name")
((idx++))
done
# Use newline separator for names (safe for names with commas)
local names_newline
names_newline=$(printf '%s\n' "${names_arr[@]}")
# Clear loading message
if [[ $app_count -gt 100 ]]; then
@@ -141,10 +150,17 @@ select_apps_for_uninstall() {
# - MOLE_MENU_META_EPOCHS: numeric last_used_epoch per item
# - MOLE_MENU_META_SIZEKB: numeric size in KB per item
# The menu will gracefully fallback if these are unset or malformed.
export MOLE_MENU_META_EPOCHS="$epochs_csv"
export MOLE_MENU_META_SIZEKB="$sizekb_csv"
# Optional: allow default sort override via env (date|name|size)
# export MOLE_MENU_SORT_DEFAULT="${MOLE_MENU_SORT_DEFAULT:-date}"
if [[ $has_epoch_metadata == true ]]; then
export MOLE_MENU_META_EPOCHS="$epochs_csv"
else
unset MOLE_MENU_META_EPOCHS
fi
if [[ $has_size_metadata == true ]]; then
export MOLE_MENU_META_SIZEKB="$sizekb_csv"
else
unset MOLE_MENU_META_SIZEKB
fi
export MOLE_MENU_FILTER_NAMES="$names_newline"
# Use paginated menu - result will be stored in MOLE_SELECTION_RESULT
# Note: paginated_multi_select enters alternate screen and handles clearing
@@ -153,14 +169,9 @@ select_apps_for_uninstall() {
local exit_code=$?
# Clean env leakage for safety
unset MOLE_MENU_META_EPOCHS MOLE_MENU_META_SIZEKB
unset MOLE_MENU_META_EPOCHS MOLE_MENU_META_SIZEKB MOLE_MENU_FILTER_NAMES
# leave MOLE_MENU_SORT_DEFAULT untouched if user set it globally
# Refresh signal handling
if [[ $exit_code -eq 10 ]]; then
return 10
fi
if [[ $exit_code -ne 0 ]]; then
return 1
fi
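For reference, a minimal sketch of the optional metadata contract these exports establish (values and the call shape are illustrative, not the project's exact invocation):
export MOLE_MENU_META_EPOCHS="1700000000,0,1695000000"      # last-used epoch per item
export MOLE_MENU_META_SIZEKB="204800,512,0"                 # size in KB per item
export MOLE_MENU_FILTER_NAMES=$'Safari\nXcode\nMy App, v2'  # newline-separated (comma-safe) names
paginated_multi_select "Select apps to uninstall" "${menu_options[@]}"  # assumed signature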

View File

@@ -89,13 +89,18 @@ paginated_multi_select() {
local top_index=0
local sort_mode="${MOLE_MENU_SORT_MODE:-${MOLE_MENU_SORT_DEFAULT:-date}}" # date|name|size
local sort_reverse="${MOLE_MENU_SORT_REVERSE:-false}"
local filter_text="" # Filter keyword
local filter_text_lower=""
# Metadata (optional)
# epochs[i] -> last_used_epoch (numeric) for item i
# sizekb[i] -> size in KB (numeric) for item i
# filter_names[i] -> name for filtering (if not set, use items[i])
local -a epochs=()
local -a sizekb=()
local -a filter_names=()
local has_metadata="false"
local has_filter_names="false"
if [[ -n "${MOLE_MENU_META_EPOCHS:-}" ]]; then
while IFS= read -r v; do epochs+=("${v:-0}"); done < <(_pm_parse_csv_to_array "$MOLE_MENU_META_EPOCHS")
has_metadata="true"
@@ -104,6 +109,10 @@ paginated_multi_select() {
while IFS= read -r v; do sizekb+=("${v:-0}"); done < <(_pm_parse_csv_to_array "$MOLE_MENU_META_SIZEKB")
has_metadata="true"
fi
if [[ -n "${MOLE_MENU_FILTER_NAMES:-}" ]]; then
while IFS= read -r v; do filter_names+=("$v"); done <<< "$MOLE_MENU_FILTER_NAMES"
has_filter_names="true"
fi
# If no metadata, force name sorting and disable sorting controls
if [[ "$has_metadata" == "false" && "$sort_mode" != "name" ]]; then
@@ -113,10 +122,20 @@ paginated_multi_select() {
# Index mappings
local -a orig_indices=()
local -a view_indices=()
local -a filter_targets_lower=()
local i
for ((i = 0; i < total_items; i++)); do
orig_indices[i]=$i
view_indices[i]=$i
local filter_target
if [[ $has_filter_names == true && -n "${filter_names[i]:-}" ]]; then
filter_target="${filter_names[i]}"
else
filter_target="${items[i]}"
fi
local filter_target_lower
filter_target_lower=$(printf "%s" "$filter_target" | LC_ALL=C tr '[:upper:]' '[:lower:]')
filter_targets_lower[i]="$filter_target_lower"
done
local -a selected=()
@@ -163,8 +182,9 @@ paginated_multi_select() {
# Cleanup function
cleanup() {
trap - EXIT INT TERM
export MOLE_MENU_SORT_MODE="$sort_mode"
export MOLE_MENU_SORT_REVERSE="$sort_reverse"
unset MOLE_READ_KEY_FORCE_CHAR
export MOLE_MENU_SORT_MODE="${sort_mode:-name}"
export MOLE_MENU_SORT_REVERSE="${sort_reverse:-false}"
restore_terminal
}
@@ -232,55 +252,111 @@ paginated_multi_select() {
printf "%s%s\n" "$clear_line" "$line" >&2
}
# Rebuild the view_indices applying sort
rebuild_view() {
# Sort (skip if no metadata)
local sort_cache_key=""
local -a sorted_indices_cache=()
local filter_cache_key=""
local filter_cache_text_lower=""
local -a filter_cache_indices=()
ensure_sorted_indices() {
local requested_key="${sort_mode}:${sort_reverse}:${has_metadata}"
if [[ "$requested_key" == "$sort_cache_key" && ${#sorted_indices_cache[@]} -gt 0 ]]; then
return
fi
if [[ "$has_metadata" == "false" ]]; then
# No metadata: just use original indices
view_indices=("${orig_indices[@]}")
elif [[ ${#orig_indices[@]} -eq 0 ]]; then
view_indices=()
sorted_indices_cache=("${orig_indices[@]}")
sort_cache_key="$requested_key"
return
fi
# Build sort key once; filtering should reuse this cached order.
local sort_key
if [[ "$sort_mode" == "date" ]]; then
# Date: ascending by default (oldest first)
sort_key="-k1,1n"
[[ "$sort_reverse" == "true" ]] && sort_key="-k1,1nr"
elif [[ "$sort_mode" == "size" ]]; then
# Size: descending by default (largest first)
sort_key="-k1,1nr"
[[ "$sort_reverse" == "true" ]] && sort_key="-k1,1n"
else
# Build sort key
local sort_key
if [[ "$sort_mode" == "date" ]]; then
# Date: ascending by default (oldest first)
sort_key="-k1,1n"
[[ "$sort_reverse" == "true" ]] && sort_key="-k1,1nr"
elif [[ "$sort_mode" == "size" ]]; then
# Size: descending by default (largest first)
sort_key="-k1,1nr"
[[ "$sort_reverse" == "true" ]] && sort_key="-k1,1n"
# Name: ascending by default (A to Z)
sort_key="-k1,1f"
[[ "$sort_reverse" == "true" ]] && sort_key="-k1,1fr"
fi
local tmpfile
tmpfile=$(mktemp 2> /dev/null) || tmpfile=""
if [[ -n "$tmpfile" ]]; then
local k id
for id in "${orig_indices[@]}"; do
case "$sort_mode" in
date) k="${epochs[id]:-0}" ;;
size) k="${sizekb[id]:-0}" ;;
name | *) k="${items[id]}|${id}" ;;
esac
printf "%s\t%s\n" "$k" "$id" >> "$tmpfile"
done
sorted_indices_cache=()
while IFS=$'\t' read -r _key _id; do
[[ -z "$_id" ]] && continue
sorted_indices_cache+=("$_id")
done < <(LC_ALL=C sort -t $'\t' $sort_key -- "$tmpfile" 2> /dev/null)
rm -f "$tmpfile"
else
sorted_indices_cache=("${orig_indices[@]}")
fi
sort_cache_key="$requested_key"
}
# Rebuild the view_indices applying filter over cached sort order
rebuild_view() {
ensure_sorted_indices
if [[ -n "$filter_text_lower" ]]; then
local -a source_indices=()
if [[ "$filter_cache_key" == "$sort_cache_key" &&
"$filter_text_lower" == "$filter_cache_text_lower"* &&
${#filter_cache_indices[@]} -gt 0 ]]; then
source_indices=("${filter_cache_indices[@]}")
else
# Name: ascending by default (A to Z)
sort_key="-k1,1f"
[[ "$sort_reverse" == "true" ]] && sort_key="-k1,1fr"
if [[ ${#sorted_indices_cache[@]} -gt 0 ]]; then
source_indices=("${sorted_indices_cache[@]}")
else
source_indices=()
fi
fi
# Create temporary file for sorting
local tmpfile
tmpfile=$(mktemp 2> /dev/null) || tmpfile=""
if [[ -n "$tmpfile" ]]; then
local k id
for id in "${orig_indices[@]}"; do
case "$sort_mode" in
date) k="${epochs[id]:-0}" ;;
size) k="${sizekb[id]:-0}" ;;
name | *) k="${items[id]}|${id}" ;;
esac
printf "%s\t%s\n" "$k" "$id" >> "$tmpfile"
done
view_indices=()
local id
for id in "${source_indices[@]}"; do
if [[ "${filter_targets_lower[id]:-}" == *"$filter_text_lower"* ]]; then
view_indices+=("$id")
fi
done
view_indices=()
while IFS=$'\t' read -r _key _id; do
[[ -z "$_id" ]] && continue
view_indices+=("$_id")
done < <(LC_ALL=C sort -t $'\t' $sort_key -- "$tmpfile" 2> /dev/null)
rm -f "$tmpfile"
filter_cache_key="$sort_cache_key"
filter_cache_text_lower="$filter_text_lower"
if [[ ${#view_indices[@]} -gt 0 ]]; then
filter_cache_indices=("${view_indices[@]}")
else
# Fallback: no sorting
view_indices=("${orig_indices[@]}")
filter_cache_indices=()
fi
else
if [[ ${#sorted_indices_cache[@]} -gt 0 ]]; then
view_indices=("${sorted_indices_cache[@]}")
else
view_indices=()
fi
filter_cache_key="$sort_cache_key"
filter_cache_text_lower=""
if [[ ${#view_indices[@]} -gt 0 ]]; then
filter_cache_indices=("${view_indices[@]}")
else
filter_cache_indices=()
fi
fi
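The filter cache above leans on a simple property of substring matching; a standalone sketch of the same idea (variable names illustrative):
# If the new query merely extends the previous one, every new hit must
# already be among the previous hits, so scan the cached subset only.
if [[ "$new_query" == "$prev_query"* && ${#prev_hits[@]} -gt 0 ]]; then
  candidates=("${prev_hits[@]}")
else
  candidates=("${all_sorted[@]}")
fi
new_hits=()
for i in "${candidates[@]}"; do
  [[ "${names_lower[i]}" == *"$new_query"* ]] && new_hits+=("$i")
done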
@@ -321,19 +397,45 @@ paginated_multi_select() {
fi
}
draw_header() {
printf "\033[1;1H" >&2
if [[ -n "$filter_text" ]]; then
printf "\r\033[2K${PURPLE_BOLD}%s${NC} ${YELLOW}/ Filter: ${filter_text}_${NC} ${GRAY}(%d/%d)${NC}\n" "${title}" "${#view_indices[@]}" "$total_items" >&2
elif [[ -n "${MOLE_READ_KEY_FORCE_CHAR:-}" ]]; then
printf "\r\033[2K${PURPLE_BOLD}%s${NC} ${YELLOW}/ Filter: _ ${NC}${GRAY}(type to search)${NC}\n" "${title}" >&2
else
printf "\r\033[2K${PURPLE_BOLD}%s${NC} ${GRAY}%d/%d selected${NC}\n" "${title}" "$selected_count" "$total_items" >&2
fi
}
# Handle filter character input (reduces code duplication)
# Returns 0 if character was handled, 1 if not in filter mode
handle_filter_char() {
local char="$1"
if [[ -z "${MOLE_READ_KEY_FORCE_CHAR:-}" ]]; then
return 1
fi
if [[ "$char" =~ ^[[:print:]]$ ]]; then
local char_lower
char_lower=$(printf "%s" "$char" | LC_ALL=C tr '[:upper:]' '[:lower:]')
filter_text+="$char"
filter_text_lower+="$char_lower"
rebuild_view
cursor_pos=0
top_index=0
need_full_redraw=true
fi
return 0
}
# Draw the complete menu
draw_menu() {
# Recalculate items_per_page dynamically to handle window resize
items_per_page=$(_pm_calculate_items_per_page)
local clear_line=$'\r\033[2K'
printf "\033[H" >&2
local clear_line="\r\033[2K"
# Use cached selection count (maintained incrementally on toggle)
# No need to loop through all items anymore!
# Header only
printf "${clear_line}${PURPLE_BOLD}%s${NC} ${GRAY}%d/%d selected${NC}\n" "${title}" "$selected_count" "$total_items" >&2
draw_header
# Visible slice
local visible_total=${#view_indices[@]}
@@ -407,18 +509,21 @@ paginated_multi_select() {
local reverse_arrow="↑"
[[ "$sort_reverse" == "true" ]] && reverse_arrow="↓"
local refresh="${GRAY}R Refresh${NC}"
local sort_ctrl="${GRAY}S ${sort_status}${NC}"
local order_ctrl="${GRAY}O ${reverse_arrow}${NC}"
local filter_ctrl="${GRAY}/ Filter${NC}"
if [[ "$has_metadata" == "true" ]]; then
if [[ -n "$filter_text" ]]; then
local -a _segs_filter=("${GRAY}Backspace${NC}" "${GRAY}Ctrl+U Clear${NC}" "${GRAY}ESC Clear${NC}")
_print_wrapped_controls "$sep" "${_segs_filter[@]}"
elif [[ "$has_metadata" == "true" ]]; then
# With metadata: show sort controls
local term_width="${COLUMNS:-}"
[[ -z "$term_width" ]] && term_width=$(tput cols 2> /dev/null || echo 80)
[[ "$term_width" =~ ^[0-9]+$ ]] || term_width=80
# Full controls
local -a _segs=("$nav" "$space_select" "$enter" "$refresh" "$sort_ctrl" "$order_ctrl" "$exit")
local -a _segs=("$nav" "$space_select" "$enter" "$sort_ctrl" "$order_ctrl" "$filter_ctrl" "$exit")
# Calculate width
local total_len=0 seg_count=${#_segs[@]}
@@ -429,7 +534,7 @@ paginated_multi_select() {
# Level 1: Remove "Space Select" if too wide
if [[ $total_len -gt $term_width ]]; then
_segs=("$nav" "$enter" "$refresh" "$sort_ctrl" "$order_ctrl" "$exit")
_segs=("$nav" "$enter" "$sort_ctrl" "$order_ctrl" "$filter_ctrl" "$exit")
total_len=0
seg_count=${#_segs[@]}
@@ -440,14 +545,14 @@ paginated_multi_select() {
# Level 2: Remove sort label if still too wide
if [[ $total_len -gt $term_width ]]; then
_segs=("$nav" "$enter" "$refresh" "$order_ctrl" "$exit")
_segs=("$nav" "$enter" "$order_ctrl" "$filter_ctrl" "$exit")
fi
fi
_print_wrapped_controls "$sep" "${_segs[@]}"
else
# Without metadata: basic controls
local -a _segs_simple=("$nav" "$space_select" "$enter" "$refresh" "$exit")
local -a _segs_simple=("$nav" "$space_select" "$enter" "$filter_ctrl" "$exit")
_print_wrapped_controls "$sep" "${_segs_simple[@]}"
fi
printf "${clear_line}" >&2
@@ -473,52 +578,63 @@ paginated_multi_select() {
case "$key" in
"QUIT")
cleanup
return 1
if [[ -n "$filter_text" || -n "${MOLE_READ_KEY_FORCE_CHAR:-}" ]]; then
filter_text=""
filter_text_lower=""
unset MOLE_READ_KEY_FORCE_CHAR
rebuild_view
cursor_pos=0
top_index=0
need_full_redraw=true
else
cleanup
return 1
fi
;;
"UP")
if [[ ${#view_indices[@]} -eq 0 ]]; then
:
elif [[ $cursor_pos -gt 0 ]]; then
# Simple cursor move - only redraw affected rows
local old_cursor=$cursor_pos
((cursor_pos--))
local new_cursor=$cursor_pos
# Calculate terminal row positions (+3: row 1=header, row 2=blank, row 3=first item)
if [[ -n "$filter_text" || -n "${MOLE_READ_KEY_FORCE_CHAR:-}" ]]; then
draw_header
fi
local old_row=$((old_cursor + 3))
local new_row=$((new_cursor + 3))
# Quick redraw: update only the two affected rows
printf "\033[%d;1H" "$old_row" >&2
render_item "$old_cursor" false
printf "\033[%d;1H" "$new_row" >&2
render_item "$new_cursor" true
# CRITICAL: Move cursor to footer to avoid visual artifacts
printf "\033[%d;1H" "$((items_per_page + 4))" >&2
prev_cursor_pos=$cursor_pos
continue # Skip full redraw
continue
elif [[ $top_index -gt 0 ]]; then
# Scroll up - redraw visible items only
((top_index--))
# Redraw all visible items (faster than full screen redraw)
if [[ -n "$filter_text" || -n "${MOLE_READ_KEY_FORCE_CHAR:-}" ]]; then
draw_header
fi
local start_idx=$top_index
local end_idx=$((top_index + items_per_page - 1))
local visible_total=${#view_indices[@]}
[[ $end_idx -ge $visible_total ]] && end_idx=$((visible_total - 1))
for ((i = start_idx; i <= end_idx; i++)); do
local row=$((i - start_idx + 3)) # +3 for header
local row=$((i - start_idx + 3))
printf "\033[%d;1H" "$row" >&2
local is_current=false
[[ $((i - start_idx)) -eq $cursor_pos ]] && is_current=true
render_item $((i - start_idx)) $is_current
done
# Move cursor to footer
printf "\033[%d;1H" "$((items_per_page + 4))" >&2
prev_cursor_pos=$cursor_pos
@@ -537,28 +653,27 @@ paginated_multi_select() {
[[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
if [[ $cursor_pos -lt $((visible_count - 1)) ]]; then
# Simple cursor move - only redraw affected rows
local old_cursor=$cursor_pos
((cursor_pos++))
local new_cursor=$cursor_pos
# Calculate terminal row positions (+3: row 1=header, row 2=blank, row 3=first item)
if [[ -n "$filter_text" || -n "${MOLE_READ_KEY_FORCE_CHAR:-}" ]]; then
draw_header
fi
local old_row=$((old_cursor + 3))
local new_row=$((new_cursor + 3))
# Quick redraw: update only the two affected rows
printf "\033[%d;1H" "$old_row" >&2
render_item "$old_cursor" false
printf "\033[%d;1H" "$new_row" >&2
render_item "$new_cursor" true
# CRITICAL: Move cursor to footer to avoid visual artifacts
printf "\033[%d;1H" "$((items_per_page + 4))" >&2
prev_cursor_pos=$cursor_pos
continue # Skip full redraw
continue
elif [[ $((top_index + visible_count)) -lt ${#view_indices[@]} ]]; then
# Scroll down - redraw visible items only
((top_index++))
visible_count=$((${#view_indices[@]} - top_index))
[[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
@@ -566,21 +681,23 @@ paginated_multi_select() {
cursor_pos=$((visible_count - 1))
fi
# Redraw all visible items (faster than full screen redraw)
if [[ -n "$filter_text" || -n "${MOLE_READ_KEY_FORCE_CHAR:-}" ]]; then
draw_header
fi
local start_idx=$top_index
local end_idx=$((top_index + items_per_page - 1))
local visible_total=${#view_indices[@]}
[[ $end_idx -ge $visible_total ]] && end_idx=$((visible_total - 1))
for ((i = start_idx; i <= end_idx; i++)); do
local row=$((i - start_idx + 3)) # +3 for header
local row=$((i - start_idx + 3))
printf "\033[%d;1H" "$row" >&2
local is_current=false
[[ $((i - start_idx)) -eq $cursor_pos ]] && is_current=true
render_item $((i - start_idx)) $is_current
done
# Move cursor to footer
printf "\033[%d;1H" "$((items_per_page + 4))" >&2
prev_cursor_pos=$cursor_pos
@@ -617,21 +734,10 @@ paginated_multi_select() {
continue # Skip full redraw
fi
;;
"RETRY")
# 'R' toggles reverse order (only if metadata available)
if [[ "$has_metadata" == "true" ]]; then
if [[ "$sort_reverse" == "true" ]]; then
sort_reverse="false"
else
sort_reverse="true"
fi
rebuild_view
need_full_redraw=true
fi
;;
"CHAR:s" | "CHAR:S")
if [[ "$has_metadata" == "true" ]]; then
# Cycle sort mode (only if metadata available)
if handle_filter_char "${key#CHAR:}"; then
: # Handled as filter input
elif [[ "$has_metadata" == "true" ]]; then
case "$sort_mode" in
date) sort_mode="name" ;;
name) sort_mode="size" ;;
@@ -642,8 +748,9 @@ paginated_multi_select() {
fi
;;
"CHAR:j")
# Down navigation (vim style)
if [[ ${#view_indices[@]} -gt 0 ]]; then
if handle_filter_char "${key#CHAR:}"; then
: # Handled as filter input
elif [[ ${#view_indices[@]} -gt 0 ]]; then
local absolute_index=$((top_index + cursor_pos))
local last_index=$((${#view_indices[@]} - 1))
if [[ $absolute_index -lt $last_index ]]; then
@@ -659,8 +766,9 @@ paginated_multi_select() {
fi
;;
"CHAR:k")
# Up navigation (vim style)
if [[ ${#view_indices[@]} -gt 0 ]]; then
if handle_filter_char "${key#CHAR:}"; then
: # Handled as filter input
elif [[ ${#view_indices[@]} -gt 0 ]]; then
if [[ $cursor_pos -gt 0 ]]; then
((cursor_pos--))
need_full_redraw=true
@@ -670,14 +778,10 @@ paginated_multi_select() {
fi
fi
;;
"CHAR:r" | "CHAR:R")
# Trigger Refresh signal
cleanup
return 10
;;
"CHAR:o" | "CHAR:O")
if [[ "$has_metadata" == "true" ]]; then
# O toggles reverse order
if handle_filter_char "${key#CHAR:}"; then
: # Handled as filter input
elif [[ "$has_metadata" == "true" ]]; then
if [[ "$sort_reverse" == "true" ]]; then
sort_reverse="false"
else
@@ -687,6 +791,41 @@ paginated_multi_select() {
need_full_redraw=true
fi
;;
"CHAR:/" | "CHAR:?")
if [[ -n "${MOLE_READ_KEY_FORCE_CHAR:-}" ]]; then
unset MOLE_READ_KEY_FORCE_CHAR
else
export MOLE_READ_KEY_FORCE_CHAR=1
fi
need_full_redraw=true
;;
"DELETE")
if [[ -n "$filter_text" ]]; then
filter_text="${filter_text%?}"
filter_text_lower="${filter_text_lower%?}"
if [[ -z "$filter_text" ]]; then
filter_text_lower=""
unset MOLE_READ_KEY_FORCE_CHAR
fi
rebuild_view
cursor_pos=0
top_index=0
need_full_redraw=true
fi
;;
"CLEAR_LINE")
if [[ -n "$filter_text" ]]; then
filter_text=""
filter_text_lower=""
rebuild_view
cursor_pos=0
top_index=0
need_full_redraw=true
fi
;;
"CHAR:"*)
handle_filter_char "${key#CHAR:}" || true
;;
"ENTER")
# Smart Enter behavior
# 1. Check if any items are already selected
@@ -724,8 +863,9 @@ paginated_multi_select() {
trap - EXIT INT TERM
MOLE_SELECTION_RESULT="$final_result"
export MOLE_MENU_SORT_MODE="$sort_mode"
export MOLE_MENU_SORT_REVERSE="$sort_reverse"
unset MOLE_READ_KEY_FORCE_CHAR
export MOLE_MENU_SORT_MODE="${sort_mode:-name}"
export MOLE_MENU_SORT_REVERSE="${sort_reverse:-false}"
restore_terminal
return 0
;;

View File

@@ -156,18 +156,8 @@ remove_file_list() {
continue
fi
# Symlinks are handled separately using rm (not safe_remove/safe_sudo_remove)
# because safe_sudo_remove() refuses symlinks entirely as a TOCTOU protection.
# This is safe because:
# 1. The path has already passed validate_path_for_deletion() above
# 2. rm on a symlink only removes the link itself, NOT the target
# 3. The symlink deletion is logged via operations.log
if [[ -L "$file" ]]; then
if [[ "$use_sudo" == "true" ]]; then
sudo rm "$file" 2> /dev/null && ((++count)) || true
else
rm "$file" 2> /dev/null && ((++count)) || true
fi
safe_remove_symlink "$file" "$use_sudo" && ((++count)) || true
else
if [[ "$use_sudo" == "true" ]]; then
safe_sudo_remove "$file" && ((++count)) || true
@@ -194,7 +184,16 @@ batch_uninstall_applications() {
old_trap_int=$(trap -p INT)
old_trap_term=$(trap -p TERM)
_cleanup_sudo_keepalive() {
if [[ -n "${sudo_keepalive_pid:-}" ]]; then
kill "$sudo_keepalive_pid" 2> /dev/null || true
wait "$sudo_keepalive_pid" 2> /dev/null || true
sudo_keepalive_pid=""
fi
}
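The cleanup helper above pairs with a keepalive started before the batch begins; the conventional pattern looks like this (a sketch under assumption, not the project's exact code):
sudo -v                                            # prime the credential cache once
( while true; do sudo -n true; sleep 50; done ) &  # refresh it in the background
sudo_keepalive_pid=$!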
_restore_uninstall_traps() {
_cleanup_sudo_keepalive
if [[ -n "$old_trap_int" ]]; then
eval "$old_trap_int"
else
@@ -207,8 +206,8 @@ batch_uninstall_applications() {
fi
}
# Trap to clean up spinner and uninstall mode on interrupt
trap 'stop_inline_spinner 2>/dev/null; unset MOLE_UNINSTALL_MODE; echo ""; _restore_uninstall_traps; return 130' INT TERM
# Trap to clean up spinner, sudo keepalive, and uninstall mode on interrupt
trap 'stop_inline_spinner 2>/dev/null; _cleanup_sudo_keepalive; unset MOLE_UNINSTALL_MODE; echo ""; _restore_uninstall_traps; return 130' INT TERM
# Pre-scan: running apps, sudo needs, size.
local -a running_apps=()
@@ -260,16 +259,16 @@ batch_uninstall_applications() {
fi
# Size estimate includes related and system files.
local app_size_kb=$(get_path_size_kb "$app_path")
local related_files=$(find_app_files "$bundle_id" "$app_name")
local related_size_kb=$(calculate_total_size "$related_files")
local app_size_kb=$(get_path_size_kb "$app_path" || echo "0")
local related_files=$(find_app_files "$bundle_id" "$app_name" || true)
local related_size_kb=$(calculate_total_size "$related_files" || echo "0")
# system_files is a newline-separated string, not an array.
# shellcheck disable=SC2178,SC2128
local system_files=$(find_app_system_files "$bundle_id" "$app_name")
local system_files=$(find_app_system_files "$bundle_id" "$app_name" || true)
# shellcheck disable=SC2128
local system_size_kb=$(calculate_total_size "$system_files")
local system_size_kb=$(calculate_total_size "$system_files" || echo "0")
local total_kb=$((app_size_kb + related_size_kb + system_size_kb))
((total_estimated_size += total_kb))
((total_estimated_size += total_kb)) || true
# shellcheck disable=SC2128
if [[ -n "$system_files" ]]; then
@@ -282,15 +281,15 @@ batch_uninstall_applications() {
# Check for sensitive user data once.
local has_sensitive_data="false"
if has_sensitive_data "$related_files"; then
if has_sensitive_data "$related_files" 2> /dev/null; then
has_sensitive_data="true"
fi
# Store details for later use (base64 keeps lists on one line).
local encoded_files
encoded_files=$(printf '%s' "$related_files" | base64 | tr -d '\n')
encoded_files=$(printf '%s' "$related_files" | base64 | tr -d '\n' || echo "")
local encoded_system_files
encoded_system_files=$(printf '%s' "$system_files" | base64 | tr -d '\n')
encoded_system_files=$(printf '%s' "$system_files" | base64 | tr -d '\n' || echo "")
app_details+=("$app_name|$app_path|$bundle_id|$total_kb|$encoded_files|$encoded_system_files|$has_sensitive_data|$needs_sudo|$is_brew_cask|$cask_name")
done
if [[ -t 1 ]]; then stop_inline_spinner; fi
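The base64 detour keeps multi-line file lists on one pipe-delimited record; decoding (as decode_file_list presumably does) is the mirror image. A sketch:
encoded=$(printf '%s' "$related_files" | base64 | tr -d '\n')  # one line, record-safe
restored=$(printf '%s' "$encoded" | base64 --decode)           # newlines come back intact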
@@ -301,18 +300,15 @@ batch_uninstall_applications() {
echo -e "${PURPLE_BOLD}Files to be removed:${NC}"
echo ""
# Warn if user data is detected.
local has_user_data=false
# Warn if brew cask apps are present.
local has_brew_cask=false
for detail in "${app_details[@]}"; do
IFS='|' read -r _ _ _ _ _ _ has_sensitive_data <<< "$detail"
if [[ "$has_sensitive_data" == "true" ]]; then
has_user_data=true
break
fi
IFS='|' read -r _ _ _ _ _ _ _ _ is_brew_cask_flag _ <<< "$detail"
[[ "$is_brew_cask_flag" == "true" ]] && has_brew_cask=true
done
if [[ "$has_user_data" == "true" ]]; then
echo -e "${GRAY}${ICON_WARNING}${NC} ${YELLOW}Note: Some apps contain user configurations/themes${NC}"
if [[ "$has_brew_cask" == "true" ]]; then
echo -e "${GRAY}${ICON_WARNING}${NC} ${YELLOW}Homebrew apps will be fully cleaned (--zap: removes configs & data)${NC}"
echo ""
fi
@@ -432,6 +428,7 @@ batch_uninstall_applications() {
local related_files=$(decode_file_list "$encoded_files" "$app_name")
local system_files=$(decode_file_list "$encoded_system_files" "$app_name")
local reason=""
local suggestion=""
# Show progress for current app
local brew_tag=""
@@ -480,12 +477,38 @@ batch_uninstall_applications() {
fi
fi
elif [[ "$needs_sudo" == true ]]; then
if ! safe_sudo_remove "$app_path"; then
local app_owner=$(get_file_owner "$app_path")
if [[ -n "$app_owner" && "$app_owner" != "$current_user" && "$app_owner" != "root" ]]; then
reason="owned by $app_owner, try 'sudo chown $(whoami) \"$app_path\"'"
if [[ -L "$app_path" ]]; then
local link_target
link_target=$(readlink "$app_path" 2> /dev/null)
if [[ -n "$link_target" ]]; then
local resolved_target="$link_target"
if [[ "$link_target" != /* ]]; then
local link_dir
link_dir=$(dirname "$app_path")
resolved_target=$(cd "$link_dir" 2> /dev/null && cd "$(dirname "$link_target")" 2> /dev/null && pwd)/$(basename "$link_target") 2> /dev/null || echo ""
fi
case "$resolved_target" in
/System/* | /usr/bin/* | /usr/lib/* | /bin/* | /sbin/* | /private/etc/*)
reason="protected system symlink, cannot remove"
;;
*)
if ! safe_remove_symlink "$app_path" "true"; then
reason="failed to remove symlink"
fi
;;
esac
else
reason="permission denied, try 'mole touchid' for passwordless sudo"
if ! safe_remove_symlink "$app_path" "true"; then
reason="failed to remove symlink"
fi
fi
else
local ret=0
safe_sudo_remove "$app_path" || ret=$?
if [[ $ret -ne 0 ]]; then
local diagnosis
diagnosis=$(diagnose_removal_failure "$ret" "$app_name")
IFS='|' read -r reason suggestion <<< "$diagnosis"
fi
fi
else
@@ -542,19 +565,21 @@ batch_uninstall_applications() {
[[ "$used_brew_successfully" == "true" ]] && ((brew_apps_removed++))
((files_cleaned++))
((total_items++))
success_items+=("$app_name")
success_items+=("$app_path")
else
# Show failure
if [[ -t 1 ]]; then
if [[ ${#app_details[@]} -gt 1 ]]; then
echo -e "${ICON_ERROR} [$current_index/${#app_details[@]}] ${app_name} ${GRAY}, $reason${NC}"
else
echo -e "${ICON_ERROR} ${app_name} failed: $reason"
fi
if [[ -n "${suggestion:-}" ]]; then
echo -e "${GRAY}${suggestion}${NC}"
fi
fi
((failed_count++))
failed_items+=("$app_name:$reason")
failed_items+=("$app_name:$reason:${suggestion:-}")
fi
done
@@ -566,7 +591,6 @@ batch_uninstall_applications() {
local -a summary_details=()
if [[ $success_count -gt 0 ]]; then
local success_list="${success_items[*]}"
local success_text="app"
[[ $success_count -gt 1 ]] && success_text="apps"
local success_line="Removed ${success_count} ${success_text}"
@@ -575,13 +599,15 @@ batch_uninstall_applications() {
fi
# Format app list with max 3 per line.
if [[ -n "$success_list" ]]; then
if [[ ${#success_items[@]} -gt 0 ]]; then
local idx=0
local is_first_line=true
local current_line=""
for app_name in "${success_items[@]}"; do
local display_item="${GREEN}${app_name}${NC}"
for success_path in "${success_items[@]}"; do
local display_name
display_name=$(basename "$success_path" .app)
local display_item="${GREEN}${display_name}${NC}"
if ((idx % 3 == 0)); then
if [[ -n "$current_line" ]]; then
@@ -617,8 +643,20 @@ batch_uninstall_applications() {
local failed_list="${failed_names[*]}"
local reason_summary="could not be removed"
local suggestion_text=""
if [[ $failed_count -eq 1 ]]; then
local first_reason=${failed_items[0]#*:}
# Extract reason and suggestion from format: app:reason:suggestion
local item="${failed_items[0]}"
local without_app="${item#*:}"
local first_reason="${without_app%%:*}"
local first_suggestion="${without_app#*:}"
# If the suggestion equals the reason, there was no suggestion part.
# Also guard against an empty suggestion.
if [[ "$first_suggestion" != "$first_reason" && -n "$first_suggestion" ]]; then
suggestion_text="${GRAY}${first_suggestion}${NC}"
fi
case "$first_reason" in
still*running*) reason_summary="is still running" ;;
remove*failed*) reason_summary="could not be removed" ;;
@@ -628,6 +666,9 @@ batch_uninstall_applications() {
esac
fi
summary_details+=("Failed: ${RED}${failed_list}${NC} ${reason_summary}")
if [[ -n "$suggestion_text" ]]; then
summary_details+=("$suggestion_text")
fi
fi
if [[ $success_count -eq 0 && $failed_count -eq 0 ]]; then
@@ -667,38 +708,15 @@ batch_uninstall_applications() {
fi
# Clean up Dock entries for uninstalled apps.
if [[ $success_count -gt 0 ]]; then
local -a removed_paths=()
for detail in "${app_details[@]}"; do
IFS='|' read -r app_name app_path _ _ _ _ <<< "$detail"
for success_name in "${success_items[@]}"; do
if [[ "$success_name" == "$app_name" ]]; then
removed_paths+=("$app_path")
break
fi
done
done
if [[ ${#removed_paths[@]} -gt 0 ]]; then
remove_apps_from_dock "${removed_paths[@]}" 2> /dev/null || true
fi
if [[ $success_count -gt 0 && ${#success_items[@]} -gt 0 ]]; then
remove_apps_from_dock "${success_items[@]}" 2> /dev/null || true
fi
# Clean up sudo keepalive if it was started.
if [[ -n "${sudo_keepalive_pid:-}" ]]; then
kill "$sudo_keepalive_pid" 2> /dev/null || true
wait "$sudo_keepalive_pid" 2> /dev/null || true
sudo_keepalive_pid=""
fi
_cleanup_sudo_keepalive
# Disable uninstall mode
unset MOLE_UNINSTALL_MODE
# Invalidate cache if any apps were successfully uninstalled.
if [[ $success_count -gt 0 ]]; then
local cache_file="$HOME/.cache/mole/app_scan_cache"
rm -f "$cache_file" 2> /dev/null || true
fi
_restore_uninstall_traps
unset -f _restore_uninstall_traps

View File

@@ -171,7 +171,7 @@ brew_uninstall_cask() {
is_homebrew_available || return 1
[[ -z "$cask_name" ]] && return 1
debug_log "Attempting brew uninstall --cask $cask_name"
debug_log "Attempting brew uninstall --cask --zap $cask_name"
# Ensure we have sudo access if needed, to prevent brew from hanging on password prompt
if [[ "${NONINTERACTIVE:-}" != "1" && -t 0 && -t 1 ]]; then
@@ -198,7 +198,7 @@ brew_uninstall_cask() {
# Run with timeout to prevent hangs from problematic cask scripts
local brew_exit=0
if HOMEBREW_NO_ENV_HINTS=1 HOMEBREW_NO_AUTO_UPDATE=1 NONINTERACTIVE=1 \
run_with_timeout "$timeout" brew uninstall --cask "$cask_name" 2>&1; then
run_with_timeout "$timeout" brew uninstall --cask --zap "$cask_name" 2>&1; then
uninstall_ok=true
else
brew_exit=$?
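For context: --zap additionally runs the cask's zap stanza, deleting the configs, caches, and saved state the cask author declared, e.g. (cask name hypothetical):
brew uninstall --cask --zap example-app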

mole
View File

@@ -13,7 +13,7 @@ source "$SCRIPT_DIR/lib/core/commands.sh"
trap cleanup_temp_files EXIT INT TERM
# Version and update helpers
VERSION="1.23.2"
VERSION="1.25.0"
MOLE_TAGLINE="Deep clean and optimize your Mac."
is_touchid_configured() {

View File

@@ -44,9 +44,10 @@ write_raycast_script() {
local title="$2"
local mo_bin="$3"
local subcommand="$4"
local raw_cmd="\"${mo_bin}\" ${subcommand}"
local cmd_escaped="${raw_cmd//\\/\\\\}"
cmd_escaped="${cmd_escaped//\"/\\\"}"
local cmd_for_applescript="${mo_bin//\\/\\\\}"
cmd_for_applescript="${cmd_for_applescript//\"/\\\"}"
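A quick illustration of the two-step escaping (path hypothetical):
mo_bin='/Users/me/My "Tools"/mo'
esc="${mo_bin//\\/\\\\}"   # escape backslashes first
esc="${esc//\"/\\\"}"      # then double quotes
# -> /Users/me/My \"Tools\"/mo, safe inside an AppleScript string literal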
cat > "$target" << EOF
#!/bin/bash
@@ -59,12 +60,18 @@ write_raycast_script() {
# Optional parameters:
# @raycast.icon 🐹
# ──────────────────────────────────────────────────────────
# Script execution begins below
# ──────────────────────────────────────────────────────────
set -euo pipefail
echo "🐹 Running ${title}..."
echo ""
CMD="${raw_cmd}"
CMD_ESCAPED="${cmd_escaped}"
MO_BIN="${mo_bin}"
MO_SUBCOMMAND="${subcommand}"
MO_BIN_ESCAPED="${cmd_for_applescript}"
has_app() {
local name="\$1"
@@ -113,8 +120,8 @@ launch_with_app() {
case "\$app" in
Terminal)
if command -v osascript >/dev/null 2>&1; then
osascript <<'APPLESCRIPT'
set targetCommand to "${cmd_escaped}"
osascript <<APPLESCRIPT
set targetCommand to "\${MO_BIN_ESCAPED} \${MO_SUBCOMMAND}"
tell application "Terminal"
activate
do script targetCommand
@@ -125,8 +132,8 @@ APPLESCRIPT
;;
iTerm|iTerm2)
if command -v osascript >/dev/null 2>&1; then
osascript <<'APPLESCRIPT'
set targetCommand to "${cmd_escaped}"
osascript <<APPLESCRIPT
set targetCommand to "\${MO_BIN_ESCAPED} \${MO_SUBCOMMAND}"
tell application "iTerm2"
activate
try
@@ -150,52 +157,49 @@ APPLESCRIPT
;;
Alacritty)
if launcher_available "Alacritty" && command -v open >/dev/null 2>&1; then
open -na "Alacritty" --args -e /bin/zsh -lc "${raw_cmd}"
open -na "Alacritty" --args -e /bin/zsh -lc "\"\${MO_BIN}\" \${MO_SUBCOMMAND}"
return \$?
fi
;;
Kitty)
if has_bin "kitty"; then
kitty --hold /bin/zsh -lc "${raw_cmd}"
kitty --hold /bin/zsh -lc "\"\${MO_BIN}\" \${MO_SUBCOMMAND}"
return \$?
elif [[ -x "/Applications/kitty.app/Contents/MacOS/kitty" ]]; then
"/Applications/kitty.app/Contents/MacOS/kitty" --hold /bin/zsh -lc "${raw_cmd}"
"/Applications/kitty.app/Contents/MacOS/kitty" --hold /bin/zsh -lc "\"\${MO_BIN}\" \${MO_SUBCOMMAND}"
return \$?
fi
;;
WezTerm)
if has_bin "wezterm"; then
wezterm start -- /bin/zsh -lc "${raw_cmd}"
wezterm start -- /bin/zsh -lc "\"\${MO_BIN}\" \${MO_SUBCOMMAND}"
return \$?
elif [[ -x "/Applications/WezTerm.app/Contents/MacOS/wezterm" ]]; then
"/Applications/WezTerm.app/Contents/MacOS/wezterm" start -- /bin/zsh -lc "${raw_cmd}"
"/Applications/WezTerm.app/Contents/MacOS/wezterm" start -- /bin/zsh -lc "\"\${MO_BIN}\" \${MO_SUBCOMMAND}"
return \$?
fi
;;
Ghostty)
if has_bin "ghostty"; then
ghostty --command "/bin/zsh" -- -lc "${raw_cmd}"
return \$?
elif [[ -x "/Applications/Ghostty.app/Contents/MacOS/ghostty" ]]; then
"/Applications/Ghostty.app/Contents/MacOS/ghostty" --command "/bin/zsh" -- -lc "${raw_cmd}"
if launcher_available "Ghostty" && command -v open >/dev/null 2>&1; then
open -na "Ghostty" --args -e /bin/zsh -lc "\${MO_BIN} \${MO_SUBCOMMAND}"
return \$?
fi
;;
Hyper)
if launcher_available "Hyper" && command -v open >/dev/null 2>&1; then
open -na "Hyper" --args /bin/zsh -lc "${raw_cmd}"
open -na "Hyper" --args /bin/zsh -lc "\"\${MO_BIN}\" \${MO_SUBCOMMAND}"
return \$?
fi
;;
WindTerm)
if launcher_available "WindTerm" && command -v open >/dev/null 2>&1; then
open -na "WindTerm" --args /bin/zsh -lc "${raw_cmd}"
open -na "WindTerm" --args /bin/zsh -lc "\"\${MO_BIN}\" \${MO_SUBCOMMAND}"
return \$?
fi
;;
Warp)
if launcher_available "Warp" && command -v open >/dev/null 2>&1; then
open -na "Warp" --args /bin/zsh -lc "${raw_cmd}"
open -na "Warp" --args /bin/zsh -lc "\"\${MO_BIN}\" \${MO_SUBCOMMAND}"
return \$?
fi
;;
@@ -204,7 +208,7 @@ APPLESCRIPT
}
if [[ -n "\${TERM:-}" && "\${TERM}" != "dumb" ]]; then
"${mo_bin}" ${subcommand}
"\${MO_BIN}" \${MO_SUBCOMMAND}
exit \$?
fi
@@ -223,7 +227,7 @@ fi
echo "TERM environment variable not set and no launcher succeeded."
echo "Run this manually:"
echo " ${raw_cmd}"
echo " \"\${MO_BIN}\" \${MO_SUBCOMMAND}"
exit 1
EOF
chmod +x "$target"
@@ -244,28 +248,15 @@ create_raycast_commands() {
log_success "Scripts ready in: $dir"
log_header "Raycast Configuration"
if command -v open > /dev/null 2>&1; then
if open "raycast://extensions/raycast/raycast-settings/extensions" > /dev/null 2>&1; then
log_step "Raycast settings opened."
else
log_warn "Could not auto-open Raycast."
fi
else
log_warn "open command not available; please open Raycast manually."
fi
echo "If Raycast asks to add a Script Directory, use:"
echo " $dir"
log_step "Open Raycast → Settings → Extensions → Script Commands."
echo "1. Click \"+\" → Add Script Directory."
echo "2. Choose: $dir"
echo "3. Click \"Reload Script Directories\"."
if is_interactive; then
log_header "Finalizing Setup"
prompt_enter "Press [Enter] to reload script directories in Raycast..."
if command -v open > /dev/null 2>&1 && open "raycast://extensions/raycast/raycast/reload-script-directories" > /dev/null 2>&1; then
log_step "Raycast script directories reloaded."
else
log_warn "Could not auto-reload Raycast script directories."
fi
log_warn "Please complete the Raycast steps above before continuing."
prompt_enter "Press [Enter] to continue..."
log_success "Raycast setup complete!"
else
log_warn "Non-interactive mode; skip Raycast reload. Please run 'Reload Script Directories' in Raycast."

View File

@@ -117,7 +117,7 @@ total_size_cleaned=0
# Simulate 'Enter' for confirmation
printf '\n' | batch_uninstall_applications > /dev/null 2>&1
grep -q "uninstall --cask brew-app-cask" "$HOME/brew_calls.log"
grep -q "uninstall --cask --zap brew-app-cask" "$HOME/brew_calls.log"
EOF
[ "$status" -eq 0 ]

View File

@@ -80,15 +80,137 @@ EOF
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/apps.sh"
ls() { return 1; }
stop_section_spinner() { :; }
rm -rf "$HOME/Library/Caches"
clean_orphaned_app_data
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Skipped: No permission"* ]]
[[ "$output" == *"No permission"* ]]
}
@test "clean_orphaned_app_data handles paths with spaces correctly" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/apps.sh"
# Mock scan_installed_apps - return empty (no installed apps)
scan_installed_apps() {
: > "$1"
}
# Mock mdfind to return empty (no app found)
mdfind() {
return 0
}
# Ensure local function mock works even if timeout/gtimeout is installed
run_with_timeout() { shift; "$@"; }
# Mock safe_clean (normally from bin/clean.sh)
safe_clean() {
rm -rf "$1"
return 0
}
# Create required Library structure for permission check
mkdir -p "$HOME/Library/Caches"
# Create test structure with spaces in path (old modification time: 61 days ago)
mkdir -p "$HOME/Library/Saved Application State/com.test.orphan.savedState"
# Create a file with some content so directory size > 0
echo "test data" > "$HOME/Library/Saved Application State/com.test.orphan.savedState/data.plist"
# Set modification time to 61 days ago (older than 60-day threshold)
touch -t "$(date -v-61d +%Y%m%d%H%M.%S 2>/dev/null || date -d '61 days ago' +%Y%m%d%H%M.%S)" "$HOME/Library/Saved Application State/com.test.orphan.savedState" 2>/dev/null || true
# Disable spinner for test
start_section_spinner() { :; }
stop_section_spinner() { :; }
# Run cleanup
clean_orphaned_app_data
# Verify path with spaces was handled correctly (not split into multiple paths)
if [[ -d "$HOME/Library/Saved Application State/com.test.orphan.savedState" ]]; then
echo "ERROR: Orphaned savedState not deleted"
exit 1
else
echo "SUCCESS: Orphaned savedState deleted correctly"
fi
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"SUCCESS"* ]]
}
@test "clean_orphaned_app_data only counts successful deletions" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/apps.sh"
# Mock scan_installed_apps - return empty
scan_installed_apps() {
: > "$1"
}
# Mock mdfind to return empty (no app found)
mdfind() {
return 0
}
# Ensure local function mock works even if timeout/gtimeout is installed
run_with_timeout() { shift; "$@"; }
# Create required Library structure for permission check
mkdir -p "$HOME/Library/Caches"
# Create test files (old modification time: 61 days ago)
mkdir -p "$HOME/Library/Caches/com.test.orphan1"
mkdir -p "$HOME/Library/Caches/com.test.orphan2"
# Create files with content so size > 0
echo "data1" > "$HOME/Library/Caches/com.test.orphan1/data"
echo "data2" > "$HOME/Library/Caches/com.test.orphan2/data"
# Set modification time to 61 days ago
touch -t "$(date -v-61d +%Y%m%d%H%M.%S 2>/dev/null || date -d '61 days ago' +%Y%m%d%H%M.%S)" "$HOME/Library/Caches/com.test.orphan1" 2>/dev/null || true
touch -t "$(date -v-61d +%Y%m%d%H%M.%S 2>/dev/null || date -d '61 days ago' +%Y%m%d%H%M.%S)" "$HOME/Library/Caches/com.test.orphan2" 2>/dev/null || true
# Mock safe_clean to fail on first item, succeed on second
safe_clean() {
if [[ "$1" == *"orphan1"* ]]; then
return 1 # Fail
else
rm -rf "$1"
return 0 # Succeed
fi
}
# Disable spinner
start_section_spinner() { :; }
stop_section_spinner() { :; }
# Run cleanup
clean_orphaned_app_data
# Verify first item still exists (safe_clean failed)
if [[ -d "$HOME/Library/Caches/com.test.orphan1" ]]; then
echo "PASS: Failed deletion preserved"
fi
# Verify second item deleted
if [[ ! -d "$HOME/Library/Caches/com.test.orphan2" ]]; then
echo "PASS: Successful deletion removed"
fi
# Check that output shows correct count (only 1, not 2)
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"PASS: Failed deletion preserved"* ]]
[[ "$output" == *"PASS: Successful deletion removed"* ]]
}
@test "is_critical_system_component matches known system services" {
run bash --noprofile --norc <<'EOF'
set -euo pipefail
@@ -160,3 +282,144 @@ EOF
[[ "$output" != *"rm-called"* ]]
[[ "$output" != *"launchctl-called"* ]]
}
@test "is_launch_item_orphaned detects orphan when program missing" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/apps.sh"
tmp_dir="$(mktemp -d)"
tmp_plist="$tmp_dir/com.test.orphan.plist"
cat > "$tmp_plist" << 'PLIST'
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.test.orphan</string>
<key>ProgramArguments</key>
<array>
<string>/nonexistent/app/program</string>
</array>
</dict>
</plist>
PLIST
run_with_timeout() { shift; "$@"; }
if is_launch_item_orphaned "$tmp_plist"; then
echo "orphan"
fi
rm -rf "$tmp_dir"
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"orphan"* ]]
}
@test "is_launch_item_orphaned protects when program exists" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/apps.sh"
tmp_dir="$(mktemp -d)"
tmp_plist="$tmp_dir/com.test.active.plist"
tmp_program="$tmp_dir/program"
touch "$tmp_program"
cat > "$tmp_plist" << PLIST
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.test.active</string>
<key>ProgramArguments</key>
<array>
<string>$tmp_program</string>
</array>
</dict>
</plist>
PLIST
run_with_timeout() { shift; "$@"; }
if is_launch_item_orphaned "$tmp_plist"; then
echo "orphan"
else
echo "not-orphan"
fi
rm -rf "$tmp_dir"
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"not-orphan"* ]]
}
@test "is_launch_item_orphaned protects when app support active" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/apps.sh"
tmp_dir="$(mktemp -d)"
tmp_plist="$tmp_dir/com.test.appsupport.plist"
mkdir -p "$HOME/Library/Application Support/TestApp"
touch "$HOME/Library/Application Support/TestApp/recent.txt"
cat > "$tmp_plist" << 'PLIST'
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>com.test.appsupport</string>
<key>ProgramArguments</key>
<array>
<string>$HOME/Library/Application Support/TestApp/Current/app</string>
</array>
</dict>
</plist>
PLIST
run_with_timeout() { shift; "$@"; }
if is_launch_item_orphaned "$tmp_plist"; then
echo "orphan"
else
echo "not-orphan"
fi
rm -rf "$tmp_dir"
rm -rf "$HOME/Library/Application Support/TestApp"
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"not-orphan"* ]]
}
@test "clean_orphaned_launch_agents skips when no orphans" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/apps.sh"
mkdir -p "$HOME/Library/LaunchAgents"
start_section_spinner() { :; }
stop_section_spinner() { :; }
note_activity() { :; }
get_path_size_kb() { echo "1"; }
run_with_timeout() { shift; "$@"; }
clean_orphaned_launch_agents
EOF
[ "$status" -eq 0 ]
}

View File

@@ -28,7 +28,23 @@ CALL_LOG="$HOME/system_calls.log"
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/system.sh"
sudo() { return 0; }
sudo() {
if [[ "$1" == "test" ]]; then
return 0
fi
if [[ "$1" == "find" ]]; then
case "$2" in
/Library/Caches) printf '%s\0' "/Library/Caches/test.log" ;;
/private/var/log) printf '%s\0' "/private/var/log/system.log" ;;
esac
return 0
fi
if [[ "$1" == "stat" ]]; then
echo "0"
return 0
fi
return 0
}
safe_sudo_find_delete() {
echo "safe_sudo_find_delete:$1:$2" >> "$CALL_LOG"
return 0
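These mocks emit NUL-delimited paths because the production code presumably consumes them with read -d ''; the standard consumer loop looks like:
while IFS= read -r -d '' path; do
  printf 'candidate: %s\n' "$path"
done < <(sudo find /Library/Caches -type f -print0)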
@@ -562,11 +578,24 @@ CALL_LOG="$HOME/memory_exception_calls.log"
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/system.sh"
sudo() { return 0; }
safe_sudo_find_delete() {
echo "safe_sudo_find_delete:$1:$2:$3:$4" >> "$CALL_LOG"
sudo() {
if [[ "$1" == "test" ]]; then
return 0
fi
if [[ "$1" == "find" ]]; then
echo "sudo_find:$*" >> "$CALL_LOG"
if [[ "$2" == "/private/var/db/reportmemoryexception/MemoryLimitViolations" && "$*" != *"-delete"* ]]; then
printf '%s\0' "/private/var/db/reportmemoryexception/MemoryLimitViolations/report.bin"
fi
return 0
fi
if [[ "$1" == "stat" ]]; then
echo "1024"
return 0
fi
return 0
}
safe_sudo_find_delete() { return 0; }
safe_sudo_remove() { return 0; }
log_success() { :; }
is_sip_enabled() { return 1; }
@@ -579,7 +608,8 @@ EOF
[ "$status" -eq 0 ]
[[ "$output" == *"reportmemoryexception/MemoryLimitViolations"* ]]
[[ "$output" == *":30:"* ]] # 30-day retention
[[ "$output" == *"-mtime +30"* ]] # 30-day retention
[[ "$output" == *"-delete"* ]]
}
@test "clean_deep_system cleans diagnostic trace logs" {
@@ -590,12 +620,29 @@ CALL_LOG="$HOME/diag_calls.log"
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/system.sh"
sudo() { return 0; }
sudo() {
if [[ "$1" == "test" ]]; then
return 0
fi
if [[ "$1" == "find" ]]; then
echo "sudo_find:$*" >> "$CALL_LOG"
if [[ "$2" == "/private/var/db/diagnostics" ]]; then
printf '%s\0' \
"/private/var/db/diagnostics/Persist/test.tracev3" \
"/private/var/db/diagnostics/Special/test.tracev3"
fi
return 0
fi
return 0
}
safe_sudo_find_delete() {
echo "safe_sudo_find_delete:$1:$2" >> "$CALL_LOG"
return 0
}
safe_sudo_remove() { return 0; }
safe_sudo_remove() {
echo "safe_sudo_remove:$1" >> "$CALL_LOG"
return 0
}
log_success() { :; }
start_section_spinner() { :; }
stop_section_spinner() { :; }

View File

@@ -102,16 +102,17 @@ setup() {
run bash --noprofile --norc -c "cd '$PROJECT_ROOT'; printf \$'\\n' | HOME='$HOME' ./mo clean --whitelist"
[ "$status" -eq 0 ]
grep -q "\\.m2/repository" "$whitelist_file"
first_pattern=$(grep -v '^[[:space:]]*#' "$whitelist_file" | grep -v '^[[:space:]]*$' | head -n 1)
[ -n "$first_pattern" ]
run bash --noprofile --norc -c "cd '$PROJECT_ROOT'; printf \$' \\n' | HOME='$HOME' ./mo clean --whitelist"
[ "$status" -eq 0 ]
run grep -q "\\.m2/repository" "$whitelist_file"
run grep -Fxq "$first_pattern" "$whitelist_file"
[ "$status" -eq 1 ]
run bash --noprofile --norc -c "cd '$PROJECT_ROOT'; printf \$'\\n' | HOME='$HOME' ./mo clean --whitelist"
[ "$status" -eq 0 ]
run grep -q "\\.m2/repository" "$whitelist_file"
run grep -Fxq "$first_pattern" "$whitelist_file"
[ "$status" -eq 1 ]
}

View File

@@ -48,6 +48,15 @@ setup() {
[[ "$result" =~ "Library/Application Support/MaestroStudio" ]]
}
@test "find_app_files detects Maestro Studio auth directory (.mobiledev)" {
mkdir -p "$HOME/.mobiledev"
echo "token" > "$HOME/.mobiledev/authtoken"
result=$(find_app_files "com.maestro.studio" "Maestro Studio")
[[ "$result" =~ .mobiledev ]]
}
@test "find_app_files extracts base name from version suffix (Zed Nightly -> zed)" {
mkdir -p "$HOME/.config/zed"
mkdir -p "$HOME/Library/Application Support/Zed"
@@ -60,6 +69,18 @@ setup() {
[[ "$result" =~ "Library/Application Support/Zed" ]]
}
@test "find_app_files detects Zed channel variants in HTTPStorages only" {
mkdir -p "$HOME/Library/HTTPStorages/dev.zed.Zed-Preview"
mkdir -p "$HOME/Library/Application Support/Firefox/Profiles/default/storage/default/https+++zed.dev"
echo "test" > "$HOME/Library/HTTPStorages/dev.zed.Zed-Preview/data"
echo "test" > "$HOME/Library/Application Support/Firefox/Profiles/default/storage/default/https+++zed.dev/data"
result=$(find_app_files "dev.zed.Zed-Nightly" "Zed Nightly")
[[ "$result" =~ Library/HTTPStorages/dev\.zed\.Zed-Preview ]]
[[ ! "$result" =~ storage/default/https\+\+\+zed\.dev ]]
}
@test "find_app_files detects multiple naming variants simultaneously" {
mkdir -p "$HOME/.config/maestro-studio"
mkdir -p "$HOME/Library/Application Support/MaestroStudio"