1
0
mirror of https://github.com/tw93/Mole.git synced 2026-03-22 18:30:08 +00:00

Fix cleanup regressions and analyze navigation

Refs #605 #607 #608 #609 #610
This commit is contained in:
Tw93
2026-03-21 13:04:48 +08:00
parent d51e1a621d
commit d6b9d9f3f3
13 changed files with 692 additions and 199 deletions

View File

@@ -24,6 +24,7 @@ source "$SCRIPT_DIR/../lib/clean/user.sh"
SYSTEM_CLEAN=false
DRY_RUN=false
PROTECT_FINDER_METADATA=false
EXTERNAL_VOLUME_TARGET=""
IS_M_SERIES=$([[ "$(uname -m)" == "arm64" ]] && echo "true" || echo "false")
EXPORT_LIST_FILE="$HOME/.config/mole/clean-list.txt"
@@ -731,6 +732,19 @@ start_cleanup() {
printf '\033[2J\033[H'
fi
printf '\n'
if [[ -n "$EXTERNAL_VOLUME_TARGET" ]]; then
echo -e "${PURPLE_BOLD}Clean External Volume${NC}"
echo -e "${GRAY}${EXTERNAL_VOLUME_TARGET}${NC}"
echo ""
if [[ "$DRY_RUN" == "true" ]]; then
echo -e "${YELLOW}Dry Run Mode${NC}, Preview only, no deletions"
echo ""
fi
SYSTEM_CLEAN=false
return 0
fi
echo -e "${PURPLE_BOLD}Clean Your Mac${NC}"
echo ""
@@ -823,9 +837,15 @@ EOF
}
perform_cleanup() {
if [[ -n "$EXTERNAL_VOLUME_TARGET" ]]; then
total_items=0
files_cleaned=0
total_size_cleaned=0
fi
# Test mode skips expensive scans and returns minimal output.
local test_mode_enabled=false
if [[ "${MOLE_TEST_MODE:-0}" == "1" ]]; then
if [[ -z "$EXTERNAL_VOLUME_TARGET" && "${MOLE_TEST_MODE:-0}" == "1" ]]; then
test_mode_enabled=true
if [[ "$DRY_RUN" == "true" ]]; then
echo -e "${YELLOW}Dry Run Mode${NC}, Preview only, no deletions"
@@ -859,7 +879,7 @@ perform_cleanup() {
total_size_cleaned=0
fi
if [[ "$test_mode_enabled" == "false" ]]; then
if [[ "$test_mode_enabled" == "false" && -z "$EXTERNAL_VOLUME_TARGET" ]]; then
echo -e "${BLUE}${ICON_ADMIN}${NC} $(detect_architecture) | Free space: $(get_free_space)"
fi
@@ -874,7 +894,9 @@ perform_cleanup() {
fi
# Pre-check TCC permissions to avoid mid-run prompts.
if [[ -z "$EXTERNAL_VOLUME_TARGET" ]]; then
check_tcc_permissions
fi
if [[ ${#WHITELIST_PATTERNS[@]} -gt 0 ]]; then
local predefined_count=0
@@ -935,6 +957,11 @@ perform_cleanup() {
# Allow per-section failures without aborting the full run.
set +e
if [[ -n "$EXTERNAL_VOLUME_TARGET" ]]; then
start_section "External volume"
clean_external_volume_target "$EXTERNAL_VOLUME_TARGET"
end_section
else
# ===== 1. System =====
if [[ "$SYSTEM_CLEAN" == "true" ]]; then
start_section "System"
@@ -954,7 +981,6 @@ perform_cleanup() {
start_section "User essentials"
clean_user_essentials
clean_finder_metadata
scan_external_volumes
end_section
# ===== 3. App caches (merged sandboxed and standard app caches) =====
@@ -1027,6 +1053,7 @@ perform_cleanup() {
start_section "Project artifacts"
show_project_artifact_hint_notice
end_section
fi
# ===== Final summary =====
echo ""
@@ -1116,8 +1143,8 @@ perform_cleanup() {
}
main() {
for arg in "$@"; do
case "$arg" in
while [[ $# -gt 0 ]]; do
case "$1" in
"--help" | "-h")
show_clean_help
exit 0
@@ -1129,12 +1156,21 @@ main() {
DRY_RUN=true
export MOLE_DRY_RUN=1
;;
"--external")
shift
if [[ $# -eq 0 ]]; then
echo "Missing path for --external" >&2
exit 1
fi
EXTERNAL_VOLUME_TARGET=$(validate_external_volume_target "$1") || exit 1
;;
"--whitelist")
source "$SCRIPT_DIR/../lib/manage/whitelist.sh"
manage_whitelist "clean"
exit 0
;;
esac
shift
done
start_cleanup

View File

@@ -11,6 +11,8 @@ import (
"sync/atomic"
"testing"
"time"
tea "github.com/charmbracelet/bubbletea"
)
func resetOverviewSnapshotForTest() {
@@ -182,6 +184,68 @@ func TestOverviewStoreAndLoad(t *testing.T) {
}
}
// TestUpdateKeyEscGoesBackFromDirectoryView verifies that pressing Esc in a
// directory view pops the cached parent history entry in place — no rescan
// command — and restores the parent path with a non-empty status line.
func TestUpdateKeyEscGoesBackFromDirectoryView(t *testing.T) {
	parent := historyEntry{
		Path:        "/tmp",
		Entries:     []dirEntry{{Name: "child", Path: "/tmp/child", Size: 1, IsDir: true}},
		TotalSize:   1,
		Selected:    0,
		EntryOffset: 0,
	}
	m := model{
		path:    "/tmp/child",
		history: []historyEntry{parent},
		entries: []dirEntry{{Name: "file.txt", Path: "/tmp/child/file.txt", Size: 1}},
	}
	updated, cmd := m.updateKey(tea.KeyMsg{Type: tea.KeyEsc})
	if cmd != nil {
		t.Fatalf("expected no command when returning from cached history, got %v", cmd)
	}
	next, ok := updated.(model)
	if !ok {
		t.Fatalf("expected model, got %T", updated)
	}
	if next.path != "/tmp" {
		t.Fatalf("expected path /tmp after Esc, got %s", next.path)
	}
	if next.status == "" {
		t.Fatalf("expected status to be updated after Esc navigation")
	}
}
// TestUpdateKeyCtrlCQuits verifies that Ctrl+C always produces a quit command
// whose message is tea.QuitMsg.
func TestUpdateKeyCtrlCQuits(t *testing.T) {
	var m model
	_, cmd := m.updateKey(tea.KeyMsg{Type: tea.KeyCtrlC})
	if cmd == nil {
		t.Fatalf("expected quit command for Ctrl+C")
	}
	msg := cmd()
	if _, ok := msg.(tea.QuitMsg); !ok {
		t.Fatalf("expected tea.QuitMsg from quit command")
	}
}
// TestViewShowsEscBackAndCtrlCQuitHints verifies that the rendered footer of
// a directory view (with history present) advertises both the Esc-to-go-back
// and the Ctrl+C-to-quit shortcuts.
func TestViewShowsEscBackAndCtrlCQuitHints(t *testing.T) {
	m := model{
		path:       "/tmp/project",
		history:    []historyEntry{{Path: "/tmp"}},
		entries:    []dirEntry{{Name: "cache", Path: "/tmp/project/cache", Size: 1, IsDir: true}},
		largeFiles: []fileEntry{{Name: "large.bin", Path: "/tmp/project/large.bin", Size: 1024}},
		totalSize:  1024,
	}
	rendered := m.View()
	if !strings.Contains(rendered, "Esc Back") {
		t.Fatalf("expected Esc Back hint in view, got:\n%s", rendered)
	}
	if !strings.Contains(rendered, "Ctrl+C Quit") {
		t.Fatalf("expected Ctrl+C Quit hint in view, got:\n%s", rendered)
	}
}
func TestCacheSaveLoadRoundTrip(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)

View File

@@ -612,20 +612,22 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
m.deleteConfirm = false
m.deleteTarget = nil
return m, nil
case "ctrl+c":
return m, tea.Quit
default:
return m, nil
}
}
switch msg.String() {
case "q", "ctrl+c", "Q":
case "q", "Q", "ctrl+c":
return m, tea.Quit
case "esc":
if m.showLargeFiles {
m.showLargeFiles = false
return m, nil
}
return m, tea.Quit
return m.goBack()
case "up", "k", "K":
if m.showLargeFiles {
if m.largeSelected > 0 {
@@ -666,53 +668,7 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
m.showLargeFiles = false
return m, nil
}
if len(m.history) == 0 {
if !m.inOverviewMode() {
return m, m.switchToOverviewMode()
}
return m, nil
}
last := m.history[len(m.history)-1]
m.history = m.history[:len(m.history)-1]
m.path = last.Path
m.selected = last.Selected
m.offset = last.EntryOffset
m.largeSelected = last.LargeSelected
m.largeOffset = last.LargeOffset
m.isOverview = last.IsOverview
if last.Dirty {
// On overview return, refresh cached entries.
if last.IsOverview {
m.hydrateOverviewEntries()
m.totalSize = sumKnownEntrySizes(m.entries)
m.status = "Ready"
m.scanning = false
if nextPendingOverviewIndex(m.entries) >= 0 {
m.overviewScanning = true
return m, m.scheduleOverviewScans()
}
return m, nil
}
m.status = "Scanning..."
m.scanning = true
return m, tea.Batch(m.scanCmd(m.path), tickCmd())
}
m.entries = last.Entries
m.largeFiles = last.LargeFiles
m.totalSize = last.TotalSize
m.clampEntrySelection()
m.clampLargeSelection()
if len(m.entries) == 0 {
m.selected = 0
} else if m.selected >= len(m.entries) {
m.selected = len(m.entries) - 1
}
if m.selected < 0 {
m.selected = 0
}
m.status = fmt.Sprintf("Scanned %s", humanizeBytes(m.totalSize))
m.scanning = false
return m, nil
return m.goBack()
case "r", "R":
m.multiSelected = make(map[string]bool)
m.largeMultiSelected = make(map[string]bool)
@@ -962,6 +918,57 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
return m, nil
}
// goBack pops the most recent history entry and restores the model to that
// view: path, cursor position, scroll offsets, and overview flag. With no
// history it falls back to overview mode (or stays put if already there).
// Entries flagged Dirty are refreshed — via overview hydration or a fresh
// directory scan — rather than restored from the cached snapshot.
func (m model) goBack() (tea.Model, tea.Cmd) {
if len(m.history) == 0 {
// Nothing to return to; switch to the overview unless already there.
if !m.inOverviewMode() {
return m, m.switchToOverviewMode()
}
return m, nil
}
// Pop the last history entry and restore navigation state from it.
last := m.history[len(m.history)-1]
m.history = m.history[:len(m.history)-1]
m.path = last.Path
m.selected = last.Selected
m.offset = last.EntryOffset
m.largeSelected = last.LargeSelected
m.largeOffset = last.LargeOffset
m.isOverview = last.IsOverview
if last.Dirty {
// On overview return, refresh cached entries.
if last.IsOverview {
m.hydrateOverviewEntries()
m.totalSize = sumKnownEntrySizes(m.entries)
m.status = "Ready"
m.scanning = false
// Resume background scans if any overview entry is still pending.
if nextPendingOverviewIndex(m.entries) >= 0 {
m.overviewScanning = true
return m, m.scheduleOverviewScans()
}
return m, nil
}
// Dirty directory snapshot: trigger a rescan instead of showing stale data.
m.status = "Scanning..."
m.scanning = true
return m, tea.Batch(m.scanCmd(m.path), tickCmd())
}
// Clean snapshot: restore cached listings and clamp the cursor so it
// stays inside the restored entry range.
m.entries = last.Entries
m.largeFiles = last.LargeFiles
m.totalSize = last.TotalSize
m.clampEntrySelection()
m.clampLargeSelection()
if len(m.entries) == 0 {
m.selected = 0
} else if m.selected >= len(m.entries) {
m.selected = len(m.entries) - 1
}
if m.selected < 0 {
m.selected = 0
}
m.status = fmt.Sprintf("Scanned %s", humanizeBytes(m.totalSize))
m.scanning = false
return m, nil
}
func (m *model) switchToOverviewMode() tea.Cmd {
m.isOverview = true
m.path = "/"

View File

@@ -327,31 +327,31 @@ func (m model) View() string {
fmt.Fprintln(&b)
if m.inOverviewMode() {
if len(m.history) > 0 {
fmt.Fprintf(&b, "%s↑↓←→ | Enter | R Refresh | O Open | F File | Back | Q Quit%s\n", colorGray, colorReset)
fmt.Fprintf(&b, "%s↑↓←→ | Enter | R Refresh | O Open | F File | Esc Back | Q/Ctrl+C Quit%s\n", colorGray, colorReset)
} else {
fmt.Fprintf(&b, "%s↑↓→ | Enter | R Refresh | O Open | F File | Q Quit%s\n", colorGray, colorReset)
fmt.Fprintf(&b, "%s↑↓→ | Enter | R Refresh | O Open | F File | Ctrl+C Quit%s\n", colorGray, colorReset)
}
} else if m.showLargeFiles {
selectCount := len(m.largeMultiSelected)
if selectCount > 0 {
fmt.Fprintf(&b, "%s↑↓← | Space Select | R Refresh | O Open | F File | ⌫ Del %d | Back | Q Quit%s\n", colorGray, selectCount, colorReset)
fmt.Fprintf(&b, "%s↑↓← | Space Select | R Refresh | O Open | F File | ⌫ Del %d | Esc Back | Q/Ctrl+C Quit%s\n", colorGray, selectCount, colorReset)
} else {
fmt.Fprintf(&b, "%s↑↓← | Space Select | R Refresh | O Open | F File | ⌫ Del | Back | Q Quit%s\n", colorGray, colorReset)
fmt.Fprintf(&b, "%s↑↓← | Space Select | R Refresh | O Open | F File | ⌫ Del | Esc Back | Q/Ctrl+C Quit%s\n", colorGray, colorReset)
}
} else {
largeFileCount := len(m.largeFiles)
selectCount := len(m.multiSelected)
if selectCount > 0 {
if largeFileCount > 0 {
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del %d | T Top %d | Q Quit%s\n", colorGray, selectCount, largeFileCount, colorReset)
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del %d | T Top %d | Esc Back | Q/Ctrl+C Quit%s\n", colorGray, selectCount, largeFileCount, colorReset)
} else {
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del %d | Q Quit%s\n", colorGray, selectCount, colorReset)
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del %d | Esc Back | Q/Ctrl+C Quit%s\n", colorGray, selectCount, colorReset)
}
} else {
if largeFileCount > 0 {
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del | T Top %d | Q Quit%s\n", colorGray, largeFileCount, colorReset)
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del | T Top %d | Esc Back | Q/Ctrl+C Quit%s\n", colorGray, largeFileCount, colorReset)
} else {
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del | Q Quit%s\n", colorGray, colorReset)
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del | Esc Back | Q/Ctrl+C Quit%s\n", colorGray, colorReset)
}
}
}

View File

@@ -102,24 +102,60 @@ discover_project_dirs() {
printf '%s\n' "${discovered[@]}" | sort -u
}
# Save discovered paths to config.
save_discovered_paths() {
# Ensure the purge config file and its parent directory exist with the
# invoking user's ownership (the ensure_* helpers are best-effort).
prepare_purge_config_path() {
    local config_dir
    config_dir=$(dirname "$PURGE_CONFIG_FILE")
    ensure_user_dir "$config_dir"
    ensure_user_file "$PURGE_CONFIG_FILE"
}
# write_purge_config HEADER [PATH...]
# Atomically rewrite the purge config: HEADER first, then one entry per
# path with $HOME abbreviated to ~. The content is assembled in a temp
# file and renamed into place so readers never observe a partial file.
# Returns 1 on any write failure, removing the temp file and leaving the
# previous config untouched.
write_purge_config() {
    local header="$1"
    shift
    prepare_purge_config_path
    local tmp_file
    tmp_file=$(mktemp_file "mole-purge-paths") || return 1
    if ! printf '%s\n' "$header" > "$tmp_file"; then
        rm -f "$tmp_file" 2> /dev/null || true
        return 1
    fi
    for path in "$@"; do
        # Store paths with ~ instead of $HOME so the file stays portable.
        path="${path/#$HOME/~}"
        if ! printf '%s\n' "$path" >> "$tmp_file"; then
            rm -f "$tmp_file" 2> /dev/null || true
            return 1
        fi
    done
    # Rename is atomic on the same filesystem; failure keeps the old config.
    if ! mv "$tmp_file" "$PURGE_CONFIG_FILE" 2> /dev/null; then
        rm -f "$tmp_file" 2> /dev/null || true
        return 1
    fi
    return 0
}
# Emit a non-fatal warning when the purge config could not be written.
# Silent for non-interactive output or when discovery is silenced.
warn_purge_config_write_failure() {
    if [[ ! -t 1 ]] || [[ -n "${_PURGE_DISCOVERY_SILENT:-}" ]]; then
        return 0
    fi
    echo -e "${YELLOW}${ICON_WARNING}${NC} Could not save purge paths to ${PURGE_CONFIG_FILE/#$HOME/~}, using discovered paths for this run" >&2
}
# Persist auto-discovered project directories to the purge config file.
# Propagates write_purge_config's status so callers can warn on failure.
save_discovered_paths() {
    local header='# Mole Purge Paths - Auto-discovered project directories
# Edit this file to customize, or run: mo purge --paths
# Add one path per line (supports ~ for home directory)
'
    write_purge_config "$header" "$@"
}
# Load purge paths from config or auto-discover
@@ -142,11 +178,13 @@ load_purge_config() {
if [[ ${#discovered[@]} -gt 0 ]]; then
PURGE_SEARCH_PATHS=("${discovered[@]}")
save_discovered_paths "${discovered[@]}"
if save_discovered_paths "${discovered[@]}"; then
if [[ -t 1 ]] && [[ -z "${_PURGE_DISCOVERY_SILENT:-}" ]]; then
echo -e "${GRAY}Found ${#discovered[@]} project directories, saved to config${NC}" >&2
fi
else
warn_purge_config_write_failure
fi
else
PURGE_SEARCH_PATHS=("${DEFAULT_PURGE_SEARCH_PATHS[@]}")
fi

View File

@@ -510,6 +510,30 @@ cache_top_level_entry_count_capped() {
printf '%s\n' "$count"
}
# directory_has_entries DIR
# Return 0 when DIR is a directory containing at least one entry
# (including dotfiles and dangling symlinks), 1 otherwise.
# nullglob/dotglob are snapshotted and restored so callers keep their
# glob settings regardless of which branch returns.
directory_has_entries() {
    local dir="$1"
    [[ -d "$dir" ]] || return 1
    local _nullglob_state
    local _dotglob_state
    _nullglob_state=$(shopt -p nullglob || true)
    _dotglob_state=$(shopt -p dotglob || true)
    shopt -s nullglob dotglob
    local item
    for item in "$dir"/*; do
        # -L catches dangling symlinks, which -e alone would miss; a
        # directory whose only entry is a broken symlink is still non-empty.
        if [[ -e "$item" || -L "$item" ]]; then
            eval "$_nullglob_state"
            eval "$_dotglob_state"
            return 0
        fi
    done
    eval "$_nullglob_state"
    eval "$_dotglob_state"
    return 1
}
clean_app_caches() {
start_section_spinner "Scanning app caches..."
@@ -545,6 +569,9 @@ clean_app_caches() {
local total_size_partial=false
local cleaned_count=0
local found_any=false
local precise_size_limit="${MOLE_CONTAINER_CACHE_PRECISE_SIZE_LIMIT:-64}"
[[ "$precise_size_limit" =~ ^[0-9]+$ ]] || precise_size_limit=64
local precise_size_used=0
local _ng_state
_ng_state=$(shopt -p nullglob || true)
@@ -587,12 +614,11 @@ process_container_cache() {
local container_dir="$1"
[[ -d "$container_dir" ]] || return 0
[[ -L "$container_dir" ]] && return 0
local bundle_id
bundle_id=$(basename "$container_dir")
local bundle_id="${container_dir##*/}"
if is_critical_system_component "$bundle_id"; then
return 0
fi
if should_protect_data "$bundle_id" || should_protect_data "$(echo "$bundle_id" | LC_ALL=C tr '[:upper:]' '[:lower:]')"; then
if should_protect_data "$bundle_id"; then
return 0
fi
local cache_dir="$container_dir/Data/Library/Caches"
@@ -603,11 +629,12 @@ process_container_cache() {
[[ "$item_count" =~ ^[0-9]+$ ]] || item_count=0
[[ "$item_count" -eq 0 ]] && return 0
if [[ "$item_count" -le 100 ]]; then
if [[ "$item_count" -le 100 && "$precise_size_used" -lt "$precise_size_limit" ]]; then
local size
size=$(get_path_size_kb "$cache_dir" 2> /dev/null || echo "0")
[[ "$size" =~ ^[0-9]+$ ]] || size=0
total_size=$((total_size + size))
precise_size_used=$((precise_size_used + 1))
else
total_size_partial=true
fi
@@ -634,7 +661,7 @@ process_container_cache() {
clean_group_container_caches() {
local group_containers_dir="$HOME/Library/Group Containers"
[[ -d "$group_containers_dir" ]] || return 0
if ! find "$group_containers_dir" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then
if ! directory_has_entries "$group_containers_dir"; then
return 0
fi
@@ -644,14 +671,15 @@ clean_group_container_caches() {
local cleaned_count=0
local found_any=false
# Collect all non-Apple container directories first
local -a containers=()
local container_dir
local _nullglob_state
_nullglob_state=$(shopt -p nullglob || true)
shopt -s nullglob
for container_dir in "$group_containers_dir"/*; do
[[ -d "$container_dir" ]] || continue
[[ -L "$container_dir" ]] && continue
local container_id
container_id=$(basename "$container_dir")
local container_id="${container_dir##*/}"
# Skip Apple-owned shared containers entirely.
case "$container_id" in
@@ -659,13 +687,6 @@ clean_group_container_caches() {
continue
;;
esac
containers+=("$container_dir")
done
# Process each container's candidate directories
for container_dir in "${containers[@]}"; do
local container_id
container_id=$(basename "$container_dir")
local normalized_id="$container_id"
[[ "$normalized_id" == group.* ]] && normalized_id="${normalized_id#group.}"
@@ -753,6 +774,7 @@ clean_group_container_caches() {
fi
done
done
eval "$_nullglob_state"
stop_section_spinner
@@ -781,6 +803,168 @@ clean_group_container_caches() {
fi
}
# resolve_existing_path PATH
# Print a canonical absolute form of an existing path. Uses realpath when
# available; otherwise resolves the parent directory with `cd -P` and
# re-attaches the basename. Returns 1 when PATH does not exist or the
# parent cannot be resolved.
resolve_existing_path() {
    local target="$1"
    [[ -e "$target" ]] || return 1
    if command -v realpath > /dev/null 2>&1; then
        realpath "$target" 2> /dev/null && return 0
    fi
    local parent leaf
    parent=$(cd -P "$(dirname "$target")" 2> /dev/null && pwd) || return 1
    leaf=$(basename "$target")
    printf '%s/%s\n' "$parent" "$leaf"
}
# Print the directory under which external volumes are mounted.
# MOLE_EXTERNAL_VOLUMES_ROOT overrides it (used by tests); default /Volumes.
external_volume_root() {
    local root="${MOLE_EXTERNAL_VOLUMES_ROOT:-/Volumes}"
    printf '%s\n' "$root"
}
# validate_external_volume_target TARGET
# Validate that TARGET is a safe external-volume mount point and print its
# canonical path on success. Rejects (returning 1 with a stderr message):
# empty or relative paths, the volumes root itself, symlinked targets,
# paths not directly under the canonicalized root, internal disks, and
# network-mounted volumes (per diskutil, when it answers in time).
validate_external_volume_target() {
local target="$1"
local root
root=$(external_volume_root)
# Canonicalize the root first so a symlinked root still contains resolved targets.
local resolved_root="$root"
if [[ -e "$root" ]]; then
resolved_root=$(resolve_existing_path "$root" 2> /dev/null || printf '%s\n' "$root")
fi
resolved_root="${resolved_root%/}"
if [[ -z "$target" ]]; then
echo "Missing external volume path" >&2
return 1
fi
if [[ "$target" != /* ]]; then
echo "External volume path must be absolute: $target" >&2
return 1
fi
# Never allow cleaning /Volumes (or its canonical form) itself.
if [[ "$target" == "$root" || "$target" == "$resolved_root" ]]; then
echo "Refusing to clean the volumes root directly: $resolved_root" >&2
return 1
fi
if [[ -L "$target" ]]; then
echo "Refusing to clean symlinked volume path: $target" >&2
return 1
fi
local resolved
resolved=$(resolve_existing_path "$target") || {
echo "External volume path does not exist: $target" >&2
return 1
}
if [[ "$resolved" != "$resolved_root/"* ]]; then
echo "External volume path must be under $resolved_root: $resolved" >&2
return 1
fi
# Only first-level mounts (e.g. /Volumes/USB) qualify; nested paths are refused.
local relative_path="${resolved#"$resolved_root"/}"
if [[ -z "$relative_path" || "$relative_path" == "$resolved" || "$relative_path" == */* ]]; then
echo "External cleanup only supports mounted paths directly under $resolved_root: $resolved" >&2
return 1
fi
# Best-effort diskutil probe with a 2s timeout; empty output skips these checks.
local disk_info=""
disk_info=$(run_with_timeout 2 command diskutil info "$resolved" 2> /dev/null || echo "")
if [[ -n "$disk_info" ]]; then
if echo "$disk_info" | grep -Eq 'Internal:[[:space:]]+Yes'; then
echo "Refusing to clean an internal volume: $resolved" >&2
return 1
fi
local protocol=""
protocol=$(echo "$disk_info" | awk -F: '/Protocol:/ {gsub(/^[[:space:]]+/, "", $2); print $2; exit}')
# Network filesystems are excluded: remote deletions are slow and risky.
case "$protocol" in
SMB | NFS | AFP | CIFS | WebDAV)
echo "Refusing to clean network volume protocol $protocol: $resolved" >&2
return 1
;;
esac
fi
printf '%s\n' "$resolved"
}
# clean_external_volume_target VOLUME
# Remove macOS housekeeping metadata from a validated external volume:
# the top-level .TemporaryItems/.Trashes/.Spotlight-V100/.fseventsd
# directories, .DS_Store files (unless PROTECT_FINDER_METADATA=true),
# and AppleDouble "._*" files anywhere on the volume. Honors DRY_RUN,
# whitelist, and protection checks; updates the run-wide globals
# files_cleaned/total_size_cleaned/total_items when anything is found.
clean_external_volume_target() {
local volume="$1"
[[ -d "$volume" ]] || return 1
[[ -L "$volume" ]] && return 1
# Hidden metadata directories that macOS recreates automatically.
local -a top_level_targets=(
"$volume/.TemporaryItems"
"$volume/.Trashes"
"$volume/.Spotlight-V100"
"$volume/.fseventsd"
)
local cleaned_count=0
local total_size=0
local found_any=false
local volume_name="${volume##*/}"
start_section_spinner "Scanning external volume..."
local target_path
for target_path in "${top_level_targets[@]}"; do
[[ -e "$target_path" ]] || continue
[[ -L "$target_path" ]] && continue
# Respect built-in protections and the user's whitelist.
if should_protect_path "$target_path" 2> /dev/null || is_path_whitelisted "$target_path" 2> /dev/null; then
continue
fi
local size_kb
size_kb=$(get_path_size_kb "$target_path" 2> /dev/null || echo "0")
[[ "$size_kb" =~ ^[0-9]+$ ]] || size_kb=0
found_any=true
cleaned_count=$((cleaned_count + 1))
total_size=$((total_size + size_kb))
if [[ "$DRY_RUN" != "true" ]]; then
safe_remove "$target_path" true > /dev/null 2>&1 || true
fi
done
# Skip .DS_Store removal when the user asked to keep Finder metadata.
if [[ "$PROTECT_FINDER_METADATA" != "true" ]]; then
clean_ds_store_tree "$volume" "${volume_name} volume, .DS_Store"
fi
# Sweep AppleDouble resource-fork files ("._*") across the whole volume.
while IFS= read -r -d '' metadata_file; do
[[ -e "$metadata_file" ]] || continue
if should_protect_path "$metadata_file" 2> /dev/null || is_path_whitelisted "$metadata_file" 2> /dev/null; then
continue
fi
local size_kb
size_kb=$(get_path_size_kb "$metadata_file" 2> /dev/null || echo "0")
[[ "$size_kb" =~ ^[0-9]+$ ]] || size_kb=0
found_any=true
cleaned_count=$((cleaned_count + 1))
total_size=$((total_size + size_kb))
if [[ "$DRY_RUN" != "true" ]]; then
safe_remove "$metadata_file" true > /dev/null 2>&1 || true
fi
done < <(command find "$volume" -type f -name "._*" -print0 2> /dev/null || true)
stop_section_spinner
if [[ "$found_any" == "true" ]]; then
local size_human
size_human=$(bytes_to_human "$((total_size * 1024))")
if [[ "$DRY_RUN" == "true" ]]; then
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} External volume cleanup${NC}, ${YELLOW}${volume_name}, $size_human dry${NC}"
else
echo -e " ${GREEN}${ICON_SUCCESS}${NC} External volume cleanup${NC}, ${GREEN}${volume_name}, $size_human${NC}"
fi
# Roll this section's results into the run-wide summary counters.
files_cleaned=$((files_cleaned + cleaned_count))
total_size_cleaned=$((total_size_cleaned + total_size))
total_items=$((total_items + 1))
note_activity
fi
return 0
}
# Browser caches (Safari/Chrome/Edge/Firefox).
clean_browsers() {
safe_clean ~/Library/Caches/com.apple.Safari/* "Safari cache"

View File

@@ -559,7 +559,7 @@ declare -a MOLE_TEMP_DIRS=()
# Create a temp file under ${TMPDIR:-/tmp} with a mole-prefixed name,
# register it for cleanup at exit, and print its path.
# Returns 1 when mktemp fails.
create_temp_file() {
    local path
    if ! path=$(mktemp "${TMPDIR:-/tmp}/mole.XXXXXX"); then
        return 1
    fi
    register_temp_file "$path"
    echo "$path"
}
@@ -567,7 +567,7 @@ create_temp_file() {
# Create a temp directory under ${TMPDIR:-/tmp} with a mole-prefixed name,
# register it for cleanup at exit, and print its path.
# Returns 1 when mktemp fails.
create_temp_dir() {
    local path
    if ! path=$(mktemp -d "${TMPDIR:-/tmp}/mole.XXXXXX"); then
        return 1
    fi
    register_temp_dir "$path"
    echo "$path"
}
@@ -601,7 +601,9 @@ mktemp_file() {
# Cleanup all tracked temp files and directories
cleanup_temp_files() {
if declare -F stop_inline_spinner > /dev/null 2>&1; then
stop_inline_spinner || true
fi
local file
if [[ ${#MOLE_TEMP_FILES[@]} -gt 0 ]]; then
for file in "${MOLE_TEMP_FILES[@]}"; do

View File

@@ -7,6 +7,7 @@ show_clean_help() {
echo ""
echo "Options:"
echo " --dry-run, -n Preview cleanup without making changes"
echo " --external PATH Clean OS metadata from a mounted external volume"
echo " --whitelist Manage protected paths"
echo " --debug Show detailed operation logs"
echo " -h, --help Show this help message"

View File

@@ -12,15 +12,13 @@ if [[ -z "${PURGE_TARGETS:-}" ]]; then
source "$_MOLE_MANAGE_DIR/../clean/project.sh"
fi
# Config file path (use :- to avoid re-declaration if already set)
PURGE_PATHS_CONFIG="${PURGE_PATHS_CONFIG:-$HOME/.config/mole/purge_paths}"
# Config file path (prefer the shared project constant when available)
PURGE_PATHS_CONFIG="${PURGE_PATHS_CONFIG:-${PURGE_CONFIG_FILE:-$HOME/.config/mole/purge_paths}}"
# Ensure config file exists with helpful template
ensure_config_template() {
if [[ ! -f "$PURGE_PATHS_CONFIG" ]]; then
ensure_user_dir "$(dirname "$PURGE_PATHS_CONFIG")"
cat > "$PURGE_PATHS_CONFIG" << 'EOF'
# Mole Purge Paths - Directories to scan for project artifacts
if ! write_purge_config "# Mole Purge Paths - Directories to scan for project artifacts
# Add one path per line (supports ~ for home directory)
# Delete all paths or this file to use defaults
#
@@ -28,7 +26,9 @@ ensure_config_template() {
# ~/Documents/MyProjects
# ~/Work/ClientA
# ~/Work/ClientB
EOF
"; then
echo -e "${YELLOW}${ICON_WARNING}${NC} Could not initialize ${PURGE_PATHS_CONFIG/#$HOME/~}" >&2
fi
fi
}

View File

@@ -445,11 +445,27 @@ opt_launch_services_rebuild() {
}
# Font cache rebuild.
# browser_family_is_running NAME
# Return 0 when the named browser, or any of its helper processes, is
# running. Firefox and Zen Browser spawn helpers (contentproc,
# plugin-container, crashreporter) whose names do not match the product
# name, so they get explicit multi-pattern pgrep regexes; every other
# browser uses an exact, case-insensitive process-name match.
browser_family_is_running() {
local browser_name="$1"
case "$browser_name" in
"Firefox")
pgrep -if "Firefox|org\\.mozilla\\.firefox|firefox .*contentproc|firefox .*plugin-container|firefox .*crashreporter" > /dev/null 2>&1
;;
"Zen Browser")
pgrep -if "Zen Browser|org\\.mozilla\\.zen|Zen Browser Helper|zen .*contentproc" > /dev/null 2>&1
;;
*)
# -x avoids matching generic helpers (renderer/gpu) of other apps.
pgrep -ix "$browser_name" > /dev/null 2>&1
;;
esac
}
opt_font_cache_rebuild() {
if [[ "${MO_DEBUG:-}" == "1" ]]; then
debug_operation_start "Font Cache Rebuild" "Clear and rebuild font cache"
debug_operation_detail "Method" "Run atsutil databases -remove"
debug_operation_detail "Safety checks" "Skip when browsers are running to avoid cache rebuild conflicts"
debug_operation_detail "Safety checks" "Skip when browsers or browser helpers are running to avoid cache rebuild conflicts"
debug_operation_detail "Expected outcome" "Fixed font display issues, removed corrupted font cache"
debug_risk_level "LOW" "System automatically rebuilds font database"
fi
@@ -457,15 +473,13 @@ opt_font_cache_rebuild() {
local success=false
if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then
# Some browsers (notably Firefox) can keep stale GPU/text caches in /var/folders if
# system font databases are reset while browser/helper processes are still running.
# Some browsers can keep stale GPU/text caches in /var/folders if system font
# databases are reset while browser/helper processes are still running.
local -a running_browsers=()
if pgrep -if "Firefox|org\\.mozilla\\.firefox|firefox-gpu-helper" > /dev/null 2>&1; then
running_browsers+=("Firefox")
fi
local browser_name
local -a browser_checks=(
"Firefox"
"Safari"
"Google Chrome"
"Chromium"
@@ -478,7 +492,7 @@ opt_font_cache_rebuild() {
"Helium"
)
for browser_name in "${browser_checks[@]}"; do
if pgrep -ix "$browser_name" > /dev/null 2>&1; then
if browser_family_is_running "$browser_name"; then
running_browsers+=("$browser_name")
fi
done
@@ -487,8 +501,8 @@ opt_font_cache_rebuild() {
local running_list
running_list=$(printf "%s, " "${running_browsers[@]}")
running_list="${running_list%, }"
echo -e " ${YELLOW}${ICON_WARNING}${NC} Skipped font cache rebuild because browsers are running: ${running_list}"
echo -e " ${GRAY}${ICON_REVIEW}${NC} ${GRAY}Quit browsers completely, then rerun optimize if font issues persist${NC}"
echo -e " ${YELLOW}${ICON_WARNING}${NC} Skipped font cache rebuild because browsers or helpers are still running: ${running_list}"
echo -e " ${GRAY}${ICON_REVIEW}${NC} ${GRAY}Quit affected browsers completely, then rerun optimize if font issues persist${NC}"
return 0
fi

View File

@@ -645,3 +645,74 @@ EOF
[[ "$output" == *"FOUND: .hidden_dir"* ]]
[[ "$output" == *"FOUND: regular_file.txt"* ]]
}
# Regression: the volumes root itself may be a symlink (custom roots in
# tests); validation must canonicalize it before the containment check so
# a target under the symlinked root is still accepted.
@test "validate_external_volume_target canonicalizes root before comparing target" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
mock_bin="$HOME/bin"
mkdir -p "$mock_bin"
cat > "$mock_bin/diskutil" <<'MOCK'
#!/bin/bash
exit 0
MOCK
chmod +x "$mock_bin/diskutil"
export PATH="$mock_bin:$PATH"
real_root="$(mktemp -d "$HOME/ext-real.XXXXXX")"
link_root="$HOME/ext-link"
ln -s "$real_root" "$link_root"
mkdir -p "$link_root/USB"
export MOLE_EXTERNAL_VOLUMES_ROOT="$link_root"
resolved=$(validate_external_volume_target "$link_root/USB")
echo "RESOLVED=$resolved"
EOF
# Expect success: a resolved .../USB path and no containment error.
[ "$status" -eq 0 ]
[[ "$output" == *"RESOLVED="*"/USB"* ]]
[[ "$output" != *"must be under"* ]]
}
# Guard against unbounded per-container du probes: with the precise-size
# limit set to 2, only two of the five sandbox containers may receive a
# precise size scan; the rest must fall back to the partial-size path.
@test "clean_app_caches caps precise sandbox size scans when many containers exist" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=true MOLE_CONTAINER_CACHE_PRECISE_SIZE_LIMIT=2 bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
start_section_spinner() { :; }
stop_section_spinner() { :; }
safe_clean() { :; }
clean_support_app_data() { :; }
clean_group_container_caches() { :; }
bytes_to_human() { echo "0B"; }
note_activity() { :; }
should_protect_data() { return 1; }
is_critical_system_component() { return 1; }
files_cleaned=0
total_size_cleaned=0
total_items=0
count_file="$HOME/size-count"
get_path_size_kb() {
local count
count=$(cat "$count_file" 2> /dev/null || echo "0")
count=$((count + 1))
echo "$count" > "$count_file"
echo "1"
}
for i in $(seq 1 5); do
mkdir -p "$HOME/Library/Containers/com.example.$i/Data/Library/Caches"
touch "$HOME/Library/Containers/com.example.$i/Data/Library/Caches/file-$i.tmp"
done
clean_app_caches
echo "SIZE_CALLS=$(cat "$count_file")"
EOF
# SIZE_CALLS is the number of get_path_size_kb invocations the stub recorded.
[ "$status" -eq 0 ]
[[ "$output" == *"Sandboxed app caches"* ]]
[[ "$output" == *"SIZE_CALLS=2"* ]]
}

View File

@@ -237,6 +237,34 @@ EOF
[ "$status" -eq 0 ]
}
# The clean help text must advertise the new --external PATH flag.
@test "mo clean --help includes external volume option" {
run env HOME="$HOME" "$PROJECT_ROOT/mole" clean --help
[ "$status" -eq 0 ]
[[ "$output" == *"--external PATH"* ]]
}
# End-to-end: `mo clean --external` against a volume under a symlinked
# custom root should canonicalize the path, run in dry-run mode, and
# print both the external-volume banner and the cleanup summary line.
@test "mo clean --external accepts canonicalized custom root" {
real_root="$(mktemp -d "$HOME/ext-real.XXXXXX")"
link_root="$HOME/ext-link"
ln -s "$real_root" "$link_root"
mkdir -p "$link_root/USB/.Trashes"
touch "$link_root/USB/.Trashes/cache.tmp"
# Stub diskutil so the internal/network-volume probe always passes.
mock_bin="$HOME/mock-bin"
mkdir -p "$mock_bin"
cat > "$mock_bin/diskutil" <<'EOF'
#!/usr/bin/env bash
exit 0
EOF
chmod +x "$mock_bin/diskutil"
run env HOME="$HOME" PATH="$mock_bin:$PATH" MOLE_EXTERNAL_VOLUMES_ROOT="$link_root" \
MOLE_TEST_NO_AUTH=1 "$PROJECT_ROOT/mole" clean --external "$link_root/USB" --dry-run
[ "$status" -eq 0 ]
[[ "$output" == *"Clean External Volume"* ]]
[[ "$output" == *"External volume cleanup"* ]]
}
@test "touchid status reflects pam file contents" {
pam_file="$HOME/pam_test"
cat >"$pam_file" <<'EOF'

View File

@@ -141,6 +141,54 @@ EOF
[[ "$output" == *"Font cache cleared"* ]]
}
# Firefox helper processes (contentproc, plugin-container, crashreporter)
# must count as "Firefox running" and make the font-cache rebuild skip.
# The pgrep stub only answers yes to the Firefox multi-pattern regex.
@test "opt_font_cache_rebuild skips when Firefox helpers are running" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/optimize/tasks.sh"
pgrep() {
case "$*" in
*"Firefox|org\\.mozilla\\.firefox|firefox .*contentproc|firefox .*plugin-container|firefox .*crashreporter"*)
return 0
;;
*)
return 1
;;
esac
}
export -f pgrep
opt_font_cache_rebuild
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Skipped font cache rebuild because browsers or helpers are still running: Firefox"* ]]
}
# A pgrep stub that only matches generic "renderer|gpu" helper names must
# NOT be mistaken for Zen Browser — its detection patterns are
# product-specific, so no MATCHED marker should appear in the output.
@test "browser_family_is_running does not treat generic renderer helpers as Zen Browser" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/optimize/tasks.sh"
pgrep() {
case "$*" in
*"renderer|gpu"*)
return 0
;;
*)
return 1
;;
esac
}
export -f pgrep
if browser_family_is_running "Zen Browser"; then
echo "MATCHED"
fi
EOF
[ "$status" -eq 0 ]
[[ "$output" != *"MATCHED"* ]]
}
@test "opt_dock_refresh clears cache files" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" MOLE_DRY_RUN=1 bash --noprofile --norc <<'EOF'
set -euo pipefail