mirror of
https://github.com/tw93/Mole.git
synced 2026-02-04 11:31:46 +00:00
Performance and speed optimization
This commit is contained in:
224
bin/clean.sh
224
bin/clean.sh
@@ -284,11 +284,15 @@ safe_clean() {
|
||||
(
|
||||
local size
|
||||
size=$(get_path_size_kb "$path")
|
||||
local count
|
||||
count=$(find "$path" -type f 2> /dev/null | wc -l | tr -d ' ')
|
||||
# Use index + PID for unique filename
|
||||
local tmp_file="$temp_dir/result_${idx}.$$"
|
||||
echo "$size $count" > "$tmp_file"
|
||||
# Optimization: Skip expensive file counting. Size is the key metric.
|
||||
# Just indicate presence if size > 0
|
||||
if [[ "$size" -gt 0 ]]; then
|
||||
echo "$size 1" > "$tmp_file"
|
||||
else
|
||||
echo "0 0" > "$tmp_file"
|
||||
fi
|
||||
mv "$tmp_file" "$temp_dir/result_${idx}" 2> /dev/null || true
|
||||
) &
|
||||
pids+=($!)
|
||||
@@ -298,8 +302,8 @@ safe_clean() {
|
||||
wait "${pids[0]}" 2> /dev/null || true
|
||||
pids=("${pids[@]:1}")
|
||||
((completed++))
|
||||
# Update progress every 10 items for smoother display
|
||||
if [[ -t 1 ]] && ((completed % 10 == 0)); then
|
||||
# Update progress less frequently to reduce overhead
|
||||
if [[ -t 1 ]] && ((completed % 20 == 0)); then
|
||||
stop_inline_spinner
|
||||
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning items ($completed/$total_paths)..."
|
||||
fi
|
||||
@@ -327,7 +331,7 @@ safe_clean() {
|
||||
fi
|
||||
fi
|
||||
((total_size_bytes += size))
|
||||
((total_count += count))
|
||||
((total_count += 1))
|
||||
removed_any=1
|
||||
fi
|
||||
fi
|
||||
@@ -343,10 +347,9 @@ safe_clean() {
|
||||
for path in "${existing_paths[@]}"; do
|
||||
local size_bytes
|
||||
size_bytes=$(get_path_size_kb "$path")
|
||||
local count
|
||||
count=$(find "$path" -type f 2> /dev/null | wc -l | tr -d ' ')
|
||||
|
||||
if [[ "$count" -gt 0 && "$size_bytes" -gt 0 ]]; then
|
||||
# Optimization: Skip expensive file counting
|
||||
if [[ "$size_bytes" -gt 0 ]]; then
|
||||
if [[ "$DRY_RUN" != "true" ]]; then
|
||||
# Handle symbolic links separately (only remove the link, not the target)
|
||||
if [[ -L "$path" ]]; then
|
||||
@@ -356,7 +359,7 @@ safe_clean() {
|
||||
fi
|
||||
fi
|
||||
((total_size_bytes += size_bytes))
|
||||
((total_count += count))
|
||||
((total_count += 1))
|
||||
removed_any=1
|
||||
fi
|
||||
done
|
||||
@@ -658,191 +661,7 @@ perform_cleanup() {
|
||||
# Only touch apps missing from scan + 60+ days inactive
|
||||
# Skip protected vendors, keep Preferences/Application Support
|
||||
start_section "Uninstalled app data"
|
||||
|
||||
# Check if we have permission to access Library folders
|
||||
# Use simple ls test instead of find to avoid hanging
|
||||
local has_library_access=true
|
||||
if ! ls "$HOME/Library/Caches" > /dev/null 2>&1; then
|
||||
has_library_access=false
|
||||
fi
|
||||
|
||||
if [[ "$has_library_access" == "false" ]]; then
|
||||
note_activity
|
||||
echo -e " ${YELLOW}${ICON_WARNING}${NC} Skipped: No permission to access Library folders"
|
||||
echo -e " ${GRAY}Tip: Grant 'Full Disk Access' to iTerm2/Terminal in System Settings${NC}"
|
||||
else
|
||||
|
||||
local -r ORPHAN_AGE_THRESHOLD=60 # 60 days - good balance between safety and cleanup
|
||||
|
||||
# Build list of installed application bundle identifiers
|
||||
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning installed applications..."
|
||||
local installed_bundles=$(create_temp_file)
|
||||
|
||||
# Simplified: only scan primary locations (reduces scan time by ~70%)
|
||||
local -a search_paths=(
|
||||
"/Applications"
|
||||
"$HOME/Applications"
|
||||
)
|
||||
|
||||
# Scan for .app bundles with timeout protection
|
||||
for search_path in "${search_paths[@]}"; do
|
||||
[[ -d "$search_path" ]] || continue
|
||||
while IFS= read -r app; do
|
||||
[[ -f "$app/Contents/Info.plist" ]] || continue
|
||||
bundle_id=$(defaults read "$app/Contents/Info.plist" CFBundleIdentifier 2> /dev/null || echo "")
|
||||
[[ -n "$bundle_id" ]] && echo "$bundle_id" >> "$installed_bundles"
|
||||
done < <(run_with_timeout 10 find "$search_path" -maxdepth 2 -type d -name "*.app" 2> /dev/null || true)
|
||||
done
|
||||
|
||||
# Get running applications and LaunchAgents with timeout protection
|
||||
local running_apps=$(run_with_timeout 5 osascript -e 'tell application "System Events" to get bundle identifier of every application process' 2> /dev/null || echo "")
|
||||
echo "$running_apps" | tr ',' '\n' | sed -e 's/^ *//;s/ *$//' -e '/^$/d' >> "$installed_bundles"
|
||||
|
||||
run_with_timeout 5 find ~/Library/LaunchAgents /Library/LaunchAgents \
|
||||
-name "*.plist" -type f 2> /dev/null | while IFS= read -r plist; do
|
||||
basename "$plist" .plist
|
||||
done >> "$installed_bundles" 2> /dev/null || true
|
||||
|
||||
# Deduplicate
|
||||
sort -u "$installed_bundles" -o "$installed_bundles"
|
||||
|
||||
local app_count=$(wc -l < "$installed_bundles" 2> /dev/null | tr -d ' ')
|
||||
stop_inline_spinner
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Found $app_count active/installed apps"
|
||||
|
||||
# Track statistics
|
||||
local orphaned_count=0
|
||||
local total_orphaned_kb=0
|
||||
|
||||
# Check if bundle is orphaned - conservative approach
|
||||
is_orphaned() {
|
||||
local bundle_id="$1"
|
||||
local directory_path="$2"
|
||||
|
||||
# Skip system-critical and protected apps
|
||||
if should_protect_data "$bundle_id"; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Check if app exists in our scan
|
||||
if grep -q "^$bundle_id$" "$installed_bundles" 2> /dev/null; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Extra check for system bundles
|
||||
case "$bundle_id" in
|
||||
com.apple.* | loginwindow | dock | systempreferences | finder | safari)
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
|
||||
# Skip major vendors
|
||||
case "$bundle_id" in
|
||||
com.adobe.* | com.microsoft.* | com.google.* | org.mozilla.* | com.jetbrains.* | com.docker.*)
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
|
||||
# Check file age - only clean if 60+ days inactive
|
||||
# Use modification time (mtime) instead of access time (atime)
|
||||
# because macOS disables atime updates by default for performance
|
||||
if [[ -e "$directory_path" ]]; then
|
||||
local last_modified_epoch=$(get_file_mtime "$directory_path")
|
||||
local current_epoch=$(date +%s)
|
||||
local days_since_modified=$(((current_epoch - last_modified_epoch) / 86400))
|
||||
|
||||
if [[ $days_since_modified -lt $ORPHAN_AGE_THRESHOLD ]]; then
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Unified orphaned resource scanner (caches, logs, states, webkit, HTTP, cookies)
|
||||
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning orphaned app resources..."
|
||||
|
||||
# Define resource types to scan
|
||||
# CRITICAL: NEVER add LaunchAgents or LaunchDaemons (breaks login items/startup apps)
|
||||
local -a resource_types=(
|
||||
"$HOME/Library/Caches|Caches|com.*:org.*:net.*:io.*"
|
||||
"$HOME/Library/Logs|Logs|com.*:org.*:net.*:io.*"
|
||||
"$HOME/Library/Saved Application State|States|*.savedState"
|
||||
"$HOME/Library/WebKit|WebKit|com.*:org.*:net.*:io.*"
|
||||
"$HOME/Library/HTTPStorages|HTTP|com.*:org.*:net.*:io.*"
|
||||
"$HOME/Library/Cookies|Cookies|*.binarycookies"
|
||||
)
|
||||
|
||||
orphaned_count=0
|
||||
|
||||
for resource_type in "${resource_types[@]}"; do
|
||||
IFS='|' read -r base_path label patterns <<< "$resource_type"
|
||||
|
||||
# Check both existence and permission to avoid hanging
|
||||
if [[ ! -d "$base_path" ]]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
# Quick permission check - if we can't ls the directory, skip it
|
||||
if ! ls "$base_path" > /dev/null 2>&1; then
|
||||
continue
|
||||
fi
|
||||
|
||||
# Build file pattern array
|
||||
local -a file_patterns=()
|
||||
IFS=':' read -ra pattern_arr <<< "$patterns"
|
||||
for pat in "${pattern_arr[@]}"; do
|
||||
file_patterns+=("$base_path/$pat")
|
||||
done
|
||||
|
||||
# Scan and clean orphaned items
|
||||
for item_path in "${file_patterns[@]}"; do
|
||||
# Use shell glob (no ls needed)
|
||||
# Limit iterations to prevent hanging on directories with too many files
|
||||
local iteration_count=0
|
||||
local max_iterations=100
|
||||
|
||||
for match in $item_path; do
|
||||
[[ -e "$match" ]] || continue
|
||||
|
||||
# Safety: limit iterations to prevent infinite loops on massive directories
|
||||
((iteration_count++))
|
||||
if [[ $iteration_count -gt $max_iterations ]]; then
|
||||
break
|
||||
fi
|
||||
|
||||
# Extract bundle ID from filename
|
||||
local bundle_id=$(basename "$match")
|
||||
bundle_id="${bundle_id%.savedState}"
|
||||
bundle_id="${bundle_id%.binarycookies}"
|
||||
|
||||
if is_orphaned "$bundle_id" "$match"; then
|
||||
# Use timeout to prevent du from hanging on large/problematic directories
|
||||
local size_kb
|
||||
size_kb=$(run_with_timeout 2 du -sk "$match" 2> /dev/null | awk '{print $1}' || echo "0")
|
||||
if [[ -z "$size_kb" || "$size_kb" == "0" ]]; then
|
||||
continue
|
||||
fi
|
||||
safe_clean "$match" "Orphaned $label: $bundle_id"
|
||||
((orphaned_count++))
|
||||
((total_orphaned_kb += size_kb))
|
||||
fi
|
||||
done
|
||||
done
|
||||
done
|
||||
|
||||
stop_inline_spinner
|
||||
|
||||
if [[ $orphaned_count -gt 0 ]]; then
|
||||
local orphaned_mb=$(echo "$total_orphaned_kb" | awk '{printf "%.1f", $1/1024}')
|
||||
echo " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $orphaned_count items (~${orphaned_mb}MB)"
|
||||
note_activity
|
||||
fi
|
||||
|
||||
rm -f "$installed_bundles"
|
||||
|
||||
fi # end of has_library_access check
|
||||
|
||||
clean_orphaned_app_data
|
||||
end_section
|
||||
|
||||
# ===== 13. Apple Silicon optimizations =====
|
||||
@@ -888,7 +707,7 @@ perform_cleanup() {
|
||||
if [[ "$DRY_RUN" == "true" ]]; then
|
||||
# Build compact stats line for dry run
|
||||
local stats="Potential space: ${GREEN}${freed_gb}GB${NC}"
|
||||
[[ $files_cleaned -gt 0 ]] && stats+=" | Files: $files_cleaned"
|
||||
[[ $files_cleaned -gt 0 ]] && stats+=" | Items: $files_cleaned"
|
||||
[[ $total_items -gt 0 ]] && stats+=" | Categories: $total_items"
|
||||
[[ $whitelist_skipped_count -gt 0 ]] && stats+=" | Protected: $whitelist_skipped_count"
|
||||
summary_details+=("$stats")
|
||||
@@ -900,7 +719,7 @@ perform_cleanup() {
|
||||
echo "# Summary"
|
||||
echo "# ============================================"
|
||||
echo "# Potential cleanup: ${freed_gb}GB"
|
||||
echo "# Files: $files_cleaned"
|
||||
echo "# Items: $files_cleaned"
|
||||
echo "# Categories: $total_items"
|
||||
[[ $whitelist_skipped_count -gt 0 ]] && echo "# Protected by whitelist: $whitelist_skipped_count"
|
||||
} >> "$EXPORT_LIST_FILE"
|
||||
@@ -912,11 +731,11 @@ perform_cleanup() {
|
||||
summary_details+=("Free space now: $(get_free_space)")
|
||||
|
||||
if [[ $files_cleaned -gt 0 && $total_items -gt 0 ]]; then
|
||||
local stats="Files cleaned: $files_cleaned | Categories: $total_items"
|
||||
local stats="Items cleaned: $files_cleaned | Categories: $total_items"
|
||||
[[ $whitelist_skipped_count -gt 0 ]] && stats+=" | Protected: $whitelist_skipped_count"
|
||||
summary_details+=("$stats")
|
||||
elif [[ $files_cleaned -gt 0 ]]; then
|
||||
local stats="Files cleaned: $files_cleaned"
|
||||
local stats="Items cleaned: $files_cleaned"
|
||||
[[ $whitelist_skipped_count -gt 0 ]] && stats+=" | Protected: $whitelist_skipped_count"
|
||||
summary_details+=("$stats")
|
||||
elif [[ $total_items -gt 0 ]]; then
|
||||
@@ -943,14 +762,17 @@ perform_cleanup() {
|
||||
summary_details+=("Free space now: $(get_free_space)")
|
||||
fi
|
||||
|
||||
print_summary_block "$summary_status" "$summary_heading" "${summary_details[@]}"
|
||||
print_summary_block "$summary_heading" "${summary_details[@]}"
|
||||
printf '\n'
|
||||
}
|
||||
|
||||
main() {
|
||||
# Parse args (only dry-run and whitelist)
|
||||
# Parse args
|
||||
for arg in "$@"; do
|
||||
case "$arg" in
|
||||
"--debug")
|
||||
export MO_DEBUG=1
|
||||
;;
|
||||
"--dry-run" | "-n")
|
||||
DRY_RUN=true
|
||||
;;
|
||||
|
||||
@@ -10,6 +10,7 @@ source "$SCRIPT_DIR/lib/manage/update.sh"
|
||||
source "$SCRIPT_DIR/lib/manage/autofix.sh"
|
||||
source "$SCRIPT_DIR/lib/optimize/maintenance.sh"
|
||||
source "$SCRIPT_DIR/lib/optimize/tasks.sh"
|
||||
source "$SCRIPT_DIR/lib/check/health_json.sh"
|
||||
|
||||
# Load check modules
|
||||
source "$SCRIPT_DIR/lib/check/all.sh"
|
||||
@@ -73,7 +74,6 @@ show_optimization_summary() {
|
||||
if ((safe_count == 0 && confirm_count == 0)) && [[ -z "${AUTO_FIX_SUMMARY:-}" ]]; then
|
||||
return
|
||||
fi
|
||||
echo ""
|
||||
local summary_title="Optimization and Check Complete"
|
||||
local -a summary_details=()
|
||||
|
||||
@@ -98,7 +98,7 @@ show_optimization_summary() {
|
||||
if [[ "${OPTIMIZE_SHOW_TOUCHID_TIP:-false}" == "true" ]]; then
|
||||
echo -e "${YELLOW}☻${NC} Run ${GRAY}mo touchid${NC} to approve sudo via Touch ID"
|
||||
fi
|
||||
print_summary_block "success" "$summary_title" "${summary_details[@]}"
|
||||
print_summary_block "$summary_title" "${summary_details[@]}"
|
||||
}
|
||||
|
||||
show_system_health() {
|
||||
@@ -325,6 +325,15 @@ cleanup_all() {
|
||||
}
|
||||
|
||||
main() {
|
||||
# Parse args
|
||||
for arg in "$@"; do
|
||||
case "$arg" in
|
||||
"--debug")
|
||||
export MO_DEBUG=1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Register unified cleanup handler
|
||||
trap cleanup_all EXIT INT TERM
|
||||
|
||||
|
||||
@@ -4,6 +4,27 @@
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# ============================================================================
|
||||
# Helper Functions
|
||||
# ============================================================================
|
||||
|
||||
list_login_items() {
|
||||
if ! command -v osascript > /dev/null 2>&1; then
|
||||
return
|
||||
fi
|
||||
|
||||
local raw_items
|
||||
raw_items=$(osascript -e 'tell application "System Events" to get the name of every login item' 2> /dev/null || echo "")
|
||||
[[ -z "$raw_items" || "$raw_items" == "missing value" ]] && return
|
||||
|
||||
IFS=',' read -ra login_items_array <<< "$raw_items"
|
||||
for entry in "${login_items_array[@]}"; do
|
||||
local trimmed
|
||||
trimmed=$(echo "$entry" | sed 's/^[[:space:]]*//; s/[[:space:]]*$//')
|
||||
[[ -n "$trimmed" ]] && printf "%s\n" "$trimmed"
|
||||
done
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# Configuration Checks
|
||||
# ============================================================================
|
||||
|
||||
153
lib/check/health_json.sh
Normal file
153
lib/check/health_json.sh
Normal file
@@ -0,0 +1,153 @@
|
||||
#!/bin/bash
|
||||
# System Health Check - JSON Generator
|
||||
# Extracted from tasks.sh
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Ensure dependencies are loaded (only if running standalone)
|
||||
if [[ -z "${MOLE_FILE_OPS_LOADED:-}" ]]; then
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
|
||||
source "$SCRIPT_DIR/lib/core/file_ops.sh"
|
||||
fi
|
||||
|
||||
# Get memory info in GB
|
||||
get_memory_info() {
|
||||
local total_bytes used_gb total_gb
|
||||
|
||||
# Total memory
|
||||
total_bytes=$(sysctl -n hw.memsize 2> /dev/null || echo "0")
|
||||
total_gb=$(awk "BEGIN {printf \"%.2f\", $total_bytes / (1024*1024*1024)}" 2> /dev/null || echo "0")
|
||||
[[ -z "$total_gb" || "$total_gb" == "" ]] && total_gb="0"
|
||||
|
||||
# Used memory from vm_stat
|
||||
local vm_output active wired compressed page_size
|
||||
vm_output=$(vm_stat 2> /dev/null || echo "")
|
||||
page_size=4096
|
||||
|
||||
active=$(echo "$vm_output" | awk '/Pages active:/ {print $NF}' | tr -d '.' 2> /dev/null || echo "0")
|
||||
wired=$(echo "$vm_output" | awk '/Pages wired down:/ {print $NF}' | tr -d '.' 2> /dev/null || echo "0")
|
||||
compressed=$(echo "$vm_output" | awk '/Pages occupied by compressor:/ {print $NF}' | tr -d '.' 2> /dev/null || echo "0")
|
||||
|
||||
active=${active:-0}
|
||||
wired=${wired:-0}
|
||||
compressed=${compressed:-0}
|
||||
|
||||
local used_bytes=$(((active + wired + compressed) * page_size))
|
||||
used_gb=$(awk "BEGIN {printf \"%.2f\", $used_bytes / (1024*1024*1024)}" 2> /dev/null || echo "0")
|
||||
[[ -z "$used_gb" || "$used_gb" == "" ]] && used_gb="0"
|
||||
|
||||
echo "$used_gb $total_gb"
|
||||
}
|
||||
|
||||
# Get disk info
|
||||
get_disk_info() {
|
||||
local home="${HOME:-/}"
|
||||
local df_output total_gb used_gb used_percent
|
||||
|
||||
df_output=$(command df -k "$home" 2> /dev/null | tail -1)
|
||||
|
||||
local total_kb used_kb
|
||||
total_kb=$(echo "$df_output" | awk '{print $2}' 2> /dev/null || echo "0")
|
||||
used_kb=$(echo "$df_output" | awk '{print $3}' 2> /dev/null || echo "0")
|
||||
|
||||
total_kb=${total_kb:-0}
|
||||
used_kb=${used_kb:-0}
|
||||
[[ "$total_kb" == "0" ]] && total_kb=1 # Avoid division by zero
|
||||
|
||||
total_gb=$(awk "BEGIN {printf \"%.2f\", $total_kb / (1024*1024)}" 2> /dev/null || echo "0")
|
||||
used_gb=$(awk "BEGIN {printf \"%.2f\", $used_kb / (1024*1024)}" 2> /dev/null || echo "0")
|
||||
used_percent=$(awk "BEGIN {printf \"%.1f\", ($used_kb / $total_kb) * 100}" 2> /dev/null || echo "0")
|
||||
|
||||
[[ -z "$total_gb" || "$total_gb" == "" ]] && total_gb="0"
|
||||
[[ -z "$used_gb" || "$used_gb" == "" ]] && used_gb="0"
|
||||
[[ -z "$used_percent" || "$used_percent" == "" ]] && used_percent="0"
|
||||
|
||||
echo "$used_gb $total_gb $used_percent"
|
||||
}
|
||||
|
||||
# Get uptime in days
|
||||
get_uptime_days() {
|
||||
local boot_output boot_time uptime_days
|
||||
|
||||
boot_output=$(sysctl -n kern.boottime 2> /dev/null || echo "")
|
||||
boot_time=$(echo "$boot_output" | sed -n 's/.*sec = \([0-9]*\).*/\1/p' 2> /dev/null || echo "")
|
||||
|
||||
if [[ -n "$boot_time" && "$boot_time" =~ ^[0-9]+$ ]]; then
|
||||
local now=$(date +%s 2> /dev/null || echo "0")
|
||||
local uptime_sec=$((now - boot_time))
|
||||
uptime_days=$(awk "BEGIN {printf \"%.1f\", $uptime_sec / 86400}" 2> /dev/null || echo "0")
|
||||
else
|
||||
uptime_days="0"
|
||||
fi
|
||||
|
||||
[[ -z "$uptime_days" || "$uptime_days" == "" ]] && uptime_days="0"
|
||||
echo "$uptime_days"
|
||||
}
|
||||
|
||||
# Generate JSON output
|
||||
generate_health_json() {
|
||||
# System info
|
||||
read -r mem_used mem_total <<< "$(get_memory_info)"
|
||||
read -r disk_used disk_total disk_percent <<< "$(get_disk_info)"
|
||||
local uptime=$(get_uptime_days)
|
||||
|
||||
# Ensure all values are valid numbers (fallback to 0)
|
||||
mem_used=${mem_used:-0}
|
||||
mem_total=${mem_total:-0}
|
||||
disk_used=${disk_used:-0}
|
||||
disk_total=${disk_total:-0}
|
||||
disk_percent=${disk_percent:-0}
|
||||
uptime=${uptime:-0}
|
||||
|
||||
# Start JSON
|
||||
cat << EOF
|
||||
{
|
||||
"memory_used_gb": $mem_used,
|
||||
"memory_total_gb": $mem_total,
|
||||
"disk_used_gb": $disk_used,
|
||||
"disk_total_gb": $disk_total,
|
||||
"disk_used_percent": $disk_percent,
|
||||
"uptime_days": $uptime,
|
||||
"optimizations": [
|
||||
EOF
|
||||
|
||||
# Collect all optimization items
|
||||
local -a items=()
|
||||
|
||||
# Always-on items (no size checks - instant)
|
||||
items+=('system_maintenance|System Maintenance|Rebuild system databases & flush caches|true')
|
||||
items+=('maintenance_scripts|Maintenance Scripts|Run daily/weekly/monthly scripts & rotate logs|true')
|
||||
items+=('radio_refresh|Bluetooth & Wi-Fi Refresh|Reset wireless preference caches|true')
|
||||
items+=('recent_items|Recent Items|Clear recent apps/documents/servers lists|true')
|
||||
items+=('log_cleanup|Diagnostics Cleanup|Purge old diagnostic & crash logs|true')
|
||||
items+=('startup_cache|Startup Cache Rebuild|Rebuild kext caches & prelinked kernel|true')
|
||||
|
||||
# Output items as JSON
|
||||
local first=true
|
||||
for item in "${items[@]}"; do
|
||||
IFS='|' read -r action name desc safe <<< "$item"
|
||||
|
||||
[[ "$first" == "true" ]] && first=false || echo ","
|
||||
|
||||
cat << EOF
|
||||
{
|
||||
"category": "system",
|
||||
"name": "$name",
|
||||
"description": "$desc",
|
||||
"action": "$action",
|
||||
"safe": $safe
|
||||
}
|
||||
EOF
|
||||
done
|
||||
|
||||
# Close JSON
|
||||
cat << 'EOF'
|
||||
]
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
# Main execution (for testing)
|
||||
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
|
||||
generate_health_json
|
||||
fi
|
||||
@@ -137,7 +137,7 @@ clean_orphaned_app_data() {
|
||||
fi
|
||||
|
||||
# Check if app exists in our scan
|
||||
if grep -q "^$bundle_id$" "$installed_bundles" 2> /dev/null; then
|
||||
if grep -Fxq "$bundle_id" "$installed_bundles" 2> /dev/null; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
@@ -231,7 +231,7 @@ clean_orphaned_app_data() {
|
||||
if is_orphaned "$bundle_id" "$match"; then
|
||||
# Use timeout to prevent du from hanging on network mounts or problematic paths
|
||||
local size_kb
|
||||
size_kb=$(run_with_timeout 5 get_path_size_kb "$match")
|
||||
size_kb=$(get_path_size_kb "$match")
|
||||
if [[ -z "$size_kb" || "$size_kb" == "0" ]]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
@@ -64,7 +64,8 @@ clean_orphaned_casks() {
|
||||
if sudo -n true 2> /dev/null; then
|
||||
if [[ -t 1 ]]; then
|
||||
stop_inline_spinner
|
||||
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Cleaning orphaned casks..."
|
||||
echo -e " ${BLUE}${ICON_ARROW}${NC} Removing orphaned Homebrew casks (may require password for certain apps)"
|
||||
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Cleaning..."
|
||||
fi
|
||||
|
||||
local removed_casks=0
|
||||
@@ -76,7 +77,7 @@ clean_orphaned_casks() {
|
||||
|
||||
if [[ -t 1 ]]; then stop_inline_spinner; fi
|
||||
|
||||
[[ $removed_casks -gt 0 ]] && log_success "Orphaned Homebrew casks ($removed_casks apps)"
|
||||
[[ $removed_casks -gt 0 ]] && echo -e " ${GREEN}${ICON_SUCCESS}${NC} Removed $removed_casks orphaned cask(s)"
|
||||
else
|
||||
# Sudo session expired - inform user to run brew manually
|
||||
if [[ -t 1 ]]; then stop_inline_spinner; fi
|
||||
|
||||
@@ -3,6 +3,22 @@
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Helper function to clean tool caches using their built-in commands
|
||||
# Args: $1 - description, $@ - command to execute
|
||||
# Env: DRY_RUN
|
||||
clean_tool_cache() {
|
||||
local description="$1"
|
||||
shift
|
||||
|
||||
if [[ "$DRY_RUN" != "true" ]]; then
|
||||
if "$@" > /dev/null 2>&1; then
|
||||
echo -e " ${GREEN}${ICON_SUCCESS}${NC} $description"
|
||||
fi
|
||||
else
|
||||
echo -e " ${YELLOW}→${NC} $description (would clean)"
|
||||
fi
|
||||
}
|
||||
|
||||
# Clean npm cache (command + directories)
|
||||
# npm cache clean clears official npm cache, safe_clean handles alternative package managers
|
||||
# Env: DRY_RUN
|
||||
|
||||
@@ -53,7 +53,9 @@ clean_deep_system() {
|
||||
fi
|
||||
|
||||
# Clean orphaned cask records (delegated to clean_brew module)
|
||||
clean_orphaned_casks
|
||||
# DISABLED: This feature triggers password prompts and provides minimal benefit
|
||||
# Users can manually run: brew list --cask && brew uninstall --cask <name>
|
||||
# clean_orphaned_casks
|
||||
|
||||
# Clean macOS Install Data (system upgrade leftovers)
|
||||
# Only remove if older than 30 days to ensure system stability
|
||||
|
||||
@@ -5,9 +5,9 @@
|
||||
set -euo pipefail
|
||||
|
||||
# Get script directory and source dependencies
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
source "$SCRIPT_DIR/../core/common.sh"
|
||||
source "$SCRIPT_DIR/../ui/menu_simple.sh"
|
||||
_MOLE_MANAGE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
source "$_MOLE_MANAGE_DIR/../core/common.sh"
|
||||
source "$_MOLE_MANAGE_DIR/../ui/menu_simple.sh"
|
||||
|
||||
# Config file path
|
||||
WHITELIST_CONFIG="$HOME/.config/mole/whitelist"
|
||||
@@ -214,7 +214,7 @@ manage_whitelist_categories() {
|
||||
cache_patterns+=("$pattern")
|
||||
menu_options+=("$display_name")
|
||||
|
||||
((index++))
|
||||
((index++)) || true
|
||||
done < <(get_all_cache_items)
|
||||
|
||||
# Identify custom patterns (not in predefined list)
|
||||
@@ -325,6 +325,7 @@ manage_whitelist_categories() {
|
||||
|
||||
local total_protected=$((${#selected_patterns[@]} + ${#custom_patterns[@]}))
|
||||
local -a summary_lines=()
|
||||
summary_lines+=("Whitelist Updated")
|
||||
if [[ ${#custom_patterns[@]} -gt 0 ]]; then
|
||||
summary_lines+=("Protected ${#selected_patterns[@]} predefined + ${#custom_patterns[@]} custom patterns")
|
||||
else
|
||||
@@ -332,7 +333,7 @@ manage_whitelist_categories() {
|
||||
fi
|
||||
summary_lines+=("Saved to ${WHITELIST_CONFIG}")
|
||||
|
||||
print_summary_block "success" "${summary_lines[@]}"
|
||||
print_summary_block "${summary_lines[@]}"
|
||||
printf '\n'
|
||||
}
|
||||
|
||||
|
||||
@@ -37,7 +37,7 @@ fix_broken_preferences() {
|
||||
# Remove broken plist
|
||||
rm -f "$plist_file" 2> /dev/null || true
|
||||
((broken_count++))
|
||||
done < <(run_with_timeout 10 sh -c "find '$prefs_dir' -maxdepth 1 -name '*.plist' -type f 2> /dev/null || true")
|
||||
done < <(command find "$prefs_dir" -maxdepth 1 -name "*.plist" -type f 2> /dev/null || true)
|
||||
|
||||
# Check ByHost preferences with timeout protection
|
||||
local byhost_dir="$prefs_dir/ByHost"
|
||||
@@ -57,7 +57,7 @@ fix_broken_preferences() {
|
||||
|
||||
rm -f "$plist_file" 2> /dev/null || true
|
||||
((broken_count++))
|
||||
done < <(run_with_timeout 10 sh -c "find '$byhost_dir' -name '*.plist' -type f 2> /dev/null || true")
|
||||
done < <(command find "$byhost_dir" -name "*.plist" -type f 2> /dev/null || true)
|
||||
fi
|
||||
|
||||
echo "$broken_count"
|
||||
@@ -105,51 +105,7 @@ fix_broken_login_items() {
|
||||
launchctl unload "$plist_file" 2> /dev/null || true
|
||||
rm -f "$plist_file" 2> /dev/null || true
|
||||
((broken_count++))
|
||||
done < <(run_with_timeout 10 sh -c "find '$launch_agents_dir' -name '*.plist' -type f 2> /dev/null || true")
|
||||
done < <(command find "$launch_agents_dir" -name "*.plist" -type f 2> /dev/null || true)
|
||||
|
||||
echo "$broken_count"
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# Check for broken configurations
|
||||
# Returns: JSON line if issues found, empty otherwise
|
||||
# ============================================================================
|
||||
|
||||
check_broken_configs() {
|
||||
local prefs_dir="$HOME/Library/Preferences"
|
||||
local launch_agents_dir="$HOME/Library/LaunchAgents"
|
||||
|
||||
local broken_prefs=0
|
||||
local broken_items=0
|
||||
|
||||
# Count broken preferences
|
||||
if [[ -d "$prefs_dir" ]]; then
|
||||
while IFS= read -r plist_file; do
|
||||
[[ -f "$plist_file" ]] || continue
|
||||
local filename=$(basename "$plist_file")
|
||||
case "$filename" in
|
||||
com.apple.* | .GlobalPreferences* | loginwindow.plist) continue ;;
|
||||
esac
|
||||
plutil -lint "$plist_file" > /dev/null 2>&1 || ((broken_prefs++))
|
||||
done < <(run_with_timeout 10 sh -c "find '$prefs_dir' -maxdepth 1 -name '*.plist' -type f 2> /dev/null || true")
|
||||
fi
|
||||
|
||||
# Count broken login items
|
||||
if [[ -d "$launch_agents_dir" ]]; then
|
||||
while IFS= read -r plist_file; do
|
||||
[[ -f "$plist_file" ]] || continue
|
||||
local filename=$(basename "$plist_file")
|
||||
case "$filename" in com.apple.*) continue ;; esac
|
||||
|
||||
local program=$(plutil -extract Program raw "$plist_file" 2> /dev/null || echo "")
|
||||
[[ -z "$program" ]] && program=$(plutil -extract ProgramArguments.0 raw "$plist_file" 2> /dev/null || echo "")
|
||||
[[ -z "$program" ]] && continue
|
||||
[[ -e "$program" ]] || ((broken_items++))
|
||||
done < <(run_with_timeout 10 sh -c "find '$launch_agents_dir' -name '*.plist' -type f 2> /dev/null || true")
|
||||
fi
|
||||
|
||||
local total=$((broken_prefs + broken_items))
|
||||
if [[ $total -gt 0 ]]; then
|
||||
echo "fix_broken_configs|Fix Broken Configurations|Fix $total broken preference/login item files|false"
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -112,7 +112,6 @@ opt_maintenance_scripts() {
|
||||
stop_inline_spinner
|
||||
fi
|
||||
|
||||
# Show final status
|
||||
if [[ "$success" == "true" ]]; then
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Complete"
|
||||
else
|
||||
@@ -204,21 +203,16 @@ opt_mail_downloads() {
|
||||
fi
|
||||
|
||||
# Only delete old attachments (safety window)
|
||||
local deleted=0
|
||||
local cleaned=false
|
||||
for target_path in "${mail_dirs[@]}"; do
|
||||
if [[ -d "$target_path" ]]; then
|
||||
# Timeout protection: prevent find from hanging on large mail directories
|
||||
local file_count=$(run_with_timeout 15 sh -c "find \"$target_path\" -type f -mtime \"+$MOLE_LOG_AGE_DAYS\" 2> /dev/null | wc -l | tr -d ' '")
|
||||
[[ -z "$file_count" || ! "$file_count" =~ ^[0-9]+$ ]] && file_count=0
|
||||
if [[ "$file_count" -gt 0 ]]; then
|
||||
safe_find_delete "$target_path" "*" "$MOLE_LOG_AGE_DAYS" "f"
|
||||
deleted=$((deleted + file_count))
|
||||
fi
|
||||
safe_find_delete "$target_path" "*" "$MOLE_LOG_AGE_DAYS" "f"
|
||||
cleaned=true
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ $deleted -gt 0 ]]; then
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Removed $deleted old attachment(s)"
|
||||
if [[ "$cleaned" == "true" ]]; then
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Cleaned old attachments (> ${MOLE_LOG_AGE_DAYS} days)"
|
||||
else
|
||||
echo -e "${GRAY}-${NC} No old attachments found"
|
||||
fi
|
||||
@@ -350,10 +344,11 @@ opt_local_snapshots() {
|
||||
stop_inline_spinner
|
||||
fi
|
||||
|
||||
after=$(count_local_snapshots)
|
||||
local removed=$((before - after))
|
||||
[[ "$removed" -lt 0 ]] && removed=0
|
||||
|
||||
if [[ "$success" == "true" ]]; then
|
||||
after=$(count_local_snapshots)
|
||||
local removed=$((before - after))
|
||||
[[ "$removed" -lt 0 ]] && removed=0
|
||||
echo -e "${GREEN}${ICON_SUCCESS}${NC} Removed $removed snapshots (remaining: $after)"
|
||||
else
|
||||
echo -e "${YELLOW}!${NC} Timed out or failed"
|
||||
@@ -441,278 +436,3 @@ execute_optimization() {
|
||||
;;
|
||||
esac
|
||||
}
|
||||
#!/bin/bash
|
||||
# System Health Check - Pure Bash Implementation
|
||||
# Replaces optimize-go
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Get memory info in GB.
# Outputs: "used_gb total_gb" on stdout, both formatted with two decimals
# ("0" when a metric cannot be determined). Used memory is the sum of
# active, wired, and compressor-occupied pages reported by vm_stat.
get_memory_info() {
    local total_bytes used_gb total_gb

    # Total physical memory (macOS sysctl); fall back to 0 when unavailable.
    total_bytes=$(sysctl -n hw.memsize 2> /dev/null || echo "0")
    total_gb=$(awk "BEGIN {printf \"%.2f\", $total_bytes / (1024*1024*1024)}" 2> /dev/null || echo "0")
    [[ -z "$total_gb" ]] && total_gb="0"

    # Used memory derived from vm_stat page counts.
    local vm_output active wired compressed page_size
    vm_output=$(vm_stat 2> /dev/null || echo "")

    # Fix: read the real page size from vm_stat's header line
    # ("Mach Virtual Memory Statistics: (page size of N bytes)") instead of
    # assuming 4096 bytes - Apple Silicon uses 16384-byte pages, so the old
    # hard-coded value under-reported used memory by 4x there.
    page_size=$(echo "$vm_output" | sed -n 's/.*page size of \([0-9][0-9]*\) bytes.*/\1/p')
    [[ "$page_size" =~ ^[0-9]+$ ]] || page_size=4096

    active=$(echo "$vm_output" | awk '/Pages active:/ {print $NF}' | tr -d '.' 2> /dev/null || echo "0")
    wired=$(echo "$vm_output" | awk '/Pages wired down:/ {print $NF}' | tr -d '.' 2> /dev/null || echo "0")
    compressed=$(echo "$vm_output" | awk '/Pages occupied by compressor:/ {print $NF}' | tr -d '.' 2> /dev/null || echo "0")

    # Empty when vm_stat is missing or the label was not found.
    active=${active:-0}
    wired=${wired:-0}
    compressed=${compressed:-0}

    local used_bytes=$(((active + wired + compressed) * page_size))
    used_gb=$(awk "BEGIN {printf \"%.2f\", $used_bytes / (1024*1024*1024)}" 2> /dev/null || echo "0")
    [[ -z "$used_gb" ]] && used_gb="0"

    echo "$used_gb $total_gb"
}
|
||||
|
||||
# Report disk usage for the volume that holds $HOME.
# Outputs: "used_gb total_gb used_percent" on stdout; falls back to zeros
# when df output cannot be parsed.
get_disk_info() {
    local target="${HOME:-/}"
    local df_line
    df_line=$(command df -k "$target" 2> /dev null | tail -1)

    local kb_total kb_used
    kb_total=$(echo "$df_line" | awk '{print $2}' 2> /dev/null || echo "0")
    kb_used=$(echo "$df_line" | awk '{print $3}' 2> /dev/null || echo "0")
    kb_total=${kb_total:-0}
    kb_used=${kb_used:-0}

    # Guard the percentage computation against division by zero.
    [[ "$kb_total" == "0" ]] && kb_total=1

    local gb_total gb_used pct_used
    gb_total=$(awk "BEGIN {printf \"%.2f\", $kb_total / (1024*1024)}" 2> /dev/null || echo "0")
    gb_used=$(awk "BEGIN {printf \"%.2f\", $kb_used / (1024*1024)}" 2> /dev/null || echo "0")
    pct_used=$(awk "BEGIN {printf \"%.1f\", ($kb_used / $kb_total) * 100}" 2> /dev/null || echo "0")

    [[ -z "$gb_total" ]] && gb_total="0"
    [[ -z "$gb_used" ]] && gb_used="0"
    [[ -z "$pct_used" ]] && pct_used="0"

    echo "$gb_used $gb_total $pct_used"
}
|
||||
|
||||
# Days since last boot, derived from kern.boottime (macOS).
# Outputs: uptime in days with one decimal, or "0" when it cannot be read.
get_uptime_days() {
    local raw epoch days
    raw=$(sysctl -n kern.boottime 2> /dev/null || echo "")
    epoch=$(echo "$raw" | sed -n 's/.*sec = \([0-9]*\).*/\1/p' 2> /dev/null || echo "")

    days="0"
    if [[ "$epoch" =~ ^[0-9]+$ ]]; then
        local now elapsed
        now=$(date +%s 2> /dev/null || echo "0")
        elapsed=$((now - epoch))
        days=$(awk "BEGIN {printf \"%.1f\", $elapsed / 86400}" 2> /dev/null || echo "0")
    fi

    [[ -z "$days" ]] && days="0"
    echo "$days"
}
|
||||
|
||||
# Render a kilobyte count as a human-readable size string.
# Arguments: $1 - size in KB (integer)
# Outputs:   "0B", "<n>KB", "<n>MB", or "<n.nn>GB" on stdout.
format_size_kb() {
    local kilobytes="$1"
    if [[ "$kilobytes" -le 0 ]]; then
        echo "0B"
        return
    fi

    local megabytes gigabytes
    megabytes=$(awk "BEGIN {printf \"%.1f\", $kilobytes / 1024}")
    gigabytes=$(awk "BEGIN {printf \"%.2f\", $megabytes / 1024}")

    # Pick the largest unit whose value is at least 1 (float compare via awk).
    if awk "BEGIN {exit !($gigabytes >= 1)}"; then
        echo "${gigabytes}GB"
    elif awk "BEGIN {exit !($megabytes >= 1)}"; then
        printf '%.0fMB\n' "$megabytes"
    else
        echo "${kilobytes}KB"
    fi
}
|
||||
|
||||
# Emit a pipe-delimited optimization record for the user-cache refresh task,
# sizing the description from ~/Library/Caches when it holds any data.
# Record format: "action|name|description|safe".
check_cache_refresh() {
    local target="$HOME/Library/Caches"
    local kb=$(get_path_size_kb "$target")

    local summary="Refresh Finder previews, Quick Look, and Safari caches"
    if [[ $kb -gt 0 ]]; then
        local human=$(format_size_kb "$kb")
        summary="Refresh ${human} of Finder/Safari caches"
    fi

    echo "cache_refresh|User Cache Refresh|${summary}|true"
}
|
||||
|
||||
# Emit an optimization record when the Mail attachment folders hold any data.
# Record format: "action|name|description|safe"; prints nothing when empty.
check_mail_downloads() {
    local candidates=(
        "$HOME/Library/Mail Downloads"
        "$HOME/Library/Containers/com.apple.mail/Data/Library/Mail Downloads"
    )

    local sum_kb=0 dir
    for dir in "${candidates[@]}"; do
        sum_kb=$((sum_kb + $(get_path_size_kb "$dir")))
    done

    if [[ $sum_kb -gt 0 ]]; then
        local human=$(format_size_kb "$sum_kb")
        echo "mail_downloads|Mail Downloads|Recover ${human} of Mail attachments|true"
    fi
}
|
||||
|
||||
# Emit an optimization record when saved application state data exists.
# Record format: "action|name|description|safe"; prints nothing when empty.
check_saved_state() {
    local target="$HOME/Library/Saved Application State"
    local kb=$(get_path_size_kb "$target")

    if [[ $kb -gt 0 ]]; then
        local human=$(format_size_kb "$kb")
        echo "saved_state_cleanup|Saved State|Clear ${human} of stale saved states|true"
    fi
}
|
||||
|
||||
# Emit an optimization record sized from the VM swap files, if any exist.
# Record format: "action|name|description|safe"; prints nothing when empty.
check_swap_cleanup() {
    local sum_kb=0
    local swapfile

    # If the glob matches nothing the literal pattern fails the -f test.
    for swapfile in /private/var/vm/swapfile*; do
        [[ -f "$swapfile" ]] && sum_kb=$((sum_kb + $(get_file_size "$swapfile") / 1024))
    done

    if [[ $sum_kb -gt 0 ]]; then
        local human=$(format_size_kb "$sum_kb")
        echo "swap_cleanup|Memory & Swap|Purge swap (${human}) & inactive memory|false"
    fi
}
|
||||
|
||||
# Emit an optimization record when APFS local Time Machine snapshots exist.
# Silently returns when tmutil is unavailable (non-macOS or restricted env).
# Record format: "action|name|description|safe".
check_local_snapshots() {
    command -v tmutil > /dev/null 2>&1 || return

    local snapshots
    snapshots=$(tmutil listlocalsnapshots / 2> /dev/null || echo "")

    local count
    # Fix: grep -c exits 1 when nothing matches; combined with the script's
    # `set -euo pipefail` that failing pipeline aborted the whole script on
    # machines with zero snapshots. `|| true` keeps the zero-match case alive
    # (the "0" count is still captured from stdout).
    count=$(echo "$snapshots" | grep -c "com.apple.TimeMachine" 2> /dev/null || true)
    count=$(echo "$count" | tr -d ' \n')
    count=${count:-0}
    [[ "$count" =~ ^[0-9]+$ ]] && [[ $count -gt 0 ]] && echo "local_snapshots|Local Snapshots|${count} APFS local snapshots detected|true"
}
|
||||
|
||||
# Emit an optimization record sized from Xcode / simulator support folders.
# Record format: "action|name|description|safe"; prints nothing when empty.
check_developer_cleanup() {
    local candidates=(
        "$HOME/Library/Developer/Xcode/DerivedData"
        "$HOME/Library/Developer/Xcode/Archives"
        "$HOME/Library/Developer/Xcode/iOS DeviceSupport"
        "$HOME/Library/Developer/CoreSimulator/Caches"
    )

    local sum_kb=0 dir
    for dir in "${candidates[@]}"; do
        sum_kb=$((sum_kb + $(get_path_size_kb "$dir")))
    done

    if [[ $sum_kb -gt 0 ]]; then
        local human=$(format_size_kb "$sum_kb")
        echo "developer_cleanup|Developer Cleanup|Recover ${human} of Xcode/simulator data|false"
    fi
}
|
||||
|
||||
# Generate the machine-readable health report.
# Outputs: one JSON document on stdout containing system metrics plus a list
# of optimization actions, each with category/name/description/action/safe.
generate_health_json() {
    # Gather system metrics from the helper functions above.
    read -r mem_used mem_total <<< "$(get_memory_info)"
    read -r disk_used disk_total disk_percent <<< "$(get_disk_info)"
    local uptime=$(get_uptime_days)

    # Ensure all values are valid numbers (fallback to 0) so the emitted
    # JSON stays parseable even when a metric could not be collected.
    mem_used=${mem_used:-0}
    mem_total=${mem_total:-0}
    disk_used=${disk_used:-0}
    disk_total=${disk_total:-0}
    disk_percent=${disk_percent:-0}
    uptime=${uptime:-0}

    # Open the JSON document (unquoted EOF: the $variables expand).
    cat << EOF
{
  "memory_used_gb": $mem_used,
  "memory_total_gb": $mem_total,
  "disk_used_gb": $disk_used,
  "disk_total_gb": $disk_total,
  "disk_used_percent": $disk_percent,
  "uptime_days": $uptime,
  "optimizations": [
EOF

    # Collect optimization items as "action|name|description|safe" records.
    local -a items=()

    # Always-on items (no size checks - instant)
    items+=('system_maintenance|System Maintenance|Rebuild system databases & flush caches|true')
    items+=('maintenance_scripts|Maintenance Scripts|Run daily/weekly/monthly scripts & rotate logs|true')
    items+=('radio_refresh|Bluetooth & Wi-Fi Refresh|Reset wireless preference caches|true')
    items+=('recent_items|Recent Items|Clear recent apps/documents/servers lists|true')
    items+=('log_cleanup|Diagnostics Cleanup|Purge old diagnostic & crash logs|true')
    items+=('startup_cache|Startup Cache Rebuild|Rebuild kext caches & prelinked kernel|true')

    # Skip conditional checks - they require du which is slow.
    # Users will see actual results when optimization runs.
    # item=$(check_cache_refresh || true)
    # [[ -n "$item" ]] && items+=("$item")
    # item=$(check_mail_downloads || true)
    # [[ -n "$item" ]] && items+=("$item")
    # item=$(check_saved_state || true)
    # [[ -n "$item" ]] && items+=("$item")
    # item=$(check_swap_cleanup || true)
    # [[ -n "$item" ]] && items+=("$item")
    # item=$(check_local_snapshots || true)
    # [[ -n "$item" ]] && items+=("$item")
    # item=$(check_developer_cleanup || true)
    # [[ -n "$item" ]] && items+=("$item")
    # item=$(check_broken_configs || true)
    # [[ -n "$item" ]] && items+=("$item")

    # Output items as JSON objects; a comma is printed BEFORE every item
    # except the first, so there is never a trailing comma.
    local first=true
    for item in "${items[@]}"; do
        IFS='|' read -r action name desc safe <<< "$item"

        [[ "$first" == "true" ]] && first=false || echo ","

        cat << EOF
    {
      "category": "system",
      "name": "$name",
      "description": "$desc",
      "action": "$action",
      "safe": $safe
    }
EOF
    done

    # Close the JSON document (quoted EOF: literal text, no expansion).
    cat << 'EOF'
  ]
}
EOF
}
|
||||
|
||||
# Main execution: emit the report only when this file is run directly,
# not when it is sourced for its helper functions.
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    generate_health_json
fi
|
||||
|
||||
@@ -69,7 +69,7 @@ TOTAL_CHECKS=0
|
||||
|
||||
# Check 1: Keyboard input handling (restored to 1s for reliability)
|
||||
((TOTAL_CHECKS++))
|
||||
if grep -q "read -r -s -n 1 -t 1" lib/core/common.sh; then
|
||||
if grep -q "read -r -s -n 1 -t 1" lib/core/ui.sh; then
|
||||
echo -e "${GREEN} ✓ Keyboard timeout properly configured (1s)${NC}"
|
||||
((OPTIMIZATION_SCORE++))
|
||||
else
|
||||
@@ -78,7 +78,8 @@ fi
|
||||
|
||||
# Check 2: Single-pass drain_pending_input
|
||||
((TOTAL_CHECKS++))
|
||||
DRAIN_PASSES=$(grep -c "while IFS= read -r -s -n 1" lib/core/common.sh || echo 0)
|
||||
DRAIN_PASSES=$(grep -c "while IFS= read -r -s -n 1" lib/core/ui.sh 2> /dev/null || true)
|
||||
DRAIN_PASSES=${DRAIN_PASSES:-0}
|
||||
if [[ $DRAIN_PASSES -eq 1 ]]; then
|
||||
echo -e "${GREEN} ✓ drain_pending_input optimized (single-pass)${NC}"
|
||||
((OPTIMIZATION_SCORE++))
|
||||
@@ -88,7 +89,7 @@ fi
|
||||
|
||||
# Check 3: Log rotation once per session
|
||||
((TOTAL_CHECKS++))
|
||||
if grep -q "rotate_log_once" lib/core/common.sh && ! grep "rotate_log()" lib/core/common.sh | grep -v "rotate_log_once" > /dev/null 2>&1; then
|
||||
if grep -q "rotate_log_once" lib/core/log.sh; then
|
||||
echo -e "${GREEN} ✓ Log rotation optimized (once per session)${NC}"
|
||||
((OPTIMIZATION_SCORE++))
|
||||
else
|
||||
|
||||
112
scripts/run-tests.sh
Executable file
112
scripts/run-tests.sh
Executable file
@@ -0,0 +1,112 @@
|
||||
#!/bin/bash
# Quick test runner script.
# Runs all checks (lint, syntax, unit, Go, module loading, integration)
# before committing. Exits 0 when everything passes, 1 otherwise.

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR/.."

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

echo "================================"
echo " Mole Test Runner"
echo "================================"
echo ""

# Track failures.
# Fix: use FAILED=$((FAILED + 1)) instead of ((FAILED++)) - the post-increment
# arithmetic command evaluates to 0 on the very first increment, which yields
# a non-zero exit status and aborts the whole script under `set -e`.
FAILED=0

# 1. ShellCheck
echo "1. Running ShellCheck..."
if command -v shellcheck > /dev/null 2>&1; then
    if shellcheck mole bin/*.sh 2> /dev/null &&
        find lib -name "*.sh" -type f -exec shellcheck {} + 2> /dev/null; then
        echo -e "${GREEN}✓ ShellCheck passed${NC}"
    else
        echo -e "${RED}✗ ShellCheck failed${NC}"
        FAILED=$((FAILED + 1))
    fi
else
    echo -e "${YELLOW}⚠ ShellCheck not installed, skipping${NC}"
fi
echo ""

# 2. Syntax Check
echo "2. Running syntax check..."
if bash -n mole &&
    bash -n bin/*.sh 2> /dev/null &&
    find lib -name "*.sh" -type f -exec bash -n {} \; 2> /dev/null; then
    echo -e "${GREEN}✓ Syntax check passed${NC}"
else
    echo -e "${RED}✗ Syntax check failed${NC}"
    FAILED=$((FAILED + 1))
fi
echo ""

# 3. Unit Tests
echo "3. Running unit tests..."
if command -v bats > /dev/null 2>&1; then
    if bats tests/*.bats; then
        echo -e "${GREEN}✓ Unit tests passed${NC}"
    else
        echo -e "${RED}✗ Unit tests failed${NC}"
        FAILED=$((FAILED + 1))
    fi
else
    echo -e "${YELLOW}⚠ Bats not installed, skipping unit tests${NC}"
    echo "  Install with: brew install bats-core"
fi
echo ""

# 4. Go Tests
echo "4. Running Go tests..."
if command -v go > /dev/null 2>&1; then
    if go build ./... && go vet ./cmd/...; then
        echo -e "${GREEN}✓ Go tests passed${NC}"
    else
        echo -e "${RED}✗ Go tests failed${NC}"
        FAILED=$((FAILED + 1))
    fi
else
    echo -e "${YELLOW}⚠ Go not installed, skipping Go tests${NC}"
fi
echo ""

# 5. Module Loading Test
echo "5. Testing module loading..."
if bash -c 'source lib/core/common.sh && echo "OK"' > /dev/null 2>&1; then
    echo -e "${GREEN}✓ Module loading passed${NC}"
else
    echo -e "${RED}✗ Module loading failed${NC}"
    FAILED=$((FAILED + 1))
fi
echo ""

# 6. Integration Tests
echo "6. Running integration tests..."
if ./bin/clean.sh --dry-run > /dev/null 2>&1; then
    echo -e "${GREEN}✓ Clean dry-run passed${NC}"
else
    echo -e "${RED}✗ Clean dry-run failed${NC}"
    FAILED=$((FAILED + 1))
fi
echo ""

# Summary
echo "================================"
if [[ $FAILED -eq 0 ]]; then
    echo -e "${GREEN}All tests passed!${NC}"
    echo ""
    echo "You can now commit your changes."
    exit 0
else
    echo -e "${RED}$FAILED test(s) failed!${NC}"
    echo ""
    echo "Please fix the failing tests before committing."
    exit 1
fi
|
||||
Reference in New Issue
Block a user