diff --git a/bin/clean.sh b/bin/clean.sh
index 5a810f4..d1f9837 100755
--- a/bin/clean.sh
+++ b/bin/clean.sh
@@ -9,6 +9,9 @@ export LANG=C
 
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 source "$SCRIPT_DIR/../lib/core/common.sh"
+
+# Set up cleanup trap for temporary files
+trap cleanup_temp_files EXIT INT TERM
 source "$SCRIPT_DIR/../lib/core/sudo.sh"
 source "$SCRIPT_DIR/../lib/clean/brew.sh"
 source "$SCRIPT_DIR/../lib/clean/caches.sh"
@@ -138,12 +141,6 @@ cleanup() {
     stop_sudo_session
     show_cursor
-
-    # If interrupted, show message
-    if [[ "$signal" == "INT" ]] || [[ $exit_code -eq 130 ]]; then
-        printf "\r\033[K" >&2
-        echo -e "${YELLOW}Interrupted by user${NC}" >&2
-    fi
 }
 
 trap 'cleanup EXIT $?' EXIT
diff --git a/bin/optimize.sh b/bin/optimize.sh
index b6ff118..56c16ee 100755
--- a/bin/optimize.sh
+++ b/bin/optimize.sh
@@ -8,6 +8,9 @@ export LANG=C
 
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
 source "$SCRIPT_DIR/lib/core/common.sh"
+
+# Set up cleanup trap for temporary files
+trap cleanup_temp_files EXIT INT TERM
 source "$SCRIPT_DIR/lib/core/sudo.sh"
 source "$SCRIPT_DIR/lib/manage/update.sh"
 source "$SCRIPT_DIR/lib/manage/autofix.sh"
diff --git a/bin/purge.sh b/bin/purge.sh
index f5f1c14..46ecac1 100755
--- a/bin/purge.sh
+++ b/bin/purge.sh
@@ -11,6 +11,9 @@ export LANG=C
 
 # Get script directory and source common functions
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 source "$SCRIPT_DIR/../lib/core/common.sh"
+
+# Set up cleanup trap for temporary files
+trap cleanup_temp_files EXIT INT TERM
 source "$SCRIPT_DIR/../lib/core/log.sh"
 source "$SCRIPT_DIR/../lib/clean/project.sh"
diff --git a/bin/uninstall.sh b/bin/uninstall.sh
index ce2502e..59021f5 100755
--- a/bin/uninstall.sh
+++ b/bin/uninstall.sh
@@ -15,6 +15,9 @@ export LANG=C
 # Get script directory and source common functions
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 source "$SCRIPT_DIR/../lib/core/common.sh"
+
+# Set up cleanup trap for temporary files
+trap cleanup_temp_files EXIT INT TERM
 source "$SCRIPT_DIR/../lib/ui/menu_paginated.sh"
 source "$SCRIPT_DIR/../lib/ui/app_selector.sh"
 source "$SCRIPT_DIR/../lib/uninstall/batch.sh"
diff --git a/lib/clean/project.sh b/lib/clean/project.sh
index f89b288..663d04a 100644
--- a/lib/clean/project.sh
+++ b/lib/clean/project.sh
@@ -423,9 +423,7 @@ clean_project_artifacts() {
         if [[ -t 1 ]]; then
             stop_inline_spinner
         fi
-        printf '\n'
-        echo -e "${GRAY}Interrupted${NC}"
-        printf '\n'
+        echo ""
         exit 130
     }
     trap cleanup_scan INT TERM
diff --git a/lib/core/base.sh b/lib/core/base.sh
index e5a65ae..8005309 100644
--- a/lib/core/base.sh
+++ b/lib/core/base.sh
@@ -397,44 +397,20 @@ ensure_user_file() {
 # Convert bytes to human-readable format (e.g., 1.5GB)
 bytes_to_human() {
     local bytes="$1"
-    if [[ ! "$bytes" =~ ^[0-9]+$ ]]; then
"$bytes" =~ ^[0-9]+$ ]]; then - echo "0B" - return 1 - fi + [[ "$bytes" =~ ^[0-9]+$ ]] || { echo "0B"; return 1; } - if ((bytes >= 1073741824)); then # >= 1GB - local divisor=1073741824 - local whole=$((bytes / divisor)) - local remainder=$((bytes % divisor)) - local frac=$(((remainder * 100 + divisor / 2) / divisor)) - if ((frac >= 100)); then - frac=0 - ((whole++)) - fi - printf "%d.%02dGB\n" "$whole" "$frac" - return 0 + # GB: >= 1073741824 bytes + if ((bytes >= 1073741824)); then + printf "%d.%02dGB\n" $((bytes / 1073741824)) $(((bytes % 1073741824) * 100 / 1073741824)) + # MB: >= 1048576 bytes + elif ((bytes >= 1048576)); then + printf "%d.%01dMB\n" $((bytes / 1048576)) $(((bytes % 1048576) * 10 / 1048576)) + # KB: >= 1024 bytes (round up) + elif ((bytes >= 1024)); then + printf "%dKB\n" $(((bytes + 512) / 1024)) + else + printf "%dB\n" "$bytes" fi - - if ((bytes >= 1048576)); then # >= 1MB - local divisor=1048576 - local whole=$((bytes / divisor)) - local remainder=$((bytes % divisor)) - local frac=$(((remainder * 10 + divisor / 2) / divisor)) - if ((frac >= 10)); then - frac=0 - ((whole++)) - fi - printf "%d.%01dMB\n" "$whole" "$frac" - return 0 - fi - - if ((bytes >= 1024)); then - local rounded_kb=$(((bytes + 512) / 1024)) - printf "%dKB\n" "$rounded_kb" - return 0 - fi - - printf "%dB\n" "$bytes" } # Convert kilobytes to human-readable format diff --git a/lib/core/common.sh b/lib/core/common.sh index 7a32a6a..b37d42a 100755 --- a/lib/core/common.sh +++ b/lib/core/common.sh @@ -29,28 +29,52 @@ fi # Update via Homebrew update_via_homebrew() { local current_version="$1" + local temp_update temp_upgrade + temp_update=$(mktemp_file "brew_update") + temp_upgrade=$(mktemp_file "brew_upgrade") + # Set up trap for interruption (Ctrl+C) with inline cleanup + trap 'stop_inline_spinner 2>/dev/null; rm -f "$temp_update" "$temp_upgrade" 2>/dev/null; echo ""; exit 130' INT TERM + + # Update Homebrew if [[ -t 1 ]]; then start_inline_spinner "Updating Homebrew..." else echo "Updating Homebrew..." fi - brew update 2>&1 | grep -Ev "^(==>|Already up-to-date)" || true + + brew update > "$temp_update" 2>&1 & + local update_pid=$! + wait $update_pid 2>/dev/null || true # Continue even if brew update fails + if [[ -t 1 ]]; then stop_inline_spinner fi + # Upgrade Mole if [[ -t 1 ]]; then start_inline_spinner "Upgrading Mole..." else echo "Upgrading Mole..." fi + + brew upgrade mole > "$temp_upgrade" 2>&1 & + local upgrade_pid=$! + wait $upgrade_pid 2>/dev/null || true # Continue even if brew upgrade fails + local upgrade_output - upgrade_output=$(brew upgrade mole 2>&1) || true + upgrade_output=$(cat "$temp_upgrade") + if [[ -t 1 ]]; then stop_inline_spinner fi + # Clear trap + trap - INT TERM + + # Cleanup temp files + rm -f "$temp_update" "$temp_upgrade" + if echo "$upgrade_output" | grep -q "already installed"; then local installed_version installed_version=$(brew list --versions mole 2> /dev/null | awk '{print $2}') diff --git a/mole b/mole index 682d039..4ec96b5 100755 --- a/mole +++ b/mole @@ -21,8 +21,11 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" # Source common functions source "$SCRIPT_DIR/lib/core/common.sh" +# Set up cleanup trap for temporary files +trap cleanup_temp_files EXIT INT TERM + # Version info -VERSION="1.14.5" +VERSION="1.14.6" MOLE_TAGLINE="Deep clean and optimize your Mac." 
 # Check TouchID configuration
@@ -252,7 +255,7 @@ show_help() {
 update_mole() {
     # Set up cleanup trap for update process
     local update_interrupted=false
-    trap 'update_interrupted=true; echo ""; log_error "Update interrupted by user"; exit 130' INT TERM
+    trap 'update_interrupted=true; echo ""; exit 130' INT TERM
 
     # Check if installed via Homebrew
     if is_homebrew_install; then
diff --git a/tests/performance.bats b/tests/performance.bats
new file mode 100644
index 0000000..5ea8c3e
--- /dev/null
+++ b/tests/performance.bats
@@ -0,0 +1,288 @@
+#!/usr/bin/env bats
+# Performance benchmark tests for Mole optimizations
+# Tests the performance improvements introduced in V1.14.0+
+
+setup_file() {
+    PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
+    export PROJECT_ROOT
+
+    # Create test data directory
+    TEST_DATA_DIR="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-perf.XXXXXX")"
+    export TEST_DATA_DIR
+}
+
+teardown_file() {
+    rm -rf "$TEST_DATA_DIR"
+}
+
+setup() {
+    source "$PROJECT_ROOT/lib/core/base.sh"
+}
+
+# ============================================================================
+# bytes_to_human Performance Tests
+# ============================================================================
+
+@test "bytes_to_human handles large values efficiently" {
+    local start end elapsed
+
+    # Warm up
+    bytes_to_human 1073741824 > /dev/null
+
+    # Benchmark: 1000 iterations should complete in < 2 seconds (relaxed threshold)
+    start=$(date +%s%N)
+    for i in {1..1000}; do
+        bytes_to_human 1073741824 > /dev/null
+    done
+    end=$(date +%s%N)
+
+    elapsed=$(( (end - start) / 1000000 )) # Convert to milliseconds
+
+    # Should complete in less than 2000ms (2 seconds)
+    [ "$elapsed" -lt 2000 ]
+}
+
+@test "bytes_to_human produces correct output for GB range" {
+    result=$(bytes_to_human 1073741824)
+    [ "$result" = "1.00GB" ]
+
+    result=$(bytes_to_human 5368709120)
+    [ "$result" = "5.00GB" ]
+}
+
+@test "bytes_to_human produces correct output for MB range" {
+    result=$(bytes_to_human 1048576)
+    [ "$result" = "1.0MB" ]
+
+    result=$(bytes_to_human 104857600)
+    [ "$result" = "100.0MB" ]
+}
+
+@test "bytes_to_human produces correct output for KB range" {
+    result=$(bytes_to_human 1024)
+    [ "$result" = "1KB" ]
+
+    result=$(bytes_to_human 10240)
+    [ "$result" = "10KB" ]
+}
+
+@test "bytes_to_human handles edge cases" {
+    # Zero bytes
+    result=$(bytes_to_human 0)
+    [ "$result" = "0B" ]
+
+    # Invalid input returns 0B (with error code 1)
+    run bytes_to_human "invalid"
+    [ "$status" -eq 1 ]
+    [ "$output" = "0B" ]
+
+    # Negative should also fail validation
+    run bytes_to_human "-100"
+    [ "$status" -eq 1 ]
+    [ "$output" = "0B" ]
+}
+
+# ============================================================================
+# BSD Stat Wrapper Performance Tests
+# ============================================================================
+
+@test "get_file_size is faster than multiple stat calls" {
+    # Create test file
+    local test_file="$TEST_DATA_DIR/size_test.txt"
+    dd if=/dev/zero of="$test_file" bs=1024 count=100 2> /dev/null
+
+    # Benchmark: 100 calls should complete quickly
+    local start end elapsed
+    start=$(date +%s%N)
+    for i in {1..100}; do
+        get_file_size "$test_file" > /dev/null
+    done
+    end=$(date +%s%N)
+
+    elapsed=$(( (end - start) / 1000000 ))
+
+    # Should complete in less than 1000ms (relaxed threshold)
+    [ "$elapsed" -lt 1000 ]
+}
+
+@test "get_file_mtime returns valid timestamp" {
+    local test_file="$TEST_DATA_DIR/mtime_test.txt"
+    touch "$test_file"
+
+    result=$(get_file_mtime "$test_file")
+
+    # Should be a valid epoch timestamp (10 digits)
+    [[ "$result" =~ ^[0-9]{10,}$ ]]
+}
+
+@test "get_file_owner returns current user for owned files" {
+    local test_file="$TEST_DATA_DIR/owner_test.txt"
+    touch "$test_file"
+
+    result=$(get_file_owner "$test_file")
+    current_user=$(whoami)
+
+    [ "$result" = "$current_user" ]
+}
+
+# ============================================================================
+# User Context Detection Performance Tests
+# ============================================================================
+
+@test "get_invoking_user executes quickly" {
+    local start end elapsed
+
+    start=$(date +%s%N)
+    for i in {1..100}; do
+        get_invoking_user > /dev/null
+    done
+    end=$(date +%s%N)
+
+    elapsed=$(( (end - start) / 1000000 ))
+
+    # Should complete in less than 200ms
+    [ "$elapsed" -lt 200 ]
+}
+
+@test "get_darwin_major caches correctly" {
+    # Multiple calls should return same result
+    local first second
+    first=$(get_darwin_major)
+    second=$(get_darwin_major)
+
+    [ "$first" = "$second" ]
+    [[ "$first" =~ ^[0-9]+$ ]]
+}
+
+# ============================================================================
+# Temporary File Management Performance Tests
+# ============================================================================
+
+@test "create_temp_file and cleanup_temp_files work efficiently" {
+    local start end elapsed
+
+    # Ensure MOLE_TEMP_DIRS is initialized (base.sh should do this)
+    declare -a MOLE_TEMP_DIRS=()
+
+    # Create 50 temp files (reduced from 100 for faster testing)
+    start=$(date +%s%N)
+    for i in {1..50}; do
+        create_temp_file > /dev/null
+    done
+    end=$(date +%s%N)
+
+    elapsed=$(( (end - start) / 1000000 ))
+
+    # Should complete in less than 1000ms
+    [ "$elapsed" -lt 1000 ]
+
+    # Verify temp files were tracked
+    [ "${#MOLE_TEMP_FILES[@]}" -eq 50 ]
+
+    # Cleanup should also be reasonably fast
+    start=$(date +%s%N)
+    cleanup_temp_files
+    end=$(date +%s%N)
+
+    elapsed=$(( (end - start) / 1000000 ))
+    # Relaxed threshold: should complete within 2 seconds
+    [ "$elapsed" -lt 2000 ]
+
+    # Verify cleanup
+    [ "${#MOLE_TEMP_FILES[@]}" -eq 0 ]
+}
+
+@test "mktemp_file creates files with correct prefix" {
+    local temp_file
+    temp_file=$(mktemp_file "test_prefix")
+
+    # Should contain prefix
+    [[ "$temp_file" =~ test_prefix ]]
+
+    # Should exist
+    [ -f "$temp_file" ]
+
+    # Cleanup
+    rm -f "$temp_file"
+}
+
+# ============================================================================
+# Brand Name Lookup Performance Tests
+# ============================================================================
+
+@test "get_brand_name handles common apps efficiently" {
+    local start end elapsed
+
+    # Warm up (first call includes defaults read which is slow)
+    get_brand_name "wechat" > /dev/null
+
+    # Benchmark: 50 lookups (reduced from 100)
+    start=$(date +%s%N)
+    for i in {1..50}; do
+        get_brand_name "wechat" > /dev/null
+        get_brand_name "QQ" > /dev/null
+        get_brand_name "dingtalk" > /dev/null
+    done
+    end=$(date +%s%N)
+
+    elapsed=$(( (end - start) / 1000000 ))
+
+    # Relaxed threshold: defaults read is called multiple times
+    # Should complete within 5 seconds on most systems
+    [ "$elapsed" -lt 5000 ]
+}
+
+@test "get_brand_name returns correct localized names" {
+    # Test should work regardless of system language
+    local result
+    result=$(get_brand_name "wechat")
+
+    # Should return either "WeChat" or "微信"
+    [[ "$result" == "WeChat" || "$result" == "微信" ]]
+}
+
+# ============================================================================
+# Parallel Job Calculation Tests
+# ============================================================================
+
+@test "get_optimal_parallel_jobs returns sensible values" {
+    local result
+
+    # Default mode
+    result=$(get_optimal_parallel_jobs)
+    [[ "$result" =~ ^[0-9]+$ ]]
+    [ "$result" -gt 0 ]
+    [ "$result" -le 128 ]
+
+    # Scan mode (should be higher)
+    local scan_jobs
+    scan_jobs=$(get_optimal_parallel_jobs "scan")
+    [ "$scan_jobs" -gt "$result" ]
+
+    # Compute mode (should be lower)
+    local compute_jobs
+    compute_jobs=$(get_optimal_parallel_jobs "compute")
+    [ "$compute_jobs" -le "$scan_jobs" ]
+}
+
+# ============================================================================
+# Section Tracking Performance Tests
+# ============================================================================
+
+@test "section tracking has minimal overhead" {
+    local start end elapsed
+
+    # Warm up
+    note_activity
+
+    start=$(date +%s%N)
+    for i in {1..1000}; do
+        note_activity
+    done
+    end=$(date +%s%N)
+
+    elapsed=$(( (end - start) / 1000000 ))
+
+    # Should complete in less than 1000ms (relaxed threshold)
+    [ "$elapsed" -lt 1000 ]
+}