mirror of https://github.com/tw93/Mole.git synced 2026-02-07 11:38:28 +00:00

chore: restructure windows branch (move windows/ content to root, remove macos files)

Author: Tw93
Date: 2026-01-10 13:23:29 +08:00
Parent: e84a457c2f
Commit: edf5ed09a9
140 changed files with 1472 additions and 34059 deletions
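The new Windows test suites added below are written for Pester. A minimal way to run them, assuming Pester 5.x is installed and the repository root is the working directory, would presumably look like:

# Sketch: run every Mole Windows test suite (assumes Pester 5.x)
Invoke-Pester -Path .\tests -Output Detailed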

tests/Clean.Tests.ps1 (new file, 199 lines)

@@ -0,0 +1,199 @@
# Mole Windows - Cleanup Module Tests
# Pester tests for lib/clean functionality
BeforeAll {
# Resolve the repository root (after the restructure, tests live in tests/ at the root)
$script:WindowsDir = Split-Path -Parent $PSScriptRoot
$script:LibDir = Join-Path $script:WindowsDir "lib"
# Import core modules first
. "$script:LibDir\core\base.ps1"
. "$script:LibDir\core\log.ps1"
. "$script:LibDir\core\ui.ps1"
. "$script:LibDir\core\file_ops.ps1"
# Import cleanup modules
. "$script:LibDir\clean\user.ps1"
. "$script:LibDir\clean\caches.ps1"
. "$script:LibDir\clean\dev.ps1"
. "$script:LibDir\clean\apps.ps1"
. "$script:LibDir\clean\system.ps1"
# Enable dry-run mode for all tests
$env:MOLE_DRY_RUN = "1"
Set-DryRunMode -Enabled $true
}
AfterAll {
$env:MOLE_DRY_RUN = $null
Set-DryRunMode -Enabled $false
}
Describe "User Cleanup Module" {
Context "Clear-UserTempFiles" {
It "Should have Clear-UserTempFiles function" {
Get-Command Clear-UserTempFiles -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
It "Should run without error in dry-run mode" {
{ Clear-UserTempFiles } | Should -Not -Throw
}
}
Context "Clear-OldDownloads" {
It "Should have Clear-OldDownloads function" {
Get-Command Clear-OldDownloads -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
Context "Clear-RecycleBin" {
It "Should have Clear-RecycleBin function" {
Get-Command Clear-RecycleBin -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
Context "Invoke-UserCleanup" {
It "Should have main user cleanup function" {
Get-Command Invoke-UserCleanup -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
}
Describe "Cache Cleanup Module" {
Context "Browser Cache Functions" {
It "Should have Clear-BrowserCaches function" {
Get-Command Clear-BrowserCaches -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
It "Should run browser cache cleanup without error" {
{ Clear-BrowserCaches } | Should -Not -Throw
}
}
Context "Application Cache Functions" {
It "Should have Clear-AppCaches function" {
Get-Command Clear-AppCaches -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
Context "Windows Update Cache" {
It "Should have Clear-WindowsUpdateCache function" {
Get-Command Clear-WindowsUpdateCache -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
Context "Invoke-CacheCleanup" {
It "Should have main cache cleanup function" {
Get-Command Invoke-CacheCleanup -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
}
Describe "Developer Tools Cleanup Module" {
Context "Node.js Cleanup" {
It "Should have npm cache cleanup function" {
Get-Command Clear-NpmCache -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
Context "Python Cleanup" {
It "Should have Python cache cleanup function" {
Get-Command Clear-PythonCaches -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
Context "Go Cleanup" {
It "Should have Go cache cleanup function" {
Get-Command Clear-GoCaches -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
Context "Rust Cleanup" {
It "Should have Rust cache cleanup function" {
Get-Command Clear-RustCaches -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
Context "Docker Cleanup" {
It "Should have Docker cache cleanup function" {
Get-Command Clear-DockerCaches -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
Context "Invoke-DevToolsCleanup" {
It "Should have main dev tools cleanup function" {
Get-Command Invoke-DevToolsCleanup -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
It "Should run without error in dry-run mode" {
{ Invoke-DevToolsCleanup } | Should -Not -Throw
}
}
}
Describe "Apps Cleanup Module" {
Context "Orphan Detection" {
It "Should have Find-OrphanedAppData function" {
Get-Command Find-OrphanedAppData -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
It "Should have Clear-OrphanedAppData function" {
Get-Command Clear-OrphanedAppData -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
Context "Specific App Cleanup" {
It "Should have Clear-OfficeCache function" {
Get-Command Clear-OfficeCache -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
It "Should have Clear-AdobeData function" {
Get-Command Clear-AdobeData -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
Context "Invoke-AppCleanup" {
It "Should have main app cleanup function" {
Get-Command Invoke-AppCleanup -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
}
Describe "System Cleanup Module" {
Context "System Temp" {
It "Should have Clear-SystemTempFiles function" {
Get-Command Clear-SystemTempFiles -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
Context "Windows Logs" {
It "Should have Clear-WindowsLogs function" {
Get-Command Clear-WindowsLogs -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
Context "Windows Update Cleanup" {
It "Should have Clear-WindowsUpdateFiles function" {
Get-Command Clear-WindowsUpdateFiles -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
Context "Memory Dumps" {
It "Should have Clear-MemoryDumps function" {
Get-Command Clear-MemoryDumps -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
Context "Admin Requirements" {
It "Should check for admin when needed" {
# System cleanup should handle non-admin gracefully
{ Clear-SystemTempFiles } | Should -Not -Throw
}
}
Context "Invoke-SystemCleanup" {
It "Should have main system cleanup function" {
Get-Command Invoke-SystemCleanup -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
}
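The suite above relies on the MOLE_DRY_RUN / Set-DryRunMode toggle set in BeforeAll so that every cleanup function can be invoked without deleting anything; Core.Tests.ps1 further down exercises the same toggle through Remove-SafeItem. A minimal sketch of the guard such a toggle implies (hypothetical illustration only; the real logic presumably lives in lib\core\file_ops.ps1 and may differ):

# Hypothetical sketch of a dry-run guard - not the repository's actual implementation
function Remove-SafeItem {
    param([Parameter(Mandatory)][string]$Path)
    if (-not (Test-SafePath -Path $Path)) { return $false }        # never touch protected paths
    if ($env:MOLE_DRY_RUN -eq "1" -or $script:DryRunEnabled) {     # $script:DryRunEnabled is assumed state set by Set-DryRunMode
        Write-Info "[dry-run] would remove $Path"                  # report the action but leave the item in place
        return $true
    }
    Remove-Item -Path $Path -Recurse -Force -ErrorAction SilentlyContinue
    return $true
}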

tests/Commands.Tests.ps1 (new file, 140 lines)

@@ -0,0 +1,140 @@
# Mole Windows - Command Tests
# Pester tests for bin/ command scripts
BeforeAll {
# Resolve the repository root (after the restructure, tests live in tests/ at the root)
$script:WindowsDir = Split-Path -Parent $PSScriptRoot
$script:BinDir = Join-Path $script:WindowsDir "bin"
}
Describe "Clean Command" {
Context "Help Display" {
It "Should show help without error" {
$result = & powershell -ExecutionPolicy Bypass -File "$script:BinDir\clean.ps1" -ShowHelp 2>&1
$result | Should -Not -BeNullOrEmpty
$LASTEXITCODE | Should -Be 0
}
It "Should mention dry-run in help" {
$result = & powershell -ExecutionPolicy Bypass -File "$script:BinDir\clean.ps1" -ShowHelp 2>&1
$result -join "`n" | Should -Match "DryRun"
}
}
Context "Dry Run Mode" {
It "Should support -DryRun parameter" {
# Just verify it starts without immediate error
$job = Start-Job -ScriptBlock {
param($binDir)
& powershell -ExecutionPolicy Bypass -File "$binDir\clean.ps1" -DryRun 2>&1
} -ArgumentList $script:BinDir
Start-Sleep -Seconds 3
Stop-Job $job -ErrorAction SilentlyContinue
Remove-Job $job -Force -ErrorAction SilentlyContinue
# If we got here without exception, test passes
$true | Should -Be $true
}
}
}
Describe "Uninstall Command" {
Context "Help Display" {
It "Should show help without error" {
$result = & powershell -ExecutionPolicy Bypass -File "$script:BinDir\uninstall.ps1" -ShowHelp 2>&1
$result | Should -Not -BeNullOrEmpty
$LASTEXITCODE | Should -Be 0
}
}
}
Describe "Optimize Command" {
Context "Help Display" {
It "Should show help without error" {
$result = & powershell -ExecutionPolicy Bypass -File "$script:BinDir\optimize.ps1" -ShowHelp 2>&1
$result | Should -Not -BeNullOrEmpty
$LASTEXITCODE | Should -Be 0
}
It "Should mention optimization options in help" {
$result = & powershell -ExecutionPolicy Bypass -File "$script:BinDir\optimize.ps1" -ShowHelp 2>&1
$result -join "`n" | Should -Match "DryRun|Disk|DNS"
}
}
}
Describe "Purge Command" {
Context "Help Display" {
It "Should show help without error" {
$result = & powershell -ExecutionPolicy Bypass -File "$script:BinDir\purge.ps1" -ShowHelp 2>&1
$result | Should -Not -BeNullOrEmpty
$LASTEXITCODE | Should -Be 0
}
It "Should list artifact types in help" {
$result = & powershell -ExecutionPolicy Bypass -File "$script:BinDir\purge.ps1" -ShowHelp 2>&1
$result -join "`n" | Should -Match "node_modules|vendor|venv"
}
}
}
Describe "Analyze Command" {
Context "Help Display" {
It "Should show help without error" {
$result = & powershell -ExecutionPolicy Bypass -File "$script:BinDir\analyze.ps1" -ShowHelp 2>&1
$result | Should -Not -BeNullOrEmpty
$LASTEXITCODE | Should -Be 0
}
It "Should mention keybindings in help" {
$result = & powershell -ExecutionPolicy Bypass -File "$script:BinDir\analyze.ps1" -ShowHelp 2>&1
$result -join "`n" | Should -Match "Navigate|Enter|Quit"
}
}
}
Describe "Status Command" {
Context "Help Display" {
It "Should show help without error" {
$result = & powershell -ExecutionPolicy Bypass -File "$script:BinDir\status.ps1" -ShowHelp 2>&1
$result | Should -Not -BeNullOrEmpty
$LASTEXITCODE | Should -Be 0
}
It "Should mention system metrics in help" {
$result = & powershell -ExecutionPolicy Bypass -File "$script:BinDir\status.ps1" -ShowHelp 2>&1
$result -join "`n" | Should -Match "CPU|Memory|Disk|health"
}
}
}
Describe "Main Entry Point" {
Context "mole.ps1" {
BeforeAll {
$script:MolePath = Join-Path $script:WindowsDir "mole.ps1"
}
It "Should show help without error" {
$result = & powershell -ExecutionPolicy Bypass -File $script:MolePath -ShowHelp 2>&1
$result | Should -Not -BeNullOrEmpty
}
It "Should show version without error" {
$result = & powershell -ExecutionPolicy Bypass -File $script:MolePath -Version 2>&1
$result | Should -Not -BeNullOrEmpty
$result -join "`n" | Should -Match "Mole|v\d+\.\d+"
}
It "Should list available commands in help" {
$result = & powershell -ExecutionPolicy Bypass -File $script:MolePath -ShowHelp 2>&1
$helpText = $result -join "`n"
$helpText | Should -Match "clean"
$helpText | Should -Match "uninstall"
$helpText | Should -Match "optimize"
$helpText | Should -Match "purge"
$helpText | Should -Match "analyze"
$helpText | Should -Match "status"
}
}
}
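The -DryRun test above only checks that clean.ps1 starts cleanly; it sleeps for three seconds and then stops the background job regardless of state. If that ever proves flaky, a bounded wait that still captures output is one possible tightening (a sketch using the same paths, not part of this commit):

# Sketch: bounded wait instead of a fixed sleep (hypothetical refinement)
$job = Start-Job -ScriptBlock {
    param($binDir)
    & powershell -ExecutionPolicy Bypass -File "$binDir\clean.ps1" -DryRun 2>&1
} -ArgumentList $script:BinDir
if (Wait-Job $job -Timeout 10) {
    Receive-Job $job | Should -Not -BeNullOrEmpty    # job finished within the timeout; inspect its output
} else {
    Stop-Job $job -ErrorAction SilentlyContinue      # still running; stop it and treat startup as success
}
Remove-Job $job -Force -ErrorAction SilentlyContinue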

tests/Core.Tests.ps1 (new file, 242 lines)

@@ -0,0 +1,242 @@
# Mole Windows - Core Module Tests
# Pester tests for lib/core functionality
BeforeAll {
# Resolve the repository root (after the restructure, tests live in tests/ at the root)
$script:WindowsDir = Split-Path -Parent $PSScriptRoot
$script:LibDir = Join-Path $script:WindowsDir "lib"
# Import core modules
. "$script:LibDir\core\base.ps1"
. "$script:LibDir\core\log.ps1"
. "$script:LibDir\core\ui.ps1"
. "$script:LibDir\core\file_ops.ps1"
}
Describe "Base Module" {
Context "Color Definitions" {
It "Should define color codes" {
$script:Colors | Should -Not -BeNullOrEmpty
$script:Colors.Cyan | Should -Not -BeNullOrEmpty
$script:Colors.Green | Should -Not -BeNullOrEmpty
$script:Colors.Red | Should -Not -BeNullOrEmpty
$script:Colors.NC | Should -Not -BeNullOrEmpty
}
It "Should define icon set" {
$script:Icons | Should -Not -BeNullOrEmpty
$script:Icons.Success | Should -Not -BeNullOrEmpty
$script:Icons.Error | Should -Not -BeNullOrEmpty
$script:Icons.Warning | Should -Not -BeNullOrEmpty
}
}
Context "Test-IsAdmin" {
It "Should return a boolean" {
$result = Test-IsAdmin
$result | Should -BeOfType [bool]
}
}
Context "Get-WindowsVersion" {
It "Should return version info" {
$result = Get-WindowsVersion
$result | Should -Not -BeNullOrEmpty
$result.Name | Should -Not -BeNullOrEmpty
$result.Version | Should -Not -BeNullOrEmpty
$result.Build | Should -Not -BeNullOrEmpty
}
}
Context "Get-FreeSpace" {
It "Should return free space string" {
$result = Get-FreeSpace
$result | Should -Not -BeNullOrEmpty
# Format is like "100.00GB" or "50.5MB" (no space between number and unit)
$result | Should -Match "\d+(\.\d+)?(B|KB|MB|GB|TB)"
}
It "Should accept drive parameter" {
$result = Get-FreeSpace -Drive "C:"
$result | Should -Not -BeNullOrEmpty
}
}
}
Describe "File Operations Module" {
BeforeAll {
# Create temp test directory
$script:TestDir = Join-Path $env:TEMP "mole_test_$(Get-Random)"
New-Item -ItemType Directory -Path $script:TestDir -Force | Out-Null
}
AfterAll {
# Cleanup test directory
if (Test-Path $script:TestDir) {
Remove-Item -Path $script:TestDir -Recurse -Force -ErrorAction SilentlyContinue
}
}
Context "Format-ByteSize" {
It "Should format bytes correctly" {
# Actual format: no space, uses N0/N1/N2 formatting
Format-ByteSize -Bytes 0 | Should -Be "0B"
Format-ByteSize -Bytes 1024 | Should -Be "1KB"
Format-ByteSize -Bytes 1048576 | Should -Be "1.0MB"
Format-ByteSize -Bytes 1073741824 | Should -Be "1.00GB"
}
It "Should handle large numbers" {
Format-ByteSize -Bytes 1099511627776 | Should -Be "1.00TB"
}
}
Context "Get-PathSize" {
BeforeEach {
# Create test file
$testFile = Join-Path $script:TestDir "testfile.txt"
"Hello World" | Set-Content -Path $testFile
}
It "Should return size for file" {
$testFile = Join-Path $script:TestDir "testfile.txt"
$result = Get-PathSize -Path $testFile
$result | Should -BeGreaterThan 0
}
It "Should return size for directory" {
$result = Get-PathSize -Path $script:TestDir
$result | Should -BeGreaterThan 0
}
It "Should return 0 for non-existent path" {
$result = Get-PathSize -Path "C:\NonExistent\Path\12345"
$result | Should -Be 0
}
}
Context "Test-ProtectedPath" {
It "Should protect Windows directory" {
Test-ProtectedPath -Path "C:\Windows" | Should -Be $true
Test-ProtectedPath -Path "C:\Windows\System32" | Should -Be $true
}
It "Should protect Windows Defender paths" {
Test-ProtectedPath -Path "C:\Program Files\Windows Defender" | Should -Be $true
Test-ProtectedPath -Path "C:\ProgramData\Microsoft\Windows Defender" | Should -Be $true
}
It "Should not protect temp directories" {
Test-ProtectedPath -Path $env:TEMP | Should -Be $false
}
}
Context "Test-SafePath" {
It "Should return false for protected paths" {
Test-SafePath -Path "C:\Windows" | Should -Be $false
Test-SafePath -Path "C:\Windows\System32" | Should -Be $false
}
It "Should return true for safe paths" {
Test-SafePath -Path $env:TEMP | Should -Be $true
}
It "Should return false for empty paths" {
# Test-SafePath has mandatory path parameter, so empty/null throws
# But internally it should handle empty strings gracefully
{ Test-SafePath -Path "" } | Should -Throw
}
}
Context "Remove-SafeItem" {
BeforeEach {
$script:TestFile = Join-Path $script:TestDir "safe_remove_test.txt"
"Test content" | Set-Content -Path $script:TestFile
}
It "Should remove file successfully" {
$result = Remove-SafeItem -Path $script:TestFile
$result | Should -Be $true
Test-Path $script:TestFile | Should -Be $false
}
It "Should respect DryRun mode" {
$env:MOLE_DRY_RUN = "1"
try {
# Reset the module's DryRun state
Set-DryRunMode -Enabled $true
$result = Remove-SafeItem -Path $script:TestFile
$result | Should -Be $true
Test-Path $script:TestFile | Should -Be $true # File should still exist
}
finally {
$env:MOLE_DRY_RUN = $null
Set-DryRunMode -Enabled $false
}
}
It "Should not remove protected paths" {
$result = Remove-SafeItem -Path "C:\Windows\System32"
$result | Should -Be $false
}
}
}
Describe "Logging Module" {
Context "Write-Log Functions" {
It "Should have Write-Info function" {
{ Write-Info "Test message" } | Should -Not -Throw
}
It "Should have Write-Success function" {
{ Write-Success "Test message" } | Should -Not -Throw
}
It "Should have Write-MoleWarning function" {
# Note: The actual function is Write-MoleWarning
{ Write-MoleWarning "Test message" } | Should -Not -Throw
}
It "Should have Write-MoleError function" {
# Note: The actual function is Write-MoleError
{ Write-MoleError "Test message" } | Should -Not -Throw
}
}
Context "Section Functions" {
It "Should start and stop sections without error" {
{ Start-Section -Title "Test Section" } | Should -Not -Throw
{ Stop-Section } | Should -Not -Throw
}
}
}
Describe "UI Module" {
Context "Show-Banner" {
It "Should display banner without error" {
{ Show-Banner } | Should -Not -Throw
}
}
Context "Show-Header" {
It "Should display header without error" {
{ Show-Header -Title "Test Header" } | Should -Not -Throw
}
It "Should accept subtitle parameter" {
{ Show-Header -Title "Test" -Subtitle "Subtitle" } | Should -Not -Throw
}
}
Context "Show-Summary" {
It "Should display summary without error" {
{ Show-Summary -SizeBytes 1024 -ItemCount 5 } | Should -Not -Throw
}
}
Context "Read-Confirmation" {
It "Should have Read-Confirmation function" {
Get-Command Read-Confirmation -ErrorAction SilentlyContinue | Should -Not -BeNullOrEmpty
}
}
}
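When iterating on a single module, one Describe block from these files can presumably be selected by name with Pester 5's -FullNameFilter, for example:

# Sketch: run only the File Operations tests (assumes Pester 5.x)
Invoke-Pester -Path .\tests\Core.Tests.ps1 -FullNameFilter 'File Operations Module*' -Output Detailed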

(deleted file)

@@ -1,164 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-app-caches.XXXXXX")"
export HOME
mkdir -p "$HOME"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
@test "clean_xcode_tools skips derived data when Xcode running" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" /bin/bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
pgrep() { return 0; }
safe_clean() { echo "$2"; }
clean_xcode_tools
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Xcode is running"* ]]
[[ "$output" != *"derived data"* ]]
[[ "$output" != *"archives"* ]]
}
@test "clean_media_players protects spotify offline cache" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" /bin/bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
mkdir -p "$HOME/Library/Application Support/Spotify/PersistentCache/Storage"
touch "$HOME/Library/Application Support/Spotify/PersistentCache/Storage/offline.bnk"
safe_clean() { echo "CLEAN:$2"; }
clean_media_players
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Spotify cache protected"* ]]
[[ "$output" != *"CLEAN: Spotify cache"* ]]
}
@test "clean_user_gui_applications calls all sections" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" /bin/bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
stop_section_spinner() { :; }
safe_clean() { :; }
clean_xcode_tools() { echo "xcode"; }
clean_code_editors() { echo "editors"; }
clean_communication_apps() { echo "comm"; }
clean_user_gui_applications
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"xcode"* ]]
[[ "$output" == *"editors"* ]]
[[ "$output" == *"comm"* ]]
}
@test "clean_ai_apps calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_ai_apps
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"ChatGPT cache"* ]]
[[ "$output" == *"Claude desktop cache"* ]]
}
@test "clean_design_tools calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_design_tools
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Sketch cache"* ]]
[[ "$output" == *"Figma cache"* ]]
}
@test "clean_dingtalk calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_dingtalk
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"DingTalk iDingTalk cache"* ]]
[[ "$output" == *"DingTalk logs"* ]]
}
@test "clean_download_managers calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_download_managers
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Aria2 cache"* ]]
[[ "$output" == *"qBittorrent cache"* ]]
}
@test "clean_productivity_apps calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_productivity_apps
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"MiaoYan cache"* ]]
[[ "$output" == *"Flomo cache"* ]]
}
@test "clean_screenshot_tools calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_screenshot_tools
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"CleanShot cache"* ]]
[[ "$output" == *"Xnip cache"* ]]
}
@test "clean_office_applications calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/user.sh"
stop_section_spinner() { :; }
safe_clean() { echo "$2"; }
clean_office_applications
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Microsoft Word cache"* ]]
[[ "$output" == *"Apple iWork cache"* ]]
}

(deleted file)

@@ -1,116 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-apps-module.XXXXXX")"
export HOME
mkdir -p "$HOME"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
@test "clean_ds_store_tree reports dry-run summary" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=true /bin/bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/apps.sh"
start_inline_spinner() { :; }
stop_section_spinner() { :; }
note_activity() { :; }
get_file_size() { echo 10; }
bytes_to_human() { echo "0B"; }
files_cleaned=0
total_size_cleaned=0
total_items=0
mkdir -p "$HOME/test_ds"
touch "$HOME/test_ds/.DS_Store"
clean_ds_store_tree "$HOME/test_ds" "DS test"
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"DS test"* ]]
}
@test "scan_installed_apps uses cache when fresh" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/apps.sh"
mkdir -p "$HOME/.cache/mole"
echo "com.example.App" > "$HOME/.cache/mole/installed_apps_cache"
get_file_mtime() { date +%s; }
debug_log() { :; }
scan_installed_apps "$HOME/installed.txt"
cat "$HOME/installed.txt"
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"com.example.App"* ]]
}
@test "is_bundle_orphaned returns true for old uninstalled bundle" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" ORPHAN_AGE_THRESHOLD=60 bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/apps.sh"
should_protect_data() { return 1; }
get_file_mtime() { echo 0; }
if is_bundle_orphaned "com.example.Old" "$HOME/old" "$HOME/installed.txt"; then
echo "orphan"
fi
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"orphan"* ]]
}
@test "clean_orphaned_app_data skips when no permission" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/apps.sh"
ls() { return 1; }
stop_section_spinner() { :; }
clean_orphaned_app_data
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Skipped: No permission"* ]]
}
@test "is_critical_system_component matches known system services" {
run bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/app_protection.sh"
is_critical_system_component "backgroundtaskmanagement" && echo "yes"
is_critical_system_component "SystemSettings" && echo "yes"
EOF
[ "$status" -eq 0 ]
[[ "${lines[0]}" == "yes" ]]
[[ "${lines[1]}" == "yes" ]]
}
@test "is_critical_system_component ignores non-system names" {
run bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/app_protection.sh"
if is_critical_system_component "myapp"; then
echo "bad"
else
echo "ok"
fi
EOF
[ "$status" -eq 0 ]
[[ "$output" == "ok" ]]
}

(deleted file)

@@ -1,322 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-browser-cleanup.XXXXXX")"
export HOME
mkdir -p "$HOME"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
@test "clean_chrome_old_versions skips when Chrome is running" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
# Mock pgrep to simulate Chrome running
pgrep() { return 0; }
export -f pgrep
clean_chrome_old_versions
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Google Chrome running"* ]]
[[ "$output" == *"old versions cleanup skipped"* ]]
}
@test "clean_chrome_old_versions removes old versions but keeps current" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=true bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
# Mock pgrep to simulate Chrome not running
pgrep() { return 1; }
export -f pgrep
# Create mock Chrome directory structure
CHROME_APP="$HOME/Applications/Google Chrome.app"
VERSIONS_DIR="$CHROME_APP/Contents/Frameworks/Google Chrome Framework.framework/Versions"
mkdir -p "$VERSIONS_DIR"/{128.0.0.0,129.0.0.0,130.0.0.0}
# Create Current symlink pointing to 130.0.0.0
ln -s "130.0.0.0" "$VERSIONS_DIR/Current"
# Mock functions
is_path_whitelisted() { return 1; }
get_path_size_kb() { echo "10240"; }
bytes_to_human() { echo "10M"; }
note_activity() { :; }
export -f is_path_whitelisted get_path_size_kb bytes_to_human note_activity
# Initialize counters
files_cleaned=0
total_size_cleaned=0
total_items=0
clean_chrome_old_versions
# Verify output mentions old versions cleanup
echo "Cleaned: $files_cleaned items"
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Chrome old versions"* ]]
[[ "$output" == *"dry"* ]]
[[ "$output" == *"Cleaned: 2 items"* ]]
}
@test "clean_chrome_old_versions respects whitelist" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=true bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
# Mock pgrep to simulate Chrome not running
pgrep() { return 1; }
export -f pgrep
# Create mock Chrome directory structure
CHROME_APP="$HOME/Applications/Google Chrome.app"
VERSIONS_DIR="$CHROME_APP/Contents/Frameworks/Google Chrome Framework.framework/Versions"
mkdir -p "$VERSIONS_DIR"/{128.0.0.0,129.0.0.0,130.0.0.0}
# Create Current symlink pointing to 130.0.0.0
ln -s "130.0.0.0" "$VERSIONS_DIR/Current"
# Mock is_path_whitelisted to protect version 128.0.0.0
is_path_whitelisted() {
[[ "$1" == *"128.0.0.0"* ]] && return 0
return 1
}
get_path_size_kb() { echo "10240"; }
bytes_to_human() { echo "10M"; }
note_activity() { :; }
export -f is_path_whitelisted get_path_size_kb bytes_to_human note_activity
# Initialize counters
files_cleaned=0
total_size_cleaned=0
total_items=0
clean_chrome_old_versions
# Should only clean 129.0.0.0 (not 128.0.0.0 which is whitelisted)
echo "Cleaned: $files_cleaned items"
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Cleaned: 1 items"* ]]
}
@test "clean_edge_updater_old_versions keeps latest version" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=true bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
pgrep() { return 1; }
export -f pgrep
UPDATER_DIR="$HOME/Library/Application Support/Microsoft/EdgeUpdater/apps/msedge-stable"
mkdir -p "$UPDATER_DIR"/{117.0.2045.60,118.0.2088.46,119.0.2108.9}
is_path_whitelisted() { return 1; }
get_path_size_kb() { echo "10240"; }
bytes_to_human() { echo "10M"; }
note_activity() { :; }
export -f is_path_whitelisted get_path_size_kb bytes_to_human note_activity
files_cleaned=0
total_size_cleaned=0
total_items=0
clean_edge_updater_old_versions
echo "Cleaned: $files_cleaned items"
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Edge updater old versions"* ]]
[[ "$output" == *"dry"* ]]
[[ "$output" == *"Cleaned: 2 items"* ]]
}
@test "clean_chrome_old_versions DRY_RUN mode does not delete files" {
# Create test directory
CHROME_APP="$HOME/Applications/Google Chrome.app"
VERSIONS_DIR="$CHROME_APP/Contents/Frameworks/Google Chrome Framework.framework/Versions"
mkdir -p "$VERSIONS_DIR"/{128.0.0.0,130.0.0.0}
# Remove Current if it exists as a directory, then create symlink
rm -rf "$VERSIONS_DIR/Current"
ln -s "130.0.0.0" "$VERSIONS_DIR/Current"
# Create a marker file in old version
touch "$VERSIONS_DIR/128.0.0.0/marker.txt"
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=true bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
pgrep() { return 1; }
is_path_whitelisted() { return 1; }
get_path_size_kb() { echo "10240"; }
bytes_to_human() { echo "10M"; }
note_activity() { :; }
export -f pgrep is_path_whitelisted get_path_size_kb bytes_to_human note_activity
files_cleaned=0
total_size_cleaned=0
total_items=0
clean_chrome_old_versions
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"dry"* ]]
# Verify marker file still exists (not deleted in dry run)
[ -f "$VERSIONS_DIR/128.0.0.0/marker.txt" ]
}
@test "clean_chrome_old_versions handles missing Current symlink gracefully" {
# Use a fresh temp directory for this test
TEST_HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-test5.XXXXXX")"
run env HOME="$TEST_HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
pgrep() { return 1; }
is_path_whitelisted() { return 1; }
get_path_size_kb() { echo "10240"; }
bytes_to_human() { echo "10M"; }
note_activity() { :; }
export -f pgrep is_path_whitelisted get_path_size_kb bytes_to_human note_activity
# Initialize counters to prevent unbound variable errors
files_cleaned=0
total_size_cleaned=0
total_items=0
# Create Chrome app without Current symlink
CHROME_APP="$HOME/Applications/Google Chrome.app"
VERSIONS_DIR="$CHROME_APP/Contents/Frameworks/Google Chrome Framework.framework/Versions"
mkdir -p "$VERSIONS_DIR"/{128.0.0.0,129.0.0.0}
# No Current symlink created
clean_chrome_old_versions
EOF
rm -rf "$TEST_HOME"
[ "$status" -eq 0 ]
# Should exit gracefully with no output
}
@test "clean_edge_old_versions skips when Edge is running" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
# Mock pgrep to simulate Edge running
pgrep() { return 0; }
export -f pgrep
clean_edge_old_versions
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Microsoft Edge running"* ]]
[[ "$output" == *"old versions cleanup skipped"* ]]
}
@test "clean_edge_old_versions removes old versions but keeps current" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=true bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
pgrep() { return 1; }
export -f pgrep
# Create mock Edge directory structure
EDGE_APP="$HOME/Applications/Microsoft Edge.app"
VERSIONS_DIR="$EDGE_APP/Contents/Frameworks/Microsoft Edge Framework.framework/Versions"
mkdir -p "$VERSIONS_DIR"/{120.0.0.0,121.0.0.0,122.0.0.0}
# Create Current symlink pointing to 122.0.0.0
ln -s "122.0.0.0" "$VERSIONS_DIR/Current"
is_path_whitelisted() { return 1; }
get_path_size_kb() { echo "10240"; }
bytes_to_human() { echo "10M"; }
note_activity() { :; }
export -f is_path_whitelisted get_path_size_kb bytes_to_human note_activity
files_cleaned=0
total_size_cleaned=0
total_items=0
clean_edge_old_versions
echo "Cleaned: $files_cleaned items"
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Edge old versions"* ]]
[[ "$output" == *"dry"* ]]
[[ "$output" == *"Cleaned: 2 items"* ]]
}
@test "clean_edge_old_versions handles no old versions gracefully" {
# Use a fresh temp directory for this test
TEST_HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-test8.XXXXXX")"
run env HOME="$TEST_HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
pgrep() { return 1; }
is_path_whitelisted() { return 1; }
get_path_size_kb() { echo "10240"; }
bytes_to_human() { echo "10M"; }
note_activity() { :; }
export -f pgrep is_path_whitelisted get_path_size_kb bytes_to_human note_activity
# Initialize counters
files_cleaned=0
total_size_cleaned=0
total_items=0
# Create Edge with only current version
EDGE_APP="$HOME/Applications/Microsoft Edge.app"
VERSIONS_DIR="$EDGE_APP/Contents/Frameworks/Microsoft Edge Framework.framework/Versions"
mkdir -p "$VERSIONS_DIR/122.0.0.0"
ln -s "122.0.0.0" "$VERSIONS_DIR/Current"
clean_edge_old_versions
EOF
rm -rf "$TEST_HOME"
[ "$status" -eq 0 ]
# Should exit gracefully with no cleanup output
[[ "$output" != *"Edge old versions"* ]]
}

(deleted file)

@@ -1,354 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-clean-home.XXXXXX")"
export HOME
mkdir -p "$HOME"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
setup() {
export TERM="xterm-256color"
rm -rf "${HOME:?}"/*
rm -rf "$HOME/Library" "$HOME/.config"
mkdir -p "$HOME/Library/Caches" "$HOME/.config/mole"
}
@test "mo clean --dry-run skips system cleanup in non-interactive mode" {
run env HOME="$HOME" MOLE_TEST_MODE=1 "$PROJECT_ROOT/mole" clean --dry-run
[ "$status" -eq 0 ]
[[ "$output" == *"Dry Run Mode"* ]]
[[ "$output" != *"Deep system-level cleanup"* ]]
}
@test "mo clean --dry-run reports user cache without deleting it" {
mkdir -p "$HOME/Library/Caches/TestApp"
echo "cache data" > "$HOME/Library/Caches/TestApp/cache.tmp"
run env HOME="$HOME" MOLE_TEST_MODE=1 "$PROJECT_ROOT/mole" clean --dry-run
[ "$status" -eq 0 ]
[[ "$output" == *"User app cache"* ]]
[[ "$output" == *"Potential space"* ]]
[ -f "$HOME/Library/Caches/TestApp/cache.tmp" ]
}
@test "mo clean honors whitelist entries" {
mkdir -p "$HOME/Library/Caches/WhitelistedApp"
echo "keep me" > "$HOME/Library/Caches/WhitelistedApp/data.tmp"
cat > "$HOME/.config/mole/whitelist" << EOF
$HOME/Library/Caches/WhitelistedApp*
EOF
run env HOME="$HOME" MOLE_TEST_MODE=1 "$PROJECT_ROOT/mole" clean --dry-run
[ "$status" -eq 0 ]
[[ "$output" == *"Protected"* ]]
[ -f "$HOME/Library/Caches/WhitelistedApp/data.tmp" ]
}
@test "mo clean honors whitelist entries with $HOME literal" {
mkdir -p "$HOME/Library/Caches/WhitelistedApp"
echo "keep me" > "$HOME/Library/Caches/WhitelistedApp/data.tmp"
cat > "$HOME/.config/mole/whitelist" << 'EOF'
$HOME/Library/Caches/WhitelistedApp*
EOF
run env HOME="$HOME" MOLE_TEST_MODE=1 "$PROJECT_ROOT/mole" clean --dry-run
[ "$status" -eq 0 ]
[[ "$output" == *"Protected"* ]]
[ -f "$HOME/Library/Caches/WhitelistedApp/data.tmp" ]
}
@test "mo clean protects Maven repository by default" {
mkdir -p "$HOME/.m2/repository/org/example"
echo "dependency" > "$HOME/.m2/repository/org/example/lib.jar"
run env HOME="$HOME" MOLE_TEST_MODE=1 "$PROJECT_ROOT/mole" clean --dry-run
[ "$status" -eq 0 ]
[ -f "$HOME/.m2/repository/org/example/lib.jar" ]
[[ "$output" != *"Maven repository cache"* ]]
}
@test "FINDER_METADATA_SENTINEL in whitelist protects .DS_Store files" {
mkdir -p "$HOME/Documents"
touch "$HOME/Documents/.DS_Store"
cat > "$HOME/.config/mole/whitelist" << EOF
FINDER_METADATA_SENTINEL
EOF
# Test whitelist logic directly instead of running full clean
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/manage/whitelist.sh"
load_whitelist
if is_whitelisted "$HOME/Documents/.DS_Store"; then
echo "protected by whitelist"
fi
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"protected by whitelist"* ]]
[ -f "$HOME/Documents/.DS_Store" ]
}
@test "clean_recent_items removes shared file lists" {
local shared_dir="$HOME/Library/Application Support/com.apple.sharedfilelist"
mkdir -p "$shared_dir"
touch "$shared_dir/com.apple.LSSharedFileList.RecentApplications.sfl2"
touch "$shared_dir/com.apple.LSSharedFileList.RecentDocuments.sfl2"
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
safe_clean() {
echo "safe_clean $1"
}
clean_recent_items
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Recent"* ]]
}
@test "clean_recent_items handles missing shared directory" {
rm -rf "$HOME/Library/Application Support/com.apple.sharedfilelist"
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
safe_clean() {
echo "safe_clean $1"
}
clean_recent_items
EOF
[ "$status" -eq 0 ]
}
@test "clean_mail_downloads skips cleanup when size below threshold" {
mkdir -p "$HOME/Library/Mail Downloads"
echo "test" > "$HOME/Library/Mail Downloads/small.txt"
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
clean_mail_downloads
EOF
[ "$status" -eq 0 ]
[ -f "$HOME/Library/Mail Downloads/small.txt" ]
}
@test "clean_mail_downloads removes old attachments" {
mkdir -p "$HOME/Library/Mail Downloads"
touch "$HOME/Library/Mail Downloads/old.pdf"
touch -t 202301010000 "$HOME/Library/Mail Downloads/old.pdf"
dd if=/dev/zero of="$HOME/Library/Mail Downloads/dummy.dat" bs=1024 count=6000 2>/dev/null
[ -f "$HOME/Library/Mail Downloads/old.pdf" ]
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
clean_mail_downloads
EOF
[ "$status" -eq 0 ]
[ ! -f "$HOME/Library/Mail Downloads/old.pdf" ]
}
@test "clean_time_machine_failed_backups detects running backup correctly" {
if ! command -v tmutil > /dev/null 2>&1; then
skip "tmutil not available"
fi
local mock_bin="$HOME/bin"
mkdir -p "$mock_bin"
cat > "$mock_bin/tmutil" << 'MOCK_TMUTIL'
#!/bin/bash
if [[ "$1" == "status" ]]; then
cat << 'TMUTIL_OUTPUT'
Backup session status:
{
ClientID = "com.apple.backupd";
Running = 0;
}
TMUTIL_OUTPUT
elif [[ "$1" == "destinationinfo" ]]; then
cat << 'DEST_OUTPUT'
====================================================
Name : TestBackup
Kind : Local
Mount Point : /Volumes/TestBackup
ID : 12345678-1234-1234-1234-123456789012
====================================================
DEST_OUTPUT
fi
MOCK_TMUTIL
chmod +x "$mock_bin/tmutil"
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" PATH="$mock_bin:$PATH" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/system.sh"
clean_time_machine_failed_backups
EOF
[ "$status" -eq 0 ]
[[ "$output" != *"Time Machine backup in progress, skipping cleanup"* ]]
}
@test "clean_time_machine_failed_backups skips when backup is actually running" {
if ! command -v tmutil > /dev/null 2>&1; then
skip "tmutil not available"
fi
local mock_bin="$HOME/bin"
mkdir -p "$mock_bin"
cat > "$mock_bin/tmutil" << 'MOCK_TMUTIL'
#!/bin/bash
if [[ "$1" == "status" ]]; then
cat << 'TMUTIL_OUTPUT'
Backup session status:
{
ClientID = "com.apple.backupd";
Running = 1;
}
TMUTIL_OUTPUT
elif [[ "$1" == "destinationinfo" ]]; then
cat << 'DEST_OUTPUT'
====================================================
Name : TestBackup
Kind : Local
Mount Point : /Volumes/TestBackup
ID : 12345678-1234-1234-1234-123456789012
====================================================
DEST_OUTPUT
fi
MOCK_TMUTIL
chmod +x "$mock_bin/tmutil"
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" PATH="$mock_bin:$PATH" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/system.sh"
clean_time_machine_failed_backups
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Time Machine backup in progress, skipping cleanup"* ]]
}
@test "clean_empty_library_items removes nested empty directories in Application Support" {
# Create nested empty directory structure
mkdir -p "$HOME/Library/Application Support/UninstalledApp1/SubDir/DeepDir"
mkdir -p "$HOME/Library/Application Support/UninstalledApp2/Cache"
mkdir -p "$HOME/Library/Application Support/ActiveApp/Data"
mkdir -p "$HOME/Library/Caches/EmptyCache/SubCache"
# Create a file in ActiveApp to make it non-empty
touch "$HOME/Library/Application Support/ActiveApp/Data/config.json"
# Create top-level empty directory in Library
mkdir -p "$HOME/Library/EmptyTopLevel"
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
# Mock dependencies
is_path_whitelisted() { return 1; }
is_critical_system_component() { return 1; }
bytes_to_human() { echo "$1"; }
note_activity() { :; }
safe_clean() {
# Actually remove the directories for testing
for path in "$@"; do
if [ "$path" != "${@: -1}" ]; then # Skip the description (last arg)
rm -rf "$path" 2>/dev/null || true
fi
done
}
clean_empty_library_items
EOF
[ "$status" -eq 0 ]
# Empty nested dirs should be removed
[ ! -d "$HOME/Library/Application Support/UninstalledApp1" ]
[ ! -d "$HOME/Library/Application Support/UninstalledApp2" ]
[ ! -d "$HOME/Library/Caches/EmptyCache" ]
[ ! -d "$HOME/Library/EmptyTopLevel" ]
# Non-empty directory should remain
[ -d "$HOME/Library/Application Support/ActiveApp" ]
[ -f "$HOME/Library/Application Support/ActiveApp/Data/config.json" ]
}
@test "clean_empty_library_items respects whitelist for empty directories" {
mkdir -p "$HOME/Library/Application Support/ProtectedEmptyApp"
mkdir -p "$HOME/Library/Application Support/UnprotectedEmptyApp"
mkdir -p "$HOME/.config/mole"
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
# Mock dependencies
is_critical_system_component() { return 1; }
bytes_to_human() { echo "$1"; }
note_activity() { :; }
# Mock whitelist to protect ProtectedEmptyApp
is_path_whitelisted() {
[[ "$1" == *"ProtectedEmptyApp"* ]]
}
safe_clean() {
# Actually remove the directories for testing
for path in "$@"; do
if [ "$path" != "${@: -1}" ]; then # Skip the description (last arg)
rm -rf "$path" 2>/dev/null || true
fi
done
}
clean_empty_library_items
EOF
[ "$status" -eq 0 ]
# Whitelisted directory should remain even if empty
[ -d "$HOME/Library/Application Support/ProtectedEmptyApp" ]
# Non-whitelisted directory should be removed
[ ! -d "$HOME/Library/Application Support/UnprotectedEmptyApp" ]
}

(deleted file)

@@ -1,107 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-dev-caches.XXXXXX")"
export HOME
mkdir -p "$HOME"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
@test "clean_dev_npm cleans orphaned pnpm store" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/dev.sh"
start_section_spinner() { :; }
stop_section_spinner() { :; }
clean_tool_cache() { echo "$1"; }
safe_clean() { echo "$2"; }
note_activity() { :; }
run_with_timeout() { shift; "$@"; }
pnpm() {
if [[ "$1" == "store" && "$2" == "prune" ]]; then
return 0
fi
if [[ "$1" == "store" && "$2" == "path" ]]; then
echo "/tmp/pnpm-store"
return 0
fi
return 0
}
npm() { return 0; }
export -f pnpm npm
clean_dev_npm
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Orphaned pnpm store"* ]]
}
@test "clean_dev_docker skips when daemon not running" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" MO_DEBUG=1 DRY_RUN=false bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/dev.sh"
start_section_spinner() { :; }
stop_section_spinner() { :; }
run_with_timeout() { return 1; }
clean_tool_cache() { echo "$1"; }
safe_clean() { echo "$2"; }
debug_log() { echo "$*"; }
docker() { return 1; }
export -f docker
clean_dev_docker
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Docker daemon not running"* ]]
[[ "$output" != *"Docker build cache"* ]]
}
@test "clean_developer_tools runs key stages" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/dev.sh"
stop_section_spinner() { :; }
clean_sqlite_temp_files() { :; }
clean_dev_npm() { echo "npm"; }
clean_homebrew() { echo "brew"; }
clean_project_caches() { :; }
clean_dev_python() { :; }
clean_dev_go() { :; }
clean_dev_rust() { :; }
clean_dev_docker() { :; }
clean_dev_cloud() { :; }
clean_dev_nix() { :; }
clean_dev_shell() { :; }
clean_dev_frontend() { :; }
clean_dev_mobile() { :; }
clean_dev_jvm() { :; }
clean_dev_other_langs() { :; }
clean_dev_cicd() { :; }
clean_dev_database() { :; }
clean_dev_api_tools() { :; }
clean_dev_network() { :; }
clean_dev_misc() { :; }
safe_clean() { :; }
debug_log() { :; }
clean_developer_tools
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"npm"* ]]
[[ "$output" == *"brew"* ]]
}

(deleted file)

@@ -1,224 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-clean-extras.XXXXXX")"
export HOME
mkdir -p "$HOME"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
@test "clean_cloud_storage calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
stop_section_spinner() { :; }
safe_clean() { echo "$2"; }
clean_cloud_storage
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Dropbox cache"* ]]
[[ "$output" == *"Google Drive cache"* ]]
}
@test "clean_virtualization_tools hits cache paths" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
stop_section_spinner() { :; }
safe_clean() { echo "$2"; }
clean_virtualization_tools
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"VMware Fusion cache"* ]]
[[ "$output" == *"Parallels cache"* ]]
}
@test "clean_email_clients calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_email_clients
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Spark cache"* ]]
[[ "$output" == *"Airmail cache"* ]]
}
@test "clean_note_apps calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_note_apps
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Notion cache"* ]]
[[ "$output" == *"Obsidian cache"* ]]
}
@test "clean_task_apps calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_task_apps
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Todoist cache"* ]]
[[ "$output" == *"Any.do cache"* ]]
}
@test "scan_external_volumes skips when no volumes" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
export DRY_RUN="false"
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
run_with_timeout() { return 1; }
# Mock missing dependencies and UI to ensure test passes regardless of volumes
clean_ds_store_tree() { :; }
start_section_spinner() { :; }
stop_section_spinner() { :; }
scan_external_volumes
EOF
[ "$status" -eq 0 ]
}
@test "clean_video_tools calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_video_tools
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"ScreenFlow cache"* ]]
[[ "$output" == *"Final Cut Pro cache"* ]]
}
@test "clean_video_players calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_video_players
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"IINA cache"* ]]
[[ "$output" == *"VLC cache"* ]]
}
@test "clean_3d_tools calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_3d_tools
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Blender cache"* ]]
[[ "$output" == *"Cinema 4D cache"* ]]
}
@test "clean_gaming_platforms calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_gaming_platforms
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Steam cache"* ]]
[[ "$output" == *"Epic Games cache"* ]]
}
@test "clean_translation_apps calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_translation_apps
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Youdao Dictionary cache"* ]]
[[ "$output" == *"Eudict cache"* ]]
}
@test "clean_launcher_apps calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_launcher_apps
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Alfred cache"* ]]
[[ "$output" == *"The Unarchiver cache"* ]]
}
@test "clean_remote_desktop calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_remote_desktop
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"TeamViewer cache"* ]]
[[ "$output" == *"AnyDesk cache"* ]]
}
@test "clean_system_utils calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_system_utils
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Input Source Pro cache"* ]]
[[ "$output" == *"WakaTime cache"* ]]
}
@test "clean_shell_utils calls expected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/app_caches.sh"
safe_clean() { echo "$2"; }
clean_shell_utils
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Zsh completion cache"* ]]
[[ "$output" == *"wget HSTS cache"* ]]
}

(deleted file)

@@ -1,179 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-clean-caches.XXXXXX")"
export HOME
mkdir -p "$HOME"
mkdir -p "$HOME/.cache/mole"
mkdir -p "$HOME/Library/Caches"
mkdir -p "$HOME/Library/Logs"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
setup() {
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/caches.sh"
# Mock run_with_timeout to skip timeout overhead in tests
# shellcheck disable=SC2329
run_with_timeout() {
shift # Remove timeout argument
"$@"
}
export -f run_with_timeout
rm -f "$HOME/.cache/mole/permissions_granted"
}
@test "check_tcc_permissions skips in non-interactive mode" {
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; source '$PROJECT_ROOT/lib/clean/caches.sh'; check_tcc_permissions" < /dev/null
[ "$status" -eq 0 ]
[[ ! -f "$HOME/.cache/mole/permissions_granted" ]]
}
@test "check_tcc_permissions skips when permissions already granted" {
mkdir -p "$HOME/.cache/mole"
touch "$HOME/.cache/mole/permissions_granted"
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; source '$PROJECT_ROOT/lib/clean/caches.sh'; [[ -t 1 ]] || true; check_tcc_permissions"
[ "$status" -eq 0 ]
}
@test "check_tcc_permissions validates protected directories" {
[[ -d "$HOME/Library/Caches" ]]
[[ -d "$HOME/Library/Logs" ]]
[[ -d "$HOME/.cache/mole" ]]
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; source '$PROJECT_ROOT/lib/clean/caches.sh'; check_tcc_permissions < /dev/null"
[ "$status" -eq 0 ]
}
@test "clean_service_worker_cache returns early when path doesn't exist" {
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; source '$PROJECT_ROOT/lib/clean/caches.sh'; clean_service_worker_cache 'TestBrowser' '/nonexistent/path'"
[ "$status" -eq 0 ]
}
@test "clean_service_worker_cache handles empty cache directory" {
local test_cache="$HOME/test_sw_cache"
mkdir -p "$test_cache"
run bash -c "
run_with_timeout() { shift; \"\$@\"; }
export -f run_with_timeout
source '$PROJECT_ROOT/lib/core/common.sh'
source '$PROJECT_ROOT/lib/clean/caches.sh'
clean_service_worker_cache 'TestBrowser' '$test_cache'
"
[ "$status" -eq 0 ]
rm -rf "$test_cache"
}
@test "clean_service_worker_cache protects specified domains" {
local test_cache="$HOME/test_sw_cache"
mkdir -p "$test_cache/abc123_https_capcut.com_0"
mkdir -p "$test_cache/def456_https_example.com_0"
run bash -c "
run_with_timeout() {
local timeout=\"\$1\"
shift
if [[ \"\$1\" == \"get_path_size_kb\" ]]; then
echo 0
return 0
fi
if [[ \"\$1\" == \"sh\" ]]; then
printf '%s\n' \
'$test_cache/abc123_https_capcut.com_0' \
'$test_cache/def456_https_example.com_0'
return 0
fi
\"\$@\"
}
export -f run_with_timeout
export DRY_RUN=true
export PROTECTED_SW_DOMAINS=(capcut.com photopea.com)
source '$PROJECT_ROOT/lib/core/common.sh'
source '$PROJECT_ROOT/lib/clean/caches.sh'
clean_service_worker_cache 'TestBrowser' '$test_cache'
"
[ "$status" -eq 0 ]
[[ -d "$test_cache/abc123_https_capcut.com_0" ]]
rm -rf "$test_cache"
}
@test "clean_project_caches completes without errors" {
mkdir -p "$HOME/projects/test-app/.next/cache"
mkdir -p "$HOME/projects/python-app/__pycache__"
touch "$HOME/projects/test-app/.next/cache/test.cache"
touch "$HOME/projects/python-app/__pycache__/module.pyc"
run bash -c "
export DRY_RUN=true
source '$PROJECT_ROOT/lib/core/common.sh'
source '$PROJECT_ROOT/lib/clean/caches.sh'
clean_project_caches
"
[ "$status" -eq 0 ]
rm -rf "$HOME/projects"
}
@test "clean_project_caches handles timeout gracefully" {
if ! command -v gtimeout >/dev/null 2>&1 && ! command -v timeout >/dev/null 2>&1; then
skip "gtimeout/timeout not available"
fi
mkdir -p "$HOME/test-project/.next"
function find() {
sleep 2 # Simulate slow find
echo "$HOME/test-project/.next"
}
export -f find
timeout_cmd="timeout"
command -v timeout >/dev/null 2>&1 || timeout_cmd="gtimeout"
run $timeout_cmd 15 bash -c "
source '$PROJECT_ROOT/lib/core/common.sh'
source '$PROJECT_ROOT/lib/clean/caches.sh'
clean_project_caches
"
[ "$status" -eq 0 ] || [ "$status" -eq 124 ]
rm -rf "$HOME/test-project"
}
@test "clean_project_caches excludes Library and Trash directories" {
mkdir -p "$HOME/Library/.next/cache"
mkdir -p "$HOME/.Trash/.next/cache"
mkdir -p "$HOME/projects/.next/cache"
run bash -c "
export DRY_RUN=true
source '$PROJECT_ROOT/lib/core/common.sh'
source '$PROJECT_ROOT/lib/clean/caches.sh'
clean_project_caches
"
[ "$status" -eq 0 ]
rm -rf "$HOME/projects"
}

(deleted file; diff suppressed because it is too large)

(deleted file)

@@ -1,160 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-user-core.XXXXXX")"
export HOME
mkdir -p "$HOME"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
@test "clean_user_essentials respects Trash whitelist" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
start_section_spinner() { :; }
stop_section_spinner() { :; }
safe_clean() { echo "$2"; }
note_activity() { :; }
is_path_whitelisted() { [[ "$1" == "$HOME/.Trash" ]]; }
clean_user_essentials
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Trash"* ]]
[[ "$output" == *"whitelist"* ]]
}
@test "clean_macos_system_caches calls safe_clean for core paths" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
stop_section_spinner() { :; }
safe_clean() { echo "$2"; }
clean_macos_system_caches
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Saved application states"* ]]
[[ "$output" == *"QuickLook"* ]]
}
@test "clean_sandboxed_app_caches skips protected containers" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=true /bin/bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
start_section_spinner() { :; }
stop_section_spinner() { :; }
bytes_to_human() { echo "0B"; }
note_activity() { :; }
safe_clean() { :; }
should_protect_data() { return 0; }
is_critical_system_component() { return 0; }
files_cleaned=0
total_size_cleaned=0
total_items=0
mkdir -p "$HOME/Library/Containers/com.example.app/Data/Library/Caches"
process_container_cache "$HOME/Library/Containers/com.example.app"
clean_sandboxed_app_caches
EOF
[ "$status" -eq 0 ]
[[ "$output" != *"Sandboxed app caches"* ]]
}
@test "clean_finder_metadata respects protection flag" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" PROTECT_FINDER_METADATA=true /bin/bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
stop_section_spinner() { :; }
note_activity() { :; }
clean_finder_metadata
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Finder metadata"* ]]
[[ "$output" == *"protected"* ]]
}
@test "check_ios_device_backups returns when no backup dir" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" /bin/bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
check_ios_device_backups
EOF
[ "$status" -eq 0 ]
}
@test "clean_empty_library_items only cleans empty dirs" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" /bin/bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
safe_clean() { echo "$2"; }
mkdir -p "$HOME/Library/EmptyDir"
touch "$HOME/Library/empty.txt"
clean_empty_library_items
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Empty Library folders"* ]]
[[ "$output" != *"Empty Library files"* ]]
}
@test "clean_browsers calls expected cache paths" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
safe_clean() { echo "$2"; }
clean_browsers
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Safari cache"* ]]
[[ "$output" == *"Firefox cache"* ]]
}
@test "clean_application_support_logs skips when no access" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
note_activity() { :; }
clean_application_support_logs
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Skipped: No permission"* ]]
}
@test "clean_apple_silicon_caches exits when not M-series" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" IS_M_SERIES=false bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
safe_clean() { echo "$2"; }
clean_apple_silicon_caches
EOF
[ "$status" -eq 0 ]
[[ -z "$output" ]]
}


@@ -1,178 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-cli-home.XXXXXX")"
export HOME
mkdir -p "$HOME"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
create_fake_utils() {
local dir="$1"
mkdir -p "$dir"
cat > "$dir/sudo" <<'SCRIPT'
#!/usr/bin/env bash
if [[ "$1" == "-n" || "$1" == "-v" ]]; then
exit 0
fi
exec "$@"
SCRIPT
chmod +x "$dir/sudo"
cat > "$dir/bioutil" <<'SCRIPT'
#!/usr/bin/env bash
if [[ "$1" == "-r" ]]; then
echo "Touch ID: 1"
exit 0
fi
exit 0
SCRIPT
chmod +x "$dir/bioutil"
}
setup() {
rm -rf "$HOME/.config"
mkdir -p "$HOME"
}
@test "mole --help prints command overview" {
run env HOME="$HOME" "$PROJECT_ROOT/mole" --help
[ "$status" -eq 0 ]
[[ "$output" == *"mo clean"* ]]
[[ "$output" == *"mo analyze"* ]]
}
@test "mole --version reports script version" {
expected_version="$(grep '^VERSION=' "$PROJECT_ROOT/mole" | head -1 | sed 's/VERSION=\"\(.*\)\"/\1/')"
run env HOME="$HOME" "$PROJECT_ROOT/mole" --version
[ "$status" -eq 0 ]
[[ "$output" == *"$expected_version"* ]]
}
@test "mole unknown command returns error" {
run env HOME="$HOME" "$PROJECT_ROOT/mole" unknown-command
[ "$status" -ne 0 ]
[[ "$output" == *"Unknown command: unknown-command"* ]]
}
@test "touchid status reports current configuration" {
run env HOME="$HOME" "$PROJECT_ROOT/mole" touchid status
[ "$status" -eq 0 ]
[[ "$output" == *"Touch ID"* ]]
}
@test "mo optimize command is recognized" {
run bash -c "grep -q '\"optimize\")' '$PROJECT_ROOT/mole'"
[ "$status" -eq 0 ]
}
@test "mo analyze binary is valid" {
if [[ -f "$PROJECT_ROOT/bin/analyze-go" ]]; then
[ -x "$PROJECT_ROOT/bin/analyze-go" ]
run file "$PROJECT_ROOT/bin/analyze-go"
[[ "$output" == *"Mach-O"* ]] || [[ "$output" == *"executable"* ]]
else
skip "analyze-go binary not built"
fi
}
@test "mo clean --debug creates debug log file" {
mkdir -p "$HOME/.config/mole"
run env HOME="$HOME" TERM="xterm-256color" MOLE_TEST_MODE=1 MO_DEBUG=1 "$PROJECT_ROOT/mole" clean --dry-run
[ "$status" -eq 0 ]
MOLE_OUTPUT="$output"
DEBUG_LOG="$HOME/.config/mole/mole_debug_session.log"
[ -f "$DEBUG_LOG" ]
run grep "Mole Debug Session" "$DEBUG_LOG"
[ "$status" -eq 0 ]
[[ "$MOLE_OUTPUT" =~ "Debug session log saved to" ]]
}
@test "mo clean without debug does not show debug log path" {
mkdir -p "$HOME/.config/mole"
run env HOME="$HOME" TERM="xterm-256color" MOLE_TEST_MODE=1 MO_DEBUG=0 "$PROJECT_ROOT/mole" clean --dry-run
[ "$status" -eq 0 ]
[[ "$output" != *"Debug session log saved to"* ]]
}
@test "mo clean --debug logs system info" {
mkdir -p "$HOME/.config/mole"
run env HOME="$HOME" TERM="xterm-256color" MOLE_TEST_MODE=1 MO_DEBUG=1 "$PROJECT_ROOT/mole" clean --dry-run
[ "$status" -eq 0 ]
DEBUG_LOG="$HOME/.config/mole/mole_debug_session.log"
run grep "User:" "$DEBUG_LOG"
[ "$status" -eq 0 ]
run grep "Architecture:" "$DEBUG_LOG"
[ "$status" -eq 0 ]
}
@test "touchid status reflects pam file contents" {
pam_file="$HOME/pam_test"
cat > "$pam_file" <<'EOF'
auth sufficient pam_opendirectory.so
EOF
run env MOLE_PAM_SUDO_FILE="$pam_file" "$PROJECT_ROOT/bin/touchid.sh" status
[ "$status" -eq 0 ]
[[ "$output" == *"not configured"* ]]
cat > "$pam_file" <<'EOF'
auth sufficient pam_tid.so
EOF
run env MOLE_PAM_SUDO_FILE="$pam_file" "$PROJECT_ROOT/bin/touchid.sh" status
[ "$status" -eq 0 ]
[[ "$output" == *"enabled"* ]]
}
@test "enable_touchid inserts pam_tid line in pam file" {
pam_file="$HOME/pam_enable"
cat > "$pam_file" <<'EOF'
auth sufficient pam_opendirectory.so
EOF
fake_bin="$HOME/fake-bin"
create_fake_utils "$fake_bin"
run env PATH="$fake_bin:$PATH" MOLE_PAM_SUDO_FILE="$pam_file" "$PROJECT_ROOT/bin/touchid.sh" enable
[ "$status" -eq 0 ]
grep -q "pam_tid.so" "$pam_file"
[[ -f "${pam_file}.mole-backup" ]]
}
@test "disable_touchid removes pam_tid line" {
pam_file="$HOME/pam_disable"
cat > "$pam_file" <<'EOF'
auth sufficient pam_tid.so
auth sufficient pam_opendirectory.so
EOF
fake_bin="$HOME/fake-bin-disable"
create_fake_utils "$fake_bin"
run env PATH="$fake_bin:$PATH" MOLE_PAM_SUDO_FILE="$pam_file" "$PROJECT_ROOT/bin/touchid.sh" disable
[ "$status" -eq 0 ]
run grep "pam_tid.so" "$pam_file"
[ "$status" -ne 0 ]
}
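# Note: these Touch ID tests never modify the real PAM configuration for sudo.
# They point touchid.sh at a scratch pam file via MOLE_PAM_SUDO_FILE and shadow
# the real sudo/bioutil binaries with the stubs from create_fake_utils by
# prepending that directory to PATH, so enable/disable can be exercised safely.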


@@ -1,160 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
ORIGINAL_PATH="${PATH:-}"
export ORIGINAL_PATH
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-completion-home.XXXXXX")"
export HOME
mkdir -p "$HOME"
PATH="$PROJECT_ROOT:$PATH"
export PATH
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
if [[ -n "${ORIGINAL_PATH:-}" ]]; then
export PATH="$ORIGINAL_PATH"
fi
}
setup() {
rm -rf "$HOME/.config"
rm -rf "$HOME/.zshrc" "$HOME/.bashrc" "$HOME/.bash_profile"
mkdir -p "$HOME"
}
@test "completion script exists and is executable" {
[ -f "$PROJECT_ROOT/bin/completion.sh" ]
[ -x "$PROJECT_ROOT/bin/completion.sh" ]
}
@test "completion script has valid bash syntax" {
run bash -n "$PROJECT_ROOT/bin/completion.sh"
[ "$status" -eq 0 ]
}
@test "completion --help shows usage" {
run "$PROJECT_ROOT/bin/completion.sh" --help
[ "$status" -ne 0 ]
[[ "$output" == *"Usage: mole completion"* ]]
[[ "$output" == *"Auto-install"* ]]
}
@test "completion bash generates valid bash script" {
run "$PROJECT_ROOT/bin/completion.sh" bash
[ "$status" -eq 0 ]
[[ "$output" == *"_mole_completions"* ]]
[[ "$output" == *"complete -F _mole_completions mole mo"* ]]
}
@test "completion bash script includes all commands" {
run "$PROJECT_ROOT/bin/completion.sh" bash
[ "$status" -eq 0 ]
[[ "$output" == *"optimize"* ]]
[[ "$output" == *"clean"* ]]
[[ "$output" == *"uninstall"* ]]
[[ "$output" == *"analyze"* ]]
[[ "$output" == *"status"* ]]
[[ "$output" == *"purge"* ]]
[[ "$output" == *"touchid"* ]]
[[ "$output" == *"completion"* ]]
}
@test "completion bash script supports mo command" {
run "$PROJECT_ROOT/bin/completion.sh" bash
[ "$status" -eq 0 ]
[[ "$output" == *"complete -F _mole_completions mole mo"* ]]
}
@test "completion bash can be loaded in bash" {
run bash -c "eval \"\$(\"$PROJECT_ROOT/bin/completion.sh\" bash)\" && complete -p mole"
[ "$status" -eq 0 ]
[[ "$output" == *"_mole_completions"* ]]
}
@test "completion zsh generates valid zsh script" {
run "$PROJECT_ROOT/bin/completion.sh" zsh
[ "$status" -eq 0 ]
[[ "$output" == *"#compdef mole mo"* ]]
[[ "$output" == *"_mole()"* ]]
}
@test "completion zsh includes command descriptions" {
run "$PROJECT_ROOT/bin/completion.sh" zsh
[ "$status" -eq 0 ]
[[ "$output" == *"optimize:Check and maintain system"* ]]
[[ "$output" == *"clean:Free up disk space"* ]]
}
@test "completion fish generates valid fish script" {
run "$PROJECT_ROOT/bin/completion.sh" fish
[ "$status" -eq 0 ]
[[ "$output" == *"complete -c mole"* ]]
[[ "$output" == *"complete -c mo"* ]]
}
@test "completion fish includes both mole and mo commands" {
output="$("$PROJECT_ROOT/bin/completion.sh" fish)"
mole_count=$(echo "$output" | grep -c "complete -c mole")
mo_count=$(echo "$output" | grep -c "complete -c mo")
[ "$mole_count" -gt 0 ]
[ "$mo_count" -gt 0 ]
}
@test "completion auto-install detects zsh" {
# shellcheck disable=SC2030,SC2031
export SHELL=/bin/zsh
# Simulate auto-install (no interaction)
run bash -c "echo 'y' | \"$PROJECT_ROOT/bin/completion.sh\""
if [[ "$output" == *"Already configured"* ]]; then
skip "Already configured from previous test"
fi
[ -f "$HOME/.zshrc" ] || skip "Auto-install didn't create .zshrc"
run grep -E "mole[[:space:]]+completion" "$HOME/.zshrc"
[ "$status" -eq 0 ]
}
@test "completion auto-install detects already installed" {
# shellcheck disable=SC2031
export SHELL=/bin/zsh
mkdir -p "$HOME"
# shellcheck disable=SC2016
echo 'eval "$(mole completion zsh)"' > "$HOME/.zshrc"
run "$PROJECT_ROOT/bin/completion.sh"
[ "$status" -eq 0 ]
[[ "$output" == *"updated"* ]]
}
@test "completion script handles invalid shell argument" {
run "$PROJECT_ROOT/bin/completion.sh" invalid-shell
[ "$status" -ne 0 ]
}
@test "completion subcommand supports bash/zsh/fish" {
run "$PROJECT_ROOT/bin/completion.sh" bash
[ "$status" -eq 0 ]
run "$PROJECT_ROOT/bin/completion.sh" zsh
[ "$status" -eq 0 ]
run "$PROJECT_ROOT/bin/completion.sh" fish
[ "$status" -eq 0 ]
}


@@ -1,207 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-home.XXXXXX")"
export HOME
mkdir -p "$HOME"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
setup() {
rm -rf "$HOME/.config"
mkdir -p "$HOME"
}
@test "mo_spinner_chars returns default sequence" {
result="$(HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/core/common.sh'; mo_spinner_chars")"
[ "$result" = "|/-\\" ]
}
@test "detect_architecture maps current CPU to friendly label" {
expected="Intel"
if [[ "$(uname -m)" == "arm64" ]]; then
expected="Apple Silicon"
fi
result="$(HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/core/common.sh'; detect_architecture")"
[ "$result" = "$expected" ]
}
@test "get_free_space returns a non-empty value" {
result="$(HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/core/common.sh'; get_free_space")"
[[ -n "$result" ]]
}
@test "log_info prints message and appends to log file" {
local message="Informational message from test"
local stdout_output
stdout_output="$(HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/core/common.sh'; log_info '$message'")"
[[ "$stdout_output" == *"$message"* ]]
local log_file="$HOME/.config/mole/mole.log"
[[ -f "$log_file" ]]
grep -q "INFO: $message" "$log_file"
}
@test "log_error writes to stderr and log file" {
local message="Something went wrong"
local stderr_file="$HOME/log_error_stderr.txt"
HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/core/common.sh'; log_error '$message' 1>/dev/null 2>'$stderr_file'"
[[ -s "$stderr_file" ]]
grep -q "$message" "$stderr_file"
local log_file="$HOME/.config/mole/mole.log"
[[ -f "$log_file" ]]
grep -q "ERROR: $message" "$log_file"
}
@test "rotate_log_once only checks log size once per session" {
local log_file="$HOME/.config/mole/mole.log"
mkdir -p "$(dirname "$log_file")"
dd if=/dev/zero of="$log_file" bs=1024 count=1100 2> /dev/null
HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/core/common.sh'"
[[ -f "${log_file}.old" ]]
result=$(HOME="$HOME" MOLE_LOG_ROTATED=1 bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/core/common.sh'; echo \$MOLE_LOG_ROTATED")
[[ "$result" == "1" ]]
}
@test "drain_pending_input clears stdin buffer" {
result=$(
(echo -e "test\ninput" | HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/core/common.sh'; drain_pending_input; echo done") &
pid=$!
sleep 2
if kill -0 "$pid" 2> /dev/null; then
kill "$pid" 2> /dev/null || true
wait "$pid" 2> /dev/null || true
echo "timeout"
else
wait "$pid" 2> /dev/null || true
fi
)
[[ "$result" == "done" ]]
}
@test "bytes_to_human converts byte counts into readable units" {
output="$(
HOME="$HOME" bash --noprofile --norc << 'EOF'
source "$PROJECT_ROOT/lib/core/common.sh"
bytes_to_human 512
bytes_to_human 2048
bytes_to_human $((5 * 1024 * 1024))
bytes_to_human $((3 * 1024 * 1024 * 1024))
EOF
)"
bytes_lines=()
while IFS= read -r line; do
bytes_lines+=("$line")
done <<< "$output"
[ "${bytes_lines[0]}" = "512B" ]
[ "${bytes_lines[1]}" = "2KB" ]
[ "${bytes_lines[2]}" = "5.0MB" ]
[ "${bytes_lines[3]}" = "3.00GB" ]
}
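# The assertions above pin down the formatting convention these tests expect
# from bytes_to_human: whole numbers for bytes and KB, one decimal place for
# MB, and two decimal places for GB.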
@test "create_temp_file and create_temp_dir are tracked and cleaned" {
HOME="$HOME" bash --noprofile --norc << 'EOF'
source "$PROJECT_ROOT/lib/core/common.sh"
create_temp_file > "$HOME/temp_file_path.txt"
create_temp_dir > "$HOME/temp_dir_path.txt"
cleanup_temp_files
EOF
file_path="$(cat "$HOME/temp_file_path.txt")"
dir_path="$(cat "$HOME/temp_dir_path.txt")"
[ ! -e "$file_path" ]
[ ! -e "$dir_path" ]
rm -f "$HOME/temp_file_path.txt" "$HOME/temp_dir_path.txt"
}
@test "should_protect_data protects system and critical apps" {
result=$(HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/core/common.sh'; should_protect_data 'com.apple.Safari' && echo 'protected' || echo 'not-protected'")
[ "$result" = "protected" ]
result=$(HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/core/common.sh'; should_protect_data 'com.clash.app' && echo 'protected' || echo 'not-protected'")
[ "$result" = "protected" ]
result=$(HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/core/common.sh'; should_protect_data 'com.example.RegularApp' && echo 'protected' || echo 'not-protected'")
[ "$result" = "not-protected" ]
}
@test "input methods are protected during cleanup but allowed for uninstall" {
result=$(HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/core/common.sh'; should_protect_data 'com.tencent.inputmethod.QQInput' && echo 'protected' || echo 'not-protected'")
[ "$result" = "protected" ]
result=$(HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/core/common.sh'; should_protect_data 'com.sogou.inputmethod.pinyin' && echo 'protected' || echo 'not-protected'")
[ "$result" = "protected" ]
result=$(HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/core/common.sh'; should_protect_from_uninstall 'com.tencent.inputmethod.QQInput' && echo 'protected' || echo 'not-protected'")
[ "$result" = "not-protected" ]
result=$(HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/core/common.sh'; should_protect_from_uninstall 'com.apple.inputmethod.SCIM' && echo 'protected' || echo 'not-protected'")
[ "$result" = "protected" ]
}
@test "print_summary_block formats output correctly" {
result=$(HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/core/common.sh'; print_summary_block 'success' 'Test Summary' 'Detail 1' 'Detail 2'")
[[ "$result" == *"Test Summary"* ]]
[[ "$result" == *"Detail 1"* ]]
[[ "$result" == *"Detail 2"* ]]
}
@test "start_inline_spinner and stop_inline_spinner work in non-TTY" {
result=$(HOME="$HOME" bash --noprofile --norc << 'EOF'
source "$PROJECT_ROOT/lib/core/common.sh"
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Testing..."
sleep 0.1
stop_inline_spinner
echo "done"
EOF
)
[[ "$result" == *"done"* ]]
}
@test "read_key maps j/k/h/l to navigation" {
run bash -c "export MOLE_BASE_LOADED=1; source '$PROJECT_ROOT/lib/core/ui.sh'; echo -n 'j' | read_key"
[ "$output" = "DOWN" ]
run bash -c "export MOLE_BASE_LOADED=1; source '$PROJECT_ROOT/lib/core/ui.sh'; echo -n 'k' | read_key"
[ "$output" = "UP" ]
run bash -c "export MOLE_BASE_LOADED=1; source '$PROJECT_ROOT/lib/core/ui.sh'; echo -n 'h' | read_key"
[ "$output" = "LEFT" ]
run bash -c "export MOLE_BASE_LOADED=1; source '$PROJECT_ROOT/lib/core/ui.sh'; echo -n 'l' | read_key"
[ "$output" = "RIGHT" ]
}
@test "read_key maps uppercase J/K/H/L to navigation" {
run bash -c "export MOLE_BASE_LOADED=1; source '$PROJECT_ROOT/lib/core/ui.sh'; echo -n 'J' | read_key"
[ "$output" = "DOWN" ]
run bash -c "export MOLE_BASE_LOADED=1; source '$PROJECT_ROOT/lib/core/ui.sh'; echo -n 'K' | read_key"
[ "$output" = "UP" ]
}
@test "read_key respects MOLE_READ_KEY_FORCE_CHAR" {
run bash -c "export MOLE_BASE_LOADED=1; export MOLE_READ_KEY_FORCE_CHAR=1; source '$PROJECT_ROOT/lib/core/ui.sh'; echo -n 'j' | read_key"
[ "$output" = "CHAR:j" ]
}


@@ -1,236 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
TEST_DATA_DIR="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-perf.XXXXXX")"
export TEST_DATA_DIR
}
teardown_file() {
rm -rf "$TEST_DATA_DIR"
}
setup() {
source "$PROJECT_ROOT/lib/core/base.sh"
}
@test "bytes_to_human handles large values efficiently" {
local start end elapsed
local limit_ms="${MOLE_PERF_BYTES_TO_HUMAN_LIMIT_MS:-4000}"
bytes_to_human 1073741824 > /dev/null
start=$(date +%s%N)
for i in {1..1000}; do
bytes_to_human 1073741824 > /dev/null
done
end=$(date +%s%N)
elapsed=$(( (end - start) / 1000000 ))
[ "$elapsed" -lt "$limit_ms" ]
}
@test "bytes_to_human produces correct output for GB range" {
result=$(bytes_to_human 1073741824)
[ "$result" = "1.00GB" ]
result=$(bytes_to_human 5368709120)
[ "$result" = "5.00GB" ]
}
@test "bytes_to_human produces correct output for MB range" {
result=$(bytes_to_human 1048576)
[ "$result" = "1.0MB" ]
result=$(bytes_to_human 104857600)
[ "$result" = "100.0MB" ]
}
@test "bytes_to_human produces correct output for KB range" {
result=$(bytes_to_human 1024)
[ "$result" = "1KB" ]
result=$(bytes_to_human 10240)
[ "$result" = "10KB" ]
}
@test "bytes_to_human handles edge cases" {
result=$(bytes_to_human 0)
[ "$result" = "0B" ]
run bytes_to_human "invalid"
[ "$status" -eq 1 ]
[ "$output" = "0B" ]
run bytes_to_human "-100"
[ "$status" -eq 1 ]
[ "$output" = "0B" ]
}
@test "get_file_size is faster than multiple stat calls" {
local test_file="$TEST_DATA_DIR/size_test.txt"
dd if=/dev/zero of="$test_file" bs=1024 count=100 2> /dev/null
local start end elapsed
local limit_ms="${MOLE_PERF_GET_FILE_SIZE_LIMIT_MS:-2000}"
start=$(date +%s%N)
for i in {1..100}; do
get_file_size "$test_file" > /dev/null
done
end=$(date +%s%N)
elapsed=$(( (end - start) / 1000000 ))
[ "$elapsed" -lt "$limit_ms" ]
}
@test "get_file_mtime returns valid timestamp" {
local test_file="$TEST_DATA_DIR/mtime_test.txt"
touch "$test_file"
result=$(get_file_mtime "$test_file")
[[ "$result" =~ ^[0-9]{10,}$ ]]
}
@test "get_file_owner returns current user for owned files" {
local test_file="$TEST_DATA_DIR/owner_test.txt"
touch "$test_file"
result=$(get_file_owner "$test_file")
current_user=$(whoami)
[ "$result" = "$current_user" ]
}
@test "get_invoking_user executes quickly" {
local start end elapsed
start=$(date +%s%N)
for i in {1..100}; do
get_invoking_user > /dev/null
done
end=$(date +%s%N)
elapsed=$(( (end - start) / 1000000 ))
[ "$elapsed" -lt 200 ]
}
@test "get_darwin_major caches correctly" {
local first second
first=$(get_darwin_major)
second=$(get_darwin_major)
[ "$first" = "$second" ]
[[ "$first" =~ ^[0-9]+$ ]]
}
@test "create_temp_file and cleanup_temp_files work efficiently" {
local start end elapsed
declare -a MOLE_TEMP_DIRS=()
start=$(date +%s%N)
for i in {1..50}; do
create_temp_file > /dev/null
done
end=$(date +%s%N)
elapsed=$(( (end - start) / 1000000 ))
[ "$elapsed" -lt 1000 ]
[ "${#MOLE_TEMP_FILES[@]}" -eq 50 ]
start=$(date +%s%N)
cleanup_temp_files
end=$(date +%s%N)
elapsed=$(( (end - start) / 1000000 ))
[ "$elapsed" -lt 2000 ]
[ "${#MOLE_TEMP_FILES[@]}" -eq 0 ]
}
@test "mktemp_file creates files with correct prefix" {
local temp_file
temp_file=$(mktemp_file "test_prefix")
[[ "$temp_file" =~ test_prefix ]]
[ -f "$temp_file" ]
rm -f "$temp_file"
}
@test "get_brand_name handles common apps efficiently" {
local start end elapsed
get_brand_name "wechat" > /dev/null
start=$(date +%s%N)
for i in {1..50}; do
get_brand_name "wechat" > /dev/null
get_brand_name "QQ" > /dev/null
get_brand_name "dingtalk" > /dev/null
done
end=$(date +%s%N)
elapsed=$(( (end - start) / 1000000 ))
[ "$elapsed" -lt 5000 ]
}
@test "get_brand_name returns correct localized names" {
local result
result=$(get_brand_name "wechat")
[[ "$result" == "WeChat" || "$result" == "微信" ]]
}
@test "get_optimal_parallel_jobs returns sensible values" {
local result
result=$(get_optimal_parallel_jobs)
[[ "$result" =~ ^[0-9]+$ ]]
[ "$result" -gt 0 ]
[ "$result" -le 128 ]
local scan_jobs
scan_jobs=$(get_optimal_parallel_jobs "scan")
[ "$scan_jobs" -gt "$result" ]
local compute_jobs
compute_jobs=$(get_optimal_parallel_jobs "compute")
[ "$compute_jobs" -le "$scan_jobs" ]
}
@test "section tracking has minimal overhead" {
local start end elapsed
if ! declare -f note_activity > /dev/null 2>&1; then
TRACK_SECTION=0
SECTION_ACTIVITY=0
note_activity() {
if [[ $TRACK_SECTION -eq 1 ]]; then
SECTION_ACTIVITY=1
fi
}
fi
note_activity
start=$(date +%s%N)
for i in {1..1000}; do
note_activity
done
end=$(date +%s%N)
elapsed=$(( (end - start) / 1000000 ))
[ "$elapsed" -lt 2000 ]
}


@@ -1,168 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-safe-functions.XXXXXX")"
export HOME
mkdir -p "$HOME"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
setup() {
source "$PROJECT_ROOT/lib/core/common.sh"
TEST_DIR="$HOME/test_safe_functions"
mkdir -p "$TEST_DIR"
}
teardown() {
rm -rf "$TEST_DIR"
}
@test "validate_path_for_deletion rejects empty path" {
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; validate_path_for_deletion ''"
[ "$status" -eq 1 ]
}
@test "validate_path_for_deletion rejects relative path" {
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; validate_path_for_deletion 'relative/path'"
[ "$status" -eq 1 ]
}
@test "validate_path_for_deletion rejects path traversal" {
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; validate_path_for_deletion '/tmp/../etc'"
[ "$status" -eq 1 ]
# Test other path traversal patterns
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; validate_path_for_deletion '/var/log/../../etc'"
[ "$status" -eq 1 ]
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; validate_path_for_deletion '$TEST_DIR/..'"
[ "$status" -eq 1 ]
}
@test "validate_path_for_deletion accepts Firefox-style ..files directories" {
# Firefox uses ..files suffix in IndexedDB directory names
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; validate_path_for_deletion '$TEST_DIR/2753419432nreetyfallipx..files'"
[ "$status" -eq 0 ]
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; validate_path_for_deletion '$TEST_DIR/storage/default/https+++www.netflix.com/idb/name..files/data'"
[ "$status" -eq 0 ]
# Directories with .. in the middle of names should be allowed
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; validate_path_for_deletion '$TEST_DIR/test..backup/file.txt'"
[ "$status" -eq 0 ]
}
@test "validate_path_for_deletion rejects system directories" {
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; validate_path_for_deletion '/System'"
[ "$status" -eq 1 ]
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; validate_path_for_deletion '/usr/bin'"
[ "$status" -eq 1 ]
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; validate_path_for_deletion '/etc'"
[ "$status" -eq 1 ]
}
@test "validate_path_for_deletion accepts valid path" {
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; validate_path_for_deletion '$TEST_DIR/valid'"
[ "$status" -eq 0 ]
}
@test "safe_remove validates path before deletion" {
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; safe_remove '/System/test' 2>&1"
[ "$status" -eq 1 ]
}
@test "safe_remove successfully removes file" {
local test_file="$TEST_DIR/test_file.txt"
echo "test" > "$test_file"
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; safe_remove '$test_file' true"
[ "$status" -eq 0 ]
[ ! -f "$test_file" ]
}
@test "safe_remove successfully removes directory" {
local test_subdir="$TEST_DIR/test_subdir"
mkdir -p "$test_subdir"
touch "$test_subdir/file.txt"
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; safe_remove '$test_subdir' true"
[ "$status" -eq 0 ]
[ ! -d "$test_subdir" ]
}
@test "safe_remove handles non-existent path gracefully" {
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; safe_remove '$TEST_DIR/nonexistent' true"
[ "$status" -eq 0 ]
}
@test "safe_remove in silent mode suppresses error output" {
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; safe_remove '/System/test' true 2>&1"
[ "$status" -eq 1 ]
}
@test "safe_find_delete validates base directory" {
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; safe_find_delete '/nonexistent' '*.tmp' 7 'f' 2>&1"
[ "$status" -eq 1 ]
}
@test "safe_find_delete rejects symlinked directory" {
local real_dir="$TEST_DIR/real"
local link_dir="$TEST_DIR/link"
mkdir -p "$real_dir"
ln -s "$real_dir" "$link_dir"
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; safe_find_delete '$link_dir' '*.tmp' 7 'f' 2>&1"
[ "$status" -eq 1 ]
[[ "$output" == *"symlink"* ]]
rm -rf "$link_dir" "$real_dir"
}
@test "safe_find_delete validates type filter" {
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; safe_find_delete '$TEST_DIR' '*.tmp' 7 'x' 2>&1"
[ "$status" -eq 1 ]
[[ "$output" == *"Invalid type filter"* ]]
}
@test "safe_find_delete deletes old files" {
local old_file="$TEST_DIR/old.tmp"
local new_file="$TEST_DIR/new.tmp"
touch "$old_file"
touch "$new_file"
touch -t "$(date -v-8d '+%Y%m%d%H%M.%S' 2>/dev/null || date -d '8 days ago' '+%Y%m%d%H%M.%S')" "$old_file" 2>/dev/null || true
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; safe_find_delete '$TEST_DIR' '*.tmp' 7 'f'"
[ "$status" -eq 0 ]
}
@test "MOLE_* constants are defined" {
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; echo \$MOLE_TEMP_FILE_AGE_DAYS"
[ "$status" -eq 0 ]
[ "$output" = "7" ]
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; echo \$MOLE_MAX_PARALLEL_JOBS"
[ "$status" -eq 0 ]
[ "$output" = "15" ]
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; echo \$MOLE_TM_BACKUP_SAFE_HOURS"
[ "$status" -eq 0 ]
[ "$output" = "48" ]
}


@@ -1,188 +0,0 @@
#!/usr/bin/env bats
setup() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
export MO_DEBUG=0
}
@test "run_with_timeout: command completes before timeout" {
result=$(bash -c "
set -euo pipefail
source '$PROJECT_ROOT/lib/core/timeout.sh'
run_with_timeout 5 echo 'success'
")
[[ "$result" == "success" ]]
}
@test "run_with_timeout: zero timeout runs command normally" {
result=$(bash -c "
set -euo pipefail
source '$PROJECT_ROOT/lib/core/timeout.sh'
run_with_timeout 0 echo 'no_timeout'
")
[[ "$result" == "no_timeout" ]]
}
@test "run_with_timeout: invalid timeout runs command normally" {
result=$(bash -c "
set -euo pipefail
source '$PROJECT_ROOT/lib/core/timeout.sh'
run_with_timeout invalid echo 'no_timeout'
")
[[ "$result" == "no_timeout" ]]
}
@test "run_with_timeout: negative timeout runs command normally" {
result=$(bash -c "
set -euo pipefail
source '$PROJECT_ROOT/lib/core/timeout.sh'
run_with_timeout -5 echo 'no_timeout'
")
[[ "$result" == "no_timeout" ]]
}
@test "run_with_timeout: preserves command exit code on success" {
bash -c "
set -euo pipefail
source '$PROJECT_ROOT/lib/core/timeout.sh'
run_with_timeout 5 true
"
exit_code=$?
[[ $exit_code -eq 0 ]]
}
@test "run_with_timeout: preserves command exit code on failure" {
set +e
bash -c "
set +e
source '$PROJECT_ROOT/lib/core/timeout.sh'
run_with_timeout 5 false
exit \$?
"
exit_code=$?
set -e
[[ $exit_code -eq 1 ]]
}
@test "run_with_timeout: returns 124 on timeout (if using gtimeout)" {
if ! command -v gtimeout >/dev/null 2>&1 && ! command -v timeout >/dev/null 2>&1; then
skip "gtimeout/timeout not available"
fi
set +e
bash -c "
set +e
source '$PROJECT_ROOT/lib/core/timeout.sh'
run_with_timeout 1 sleep 10
exit \$?
"
exit_code=$?
set -e
[[ $exit_code -eq 124 ]]
}
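# 124 is the exit status GNU timeout/gtimeout reports when it kills a command
# for exceeding its limit, which is why the wrapper is expected to surface that
# exact code here.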
@test "run_with_timeout: kills long-running command" {
start_time=$(date +%s)
set +e
bash -c "
set +e
source '$PROJECT_ROOT/lib/core/timeout.sh'
run_with_timeout 2 sleep 30
" >/dev/null 2>&1
set -e
end_time=$(date +%s)
duration=$((end_time - start_time))
[[ $duration -lt 10 ]]
}
@test "run_with_timeout: handles fast-completing commands" {
start_time=$(date +%s)
bash -c "
set -euo pipefail
source '$PROJECT_ROOT/lib/core/timeout.sh'
run_with_timeout 10 echo 'fast'
" >/dev/null 2>&1
end_time=$(date +%s)
duration=$((end_time - start_time))
[[ $duration -lt 3 ]]
}
@test "run_with_timeout: works in pipefail mode" {
result=$(bash -c "
set -euo pipefail
source '$PROJECT_ROOT/lib/core/timeout.sh'
run_with_timeout 5 echo 'pipefail_test'
")
[[ "$result" == "pipefail_test" ]]
}
@test "run_with_timeout: doesn't cause unintended exits" {
result=$(bash -c "
set -euo pipefail
source '$PROJECT_ROOT/lib/core/timeout.sh'
run_with_timeout 5 true || true
echo 'survived'
")
[[ "$result" == "survived" ]]
}
@test "run_with_timeout: handles commands with arguments" {
result=$(bash -c "
set -euo pipefail
source '$PROJECT_ROOT/lib/core/timeout.sh'
run_with_timeout 5 echo 'arg1' 'arg2' 'arg3'
")
[[ "$result" == "arg1 arg2 arg3" ]]
}
@test "run_with_timeout: handles commands with spaces in arguments" {
result=$(bash -c "
set -euo pipefail
source '$PROJECT_ROOT/lib/core/timeout.sh'
run_with_timeout 5 echo 'hello world'
")
[[ "$result" == "hello world" ]]
}
@test "run_with_timeout: debug logging when MO_DEBUG=1" {
output=$(bash -c "
set -euo pipefail
export MO_DEBUG=1
source '$PROJECT_ROOT/lib/core/timeout.sh'
run_with_timeout 5 echo 'test' 2>&1
")
[[ "$output" =~ TIMEOUT ]]
}
@test "run_with_timeout: no debug logging when MO_DEBUG=0" {
output=$(bash -c "
set -euo pipefail
export MO_DEBUG=0
unset MO_TIMEOUT_INITIALIZED
source '$PROJECT_ROOT/lib/core/timeout.sh'
run_with_timeout 5 echo 'test'
" 2>/dev/null)
[[ "$output" == "test" ]]
}
@test "timeout.sh: prevents multiple sourcing" {
result=$(bash -c "
set -euo pipefail
source '$PROJECT_ROOT/lib/core/timeout.sh'
source '$PROJECT_ROOT/lib/core/timeout.sh'
echo 'loaded'
")
[[ "$result" == "loaded" ]]
}
@test "timeout.sh: sets MOLE_TIMEOUT_LOADED flag" {
result=$(bash -c "
set -euo pipefail
source '$PROJECT_ROOT/lib/core/timeout.sh'
echo \"\$MOLE_TIMEOUT_LOADED\"
")
[[ "$result" == "1" ]]
}


@@ -1,239 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-installers-home.XXXXXX")"
export HOME
mkdir -p "$HOME"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
setup() {
export TERM="xterm-256color"
export MO_DEBUG=0
# Create standard scan directories
mkdir -p "$HOME/Downloads"
mkdir -p "$HOME/Desktop"
mkdir -p "$HOME/Documents"
mkdir -p "$HOME/Public"
mkdir -p "$HOME/Library/Downloads"
# Clear previous test files
rm -rf "${HOME:?}/Downloads"/*
rm -rf "${HOME:?}/Desktop"/*
rm -rf "${HOME:?}/Documents"/*
}
# Test arguments
@test "installer.sh rejects unknown options" {
run "$PROJECT_ROOT/bin/installer.sh" --unknown-option
[ "$status" -eq 1 ]
[[ "$output" == *"Unknown option"* ]]
}
# Test scan_installers_in_path function directly
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Tests using find (forced fallback by hiding fd)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
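# These tests force the find-based code path by shrinking PATH to /usr/bin:/bin,
# which hides any Homebrew-installed fd (assumed to live under /opt/homebrew/bin
# or /usr/local/bin) so the scanner's fd detection fails. A minimal sketch of the
# dispatch these tests rely on (illustrative only, not the actual installer.sh code):
#   if command -v fd >/dev/null 2>&1; then
#     fd --max-depth "$depth" -e dmg -e pkg . "$dir"
#   else
#     find "$dir" -maxdepth "$depth" -type f \( -name '*.dmg' -o -name '*.pkg' \)
#   fi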
@test "scan_installers_in_path (fallback find): finds .dmg files" {
touch "$HOME/Downloads/Chrome.dmg"
run env PATH="/usr/bin:/bin" bash -euo pipefail -c "
export MOLE_TEST_MODE=1
source \"\$1\"
scan_installers_in_path \"\$2\"
" bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
[[ "$output" == *"Chrome.dmg"* ]]
}
@test "scan_installers_in_path (fallback find): finds multiple installer types" {
touch "$HOME/Downloads/App1.dmg"
touch "$HOME/Downloads/App2.pkg"
touch "$HOME/Downloads/App3.iso"
touch "$HOME/Downloads/App.mpkg"
run env PATH="/usr/bin:/bin" bash -euo pipefail -c "
export MOLE_TEST_MODE=1
source \"\$1\"
scan_installers_in_path \"\$2\"
" bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
[[ "$output" == *"App1.dmg"* ]]
[[ "$output" == *"App2.pkg"* ]]
[[ "$output" == *"App3.iso"* ]]
[[ "$output" == *"App.mpkg"* ]]
}
@test "scan_installers_in_path (fallback find): respects max depth" {
mkdir -p "$HOME/Downloads/level1/level2/level3"
touch "$HOME/Downloads/shallow.dmg"
touch "$HOME/Downloads/level1/mid.dmg"
touch "$HOME/Downloads/level1/level2/deep.dmg"
touch "$HOME/Downloads/level1/level2/level3/too-deep.dmg"
run env PATH="/usr/bin:/bin" bash -euo pipefail -c "
export MOLE_TEST_MODE=1
source \"\$1\"
scan_installers_in_path \"\$2\"
" bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
# Default max depth is 2
[[ "$output" == *"shallow.dmg"* ]]
[[ "$output" == *"mid.dmg"* ]]
[[ "$output" == *"deep.dmg"* ]]
[[ "$output" != *"too-deep.dmg"* ]]
}
@test "scan_installers_in_path (fallback find): honors MOLE_INSTALLER_SCAN_MAX_DEPTH" {
mkdir -p "$HOME/Downloads/level1"
touch "$HOME/Downloads/top.dmg"
touch "$HOME/Downloads/level1/nested.dmg"
run env PATH="/usr/bin:/bin" MOLE_INSTALLER_SCAN_MAX_DEPTH=1 bash -euo pipefail -c "
export MOLE_TEST_MODE=1
source \"\$1\"
scan_installers_in_path \"\$2\"
" bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
[[ "$output" == *"top.dmg"* ]]
[[ "$output" != *"nested.dmg"* ]]
}
@test "scan_installers_in_path (fallback find): handles non-existent directory" {
run env PATH="/usr/bin:/bin" bash -euo pipefail -c "
export MOLE_TEST_MODE=1
source \"\$1\"
scan_installers_in_path \"\$2\"
" bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/NonExistent"
[ "$status" -eq 0 ]
[[ -z "$output" ]]
}
@test "scan_installers_in_path (fallback find): ignores non-installer files" {
touch "$HOME/Downloads/document.pdf"
touch "$HOME/Downloads/image.jpg"
touch "$HOME/Downloads/archive.tar.gz"
touch "$HOME/Downloads/Installer.dmg"
run env PATH="/usr/bin:/bin" bash -euo pipefail -c "
export MOLE_TEST_MODE=1
source \"\$1\"
scan_installers_in_path \"\$2\"
" bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
[[ "$output" != *"document.pdf"* ]]
[[ "$output" != *"image.jpg"* ]]
[[ "$output" != *"archive.tar.gz"* ]]
[[ "$output" == *"Installer.dmg"* ]]
}
@test "scan_all_installers: handles missing paths gracefully" {
# Intentionally leave most scan directories missing
# Only Downloads is (re)created below; remove the others if they exist
rm -rf "$HOME/Desktop"
rm -rf "$HOME/Documents"
rm -rf "$HOME/Public"
rm -rf "$HOME/Public/Downloads"
rm -rf "$HOME/Library/Downloads"
mkdir -p "$HOME/Downloads"
# Add an installer to the one directory that exists
touch "$HOME/Downloads/test.dmg"
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
scan_all_installers
' bash "$PROJECT_ROOT/bin/installer.sh"
# Should succeed even with missing paths
[ "$status" -eq 0 ]
# Should still find the installer in the existing directory
[[ "$output" == *"test.dmg"* ]]
}
# Test edge cases
@test "scan_installers_in_path (fallback find): handles filenames with spaces" {
touch "$HOME/Downloads/My App Installer.dmg"
run env PATH="/usr/bin:/bin" bash -euo pipefail -c "
export MOLE_TEST_MODE=1
source \"\$1\"
scan_installers_in_path \"\$2\"
" bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
[[ "$output" == *"My App Installer.dmg"* ]]
}
@test "scan_installers_in_path (fallback find): handles filenames with special characters" {
touch "$HOME/Downloads/App-v1.2.3_beta.pkg"
run env PATH="/usr/bin:/bin" bash -euo pipefail -c "
export MOLE_TEST_MODE=1
source \"\$1\"
scan_installers_in_path \"\$2\"
" bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
[[ "$output" == *"App-v1.2.3_beta.pkg"* ]]
}
@test "scan_installers_in_path (fallback find): returns empty for directory with no installers" {
# Create some non-installer files
touch "$HOME/Downloads/document.pdf"
touch "$HOME/Downloads/image.png"
run env PATH="/usr/bin:/bin" bash -euo pipefail -c "
export MOLE_TEST_MODE=1
source \"\$1\"
scan_installers_in_path \"\$2\"
" bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
[[ -z "$output" ]]
}
# Symlink handling tests
@test "scan_installers_in_path (fallback find): skips symlinks to regular files" {
touch "$HOME/Downloads/real.dmg"
ln -s "$HOME/Downloads/real.dmg" "$HOME/Downloads/symlink.dmg"
ln -s /nonexistent "$HOME/Downloads/dangling.lnk"
run env PATH="/usr/bin:/bin" bash -euo pipefail -c "
export MOLE_TEST_MODE=1
source \"\$1\"
scan_installers_in_path \"\$2\"
" bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
[[ "$output" == *"real.dmg"* ]]
[[ "$output" != *"symlink.dmg"* ]]
[[ "$output" != *"dangling.lnk"* ]]
}
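# Plain `find ... -type f` without -L matches only regular files and does not
# follow symlinks, so both the symlink to real.dmg and the dangling link are
# expected to be skipped by the fallback scanner.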


@@ -1,245 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-installers-home.XXXXXX")"
export HOME
mkdir -p "$HOME"
if command -v fd > /dev/null 2>&1; then
FD_AVAILABLE=1
else
FD_AVAILABLE=0
fi
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
setup() {
export TERM="xterm-256color"
export MO_DEBUG=0
# Create standard scan directories
mkdir -p "$HOME/Downloads"
mkdir -p "$HOME/Desktop"
mkdir -p "$HOME/Documents"
mkdir -p "$HOME/Public"
mkdir -p "$HOME/Library/Downloads"
# Clear previous test files
rm -rf "${HOME:?}/Downloads"/*
rm -rf "${HOME:?}/Desktop"/*
rm -rf "${HOME:?}/Documents"/*
}
require_fd() {
[[ "${FD_AVAILABLE:-0}" -eq 1 ]]
}
@test "scan_installers_in_path (fd): finds .dmg files" {
if ! require_fd; then
return 0
fi
touch "$HOME/Downloads/Chrome.dmg"
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
scan_installers_in_path "$2"
' bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
[[ "$output" == *"Chrome.dmg"* ]]
}
@test "scan_installers_in_path (fd): finds multiple installer types" {
if ! require_fd; then
return 0
fi
touch "$HOME/Downloads/App1.dmg"
touch "$HOME/Downloads/App2.pkg"
touch "$HOME/Downloads/App3.iso"
touch "$HOME/Downloads/App.mpkg"
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
scan_installers_in_path "$2"
' bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
[[ "$output" == *"App1.dmg"* ]]
[[ "$output" == *"App2.pkg"* ]]
[[ "$output" == *"App3.iso"* ]]
[[ "$output" == *"App.mpkg"* ]]
}
@test "scan_installers_in_path (fd): respects max depth" {
if ! require_fd; then
return 0
fi
mkdir -p "$HOME/Downloads/level1/level2/level3"
touch "$HOME/Downloads/shallow.dmg"
touch "$HOME/Downloads/level1/mid.dmg"
touch "$HOME/Downloads/level1/level2/deep.dmg"
touch "$HOME/Downloads/level1/level2/level3/too-deep.dmg"
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
scan_installers_in_path "$2"
' bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
# Default max depth is 2
[[ "$output" == *"shallow.dmg"* ]]
[[ "$output" == *"mid.dmg"* ]]
[[ "$output" == *"deep.dmg"* ]]
[[ "$output" != *"too-deep.dmg"* ]]
}
@test "scan_installers_in_path (fd): honors MOLE_INSTALLER_SCAN_MAX_DEPTH" {
if ! require_fd; then
return 0
fi
mkdir -p "$HOME/Downloads/level1"
touch "$HOME/Downloads/top.dmg"
touch "$HOME/Downloads/level1/nested.dmg"
run env MOLE_INSTALLER_SCAN_MAX_DEPTH=1 bash -euo pipefail -c "
export MOLE_TEST_MODE=1
source \"\$1\"
scan_installers_in_path \"\$2\"
" bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
[[ "$output" == *"top.dmg"* ]]
[[ "$output" != *"nested.dmg"* ]]
}
@test "scan_installers_in_path (fd): handles non-existent directory" {
if ! require_fd; then
return 0
fi
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
scan_installers_in_path "$2"
' bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/NonExistent"
[ "$status" -eq 0 ]
[[ -z "$output" ]]
}
@test "scan_installers_in_path (fd): ignores non-installer files" {
if ! require_fd; then
return 0
fi
touch "$HOME/Downloads/document.pdf"
touch "$HOME/Downloads/image.jpg"
touch "$HOME/Downloads/archive.tar.gz"
touch "$HOME/Downloads/Installer.dmg"
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
scan_installers_in_path "$2"
' bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
[[ "$output" != *"document.pdf"* ]]
[[ "$output" != *"image.jpg"* ]]
[[ "$output" != *"archive.tar.gz"* ]]
[[ "$output" == *"Installer.dmg"* ]]
}
@test "scan_installers_in_path (fd): handles filenames with spaces" {
if ! require_fd; then
return 0
fi
touch "$HOME/Downloads/My App Installer.dmg"
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
scan_installers_in_path "$2"
' bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
[[ "$output" == *"My App Installer.dmg"* ]]
}
@test "scan_installers_in_path (fd): handles filenames with special characters" {
if ! require_fd; then
return 0
fi
touch "$HOME/Downloads/App-v1.2.3_beta.pkg"
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
scan_installers_in_path "$2"
' bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
[[ "$output" == *"App-v1.2.3_beta.pkg"* ]]
}
@test "scan_installers_in_path (fd): returns empty for directory with no installers" {
if ! require_fd; then
return 0
fi
# Create some non-installer files
touch "$HOME/Downloads/document.pdf"
touch "$HOME/Downloads/image.png"
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
scan_installers_in_path "$2"
' bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
[[ -z "$output" ]]
}
@test "scan_installers_in_path (fd): skips symlinks to regular files" {
if ! require_fd; then
return 0
fi
touch "$HOME/Downloads/real.dmg"
ln -s "$HOME/Downloads/real.dmg" "$HOME/Downloads/symlink.dmg"
ln -s /nonexistent "$HOME/Downloads/dangling.lnk"
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
scan_installers_in_path "$2"
' bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
[ "$status" -eq 0 ]
[[ "$output" == *"real.dmg"* ]]
[[ "$output" != *"symlink.dmg"* ]]
[[ "$output" != *"dangling.lnk"* ]]
}


@@ -1,377 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-installers-home.XXXXXX")"
export HOME
mkdir -p "$HOME"
if command -v zip > /dev/null 2>&1; then
ZIP_AVAILABLE=1
else
ZIP_AVAILABLE=0
fi
if command -v zipinfo > /dev/null 2>&1 || command -v unzip > /dev/null 2>&1; then
ZIP_LIST_AVAILABLE=1
else
ZIP_LIST_AVAILABLE=0
fi
if command -v unzip > /dev/null 2>&1; then
UNZIP_AVAILABLE=1
else
UNZIP_AVAILABLE=0
fi
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
setup() {
export TERM="xterm-256color"
export MO_DEBUG=0
# Create standard scan directories
mkdir -p "$HOME/Downloads"
mkdir -p "$HOME/Desktop"
mkdir -p "$HOME/Documents"
mkdir -p "$HOME/Public"
mkdir -p "$HOME/Library/Downloads"
# Clear previous test files
rm -rf "${HOME:?}/Downloads"/*
rm -rf "${HOME:?}/Desktop"/*
rm -rf "${HOME:?}/Documents"/*
}
zip_list_available() {
[[ "${ZIP_LIST_AVAILABLE:-0}" -eq 1 ]]
}
require_zip_list() {
zip_list_available
}
require_zip_support() {
[[ "${ZIP_AVAILABLE:-0}" -eq 1 && "${ZIP_LIST_AVAILABLE:-0}" -eq 1 ]]
}
require_unzip_support() {
[[ "${ZIP_AVAILABLE:-0}" -eq 1 && "${UNZIP_AVAILABLE:-0}" -eq 1 ]]
}
# Test ZIP installer detection
@test "is_installer_zip: detects ZIP with installer content even with many entries" {
if ! require_zip_support; then
return 0
fi
# Create a ZIP with many files (more than old MAX_ZIP_ENTRIES=5)
# Include a .app file to have installer content
mkdir -p "$HOME/Downloads/large-app"
touch "$HOME/Downloads/large-app/MyApp.app"
for i in {1..9}; do
touch "$HOME/Downloads/large-app/file$i.txt"
done
(cd "$HOME/Downloads" && zip -q -r large-installer.zip large-app)
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
if is_installer_zip "'"$HOME/Downloads/large-installer.zip"'"; then
echo "INSTALLER"
else
echo "NOT_INSTALLER"
fi
' bash "$PROJECT_ROOT/bin/installer.sh"
[ "$status" -eq 0 ]
[[ "$output" == "INSTALLER" ]]
}
@test "is_installer_zip: detects ZIP with app content" {
if ! require_zip_support; then
return 0
fi
mkdir -p "$HOME/Downloads/app-content"
touch "$HOME/Downloads/app-content/MyApp.app"
(cd "$HOME/Downloads" && zip -q -r app.zip app-content)
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
if is_installer_zip "'"$HOME/Downloads/app.zip"'"; then
echo "INSTALLER"
else
echo "NOT_INSTALLER"
fi
' bash "$PROJECT_ROOT/bin/installer.sh"
[ "$status" -eq 0 ]
[[ "$output" == "INSTALLER" ]]
}
@test "is_installer_zip: rejects ZIP when installer pattern appears after MAX_ZIP_ENTRIES" {
if ! require_zip_support; then
return 0
fi
# Create a ZIP where .app appears after the 50th entry
mkdir -p "$HOME/Downloads/deep-content"
# Create 51 regular files first
for i in {1..51}; do
touch "$HOME/Downloads/deep-content/file$i.txt"
done
# Add .app file at the end (52nd entry)
touch "$HOME/Downloads/deep-content/MyApp.app"
(cd "$HOME/Downloads" && zip -q -r deep.zip deep-content)
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
if is_installer_zip "'"$HOME/Downloads/deep.zip"'"; then
echo "INSTALLER"
else
echo "NOT_INSTALLER"
fi
' bash "$PROJECT_ROOT/bin/installer.sh"
[ "$status" -eq 0 ]
[[ "$output" == "NOT_INSTALLER" ]]
}
@test "is_installer_zip: detects ZIP with real app bundle structure" {
if ! require_zip_support; then
return 0
fi
# Create a realistic .app bundle structure (directory, not just a file)
mkdir -p "$HOME/Downloads/RealApp.app/Contents/MacOS"
mkdir -p "$HOME/Downloads/RealApp.app/Contents/Resources"
echo "#!/bin/bash" > "$HOME/Downloads/RealApp.app/Contents/MacOS/RealApp"
chmod +x "$HOME/Downloads/RealApp.app/Contents/MacOS/RealApp"
cat > "$HOME/Downloads/RealApp.app/Contents/Info.plist" << 'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleExecutable</key>
<string>RealApp</string>
</dict>
</plist>
EOF
(cd "$HOME/Downloads" && zip -q -r realapp.zip RealApp.app)
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
if is_installer_zip "'"$HOME/Downloads/realapp.zip"'"; then
echo "INSTALLER"
else
echo "NOT_INSTALLER"
fi
' bash "$PROJECT_ROOT/bin/installer.sh"
[ "$status" -eq 0 ]
[[ "$output" == "INSTALLER" ]]
}
@test "is_installer_zip: rejects ZIP with only regular files" {
if ! require_zip_support; then
return 0
fi
mkdir -p "$HOME/Downloads/data"
touch "$HOME/Downloads/data/file1.txt"
touch "$HOME/Downloads/data/file2.pdf"
(cd "$HOME/Downloads" && zip -q -r data.zip data)
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
if is_installer_zip "'"$HOME/Downloads/data.zip"'"; then
echo "INSTALLER"
else
echo "NOT_INSTALLER"
fi
' bash "$PROJECT_ROOT/bin/installer.sh"
[ "$status" -eq 0 ]
[[ "$output" == "NOT_INSTALLER" ]]
}
@test "is_installer_zip: returns NOT_INSTALLER when ZIP list command is unavailable" {
touch "$HOME/Downloads/empty.zip"
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
ZIP_LIST_CMD=()
if is_installer_zip "$2"; then
echo "INSTALLER"
else
echo "NOT_INSTALLER"
fi
' bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads/empty.zip"
[ "$status" -eq 0 ]
[[ "$output" == "NOT_INSTALLER" ]]
}
@test "is_installer_zip: works with unzip list command" {
if ! require_unzip_support; then
return 0
fi
mkdir -p "$HOME/Downloads/app-content"
touch "$HOME/Downloads/app-content/MyApp.app"
(cd "$HOME/Downloads" && zip -q -r app.zip app-content)
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
ZIP_LIST_CMD=(unzip -Z -1)
if is_installer_zip "$2"; then
echo "INSTALLER"
else
echo "NOT_INSTALLER"
fi
' bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads/app.zip"
[ "$status" -eq 0 ]
[[ "$output" == "INSTALLER" ]]
}
# Integration tests: ZIP scanning inside scan_all_installers
@test "scan_all_installers: finds installer ZIP in Downloads" {
if ! require_zip_support; then
return 0
fi
# Create a valid installer ZIP (contains .app)
mkdir -p "$HOME/Downloads/app-content"
touch "$HOME/Downloads/app-content/MyApp.app"
(cd "$HOME/Downloads" && zip -q -r installer.zip app-content)
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
scan_all_installers
' bash "$PROJECT_ROOT/bin/installer.sh"
[ "$status" -eq 0 ]
[[ "$output" == *"installer.zip"* ]]
}
@test "scan_all_installers: ignores non-installer ZIP in Downloads" {
if ! require_zip_support; then
return 0
fi
# Create a non-installer ZIP (only regular files)
mkdir -p "$HOME/Downloads/data"
touch "$HOME/Downloads/data/file1.txt"
touch "$HOME/Downloads/data/file2.pdf"
(cd "$HOME/Downloads" && zip -q -r data.zip data)
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
scan_all_installers
' bash "$PROJECT_ROOT/bin/installer.sh"
[ "$status" -eq 0 ]
[[ "$output" != *"data.zip"* ]]
}
# Failure path tests for scan_installers_in_path
@test "scan_installers_in_path: skips corrupt ZIP files" {
if ! require_zip_list; then
return 0
fi
# Create a corrupt ZIP file by just writing garbage data
echo "This is not a valid ZIP file" > "$HOME/Downloads/corrupt.zip"
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
scan_installers_in_path "$2"
' bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
# Should succeed (return 0) and silently skip the corrupt ZIP
[ "$status" -eq 0 ]
# Output should be empty since corrupt.zip is not a valid installer
[[ -z "$output" ]]
}
@test "scan_installers_in_path: handles permission-denied files" {
if ! require_zip_support; then
return 0
fi
# Create a valid installer ZIP
mkdir -p "$HOME/Downloads/app-content"
touch "$HOME/Downloads/app-content/MyApp.app"
(cd "$HOME/Downloads" && zip -q -r readable.zip app-content)
# Create a second installer ZIP whose read permissions will be removed
mkdir -p "$HOME/Downloads/restricted-app"
touch "$HOME/Downloads/restricted-app/App.app"
(cd "$HOME/Downloads" && zip -q -r restricted.zip restricted-app)
# Remove read permissions from restricted.zip
chmod 000 "$HOME/Downloads/restricted.zip"
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
scan_installers_in_path "$2"
' bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
# Should succeed and find the readable.zip but skip restricted.zip
[ "$status" -eq 0 ]
[[ "$output" == *"readable.zip"* ]]
[[ "$output" != *"restricted.zip"* ]]
# Cleanup: restore permissions for teardown
chmod 644 "$HOME/Downloads/restricted.zip"
}
@test "scan_installers_in_path: finds installer ZIP alongside corrupt ZIPs" {
if ! require_zip_support; then
return 0
fi
# Create a valid installer ZIP
mkdir -p "$HOME/Downloads/app-content"
touch "$HOME/Downloads/app-content/MyApp.app"
(cd "$HOME/Downloads" && zip -q -r valid-installer.zip app-content)
# Create a corrupt ZIP
echo "garbage data" > "$HOME/Downloads/corrupt.zip"
run bash -euo pipefail -c '
export MOLE_TEST_MODE=1
source "$1"
scan_installers_in_path "$2"
' bash "$PROJECT_ROOT/bin/installer.sh" "$HOME/Downloads"
# Should find the valid ZIP and silently skip the corrupt one
[ "$status" -eq 0 ]
[[ "$output" == *"valid-installer.zip"* ]]
[[ "$output" != *"corrupt.zip"* ]]
}


@@ -1,99 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
}
@test "show_suggestions lists auto and manual items and exports flag" {
run bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/base.sh"
source "$PROJECT_ROOT/lib/manage/autofix.sh"
export FIREWALL_DISABLED=true
export FILEVAULT_DISABLED=true
export TOUCHID_NOT_CONFIGURED=true
export ROSETTA_NOT_INSTALLED=true
export CACHE_SIZE_GB=9
export BREW_HAS_WARNINGS=true
export DISK_FREE_GB=25
show_suggestions
echo "AUTO_FLAG=${HAS_AUTO_FIX_SUGGESTIONS}"
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Enable Firewall for better security"* ]]
[[ "$output" == *"Enable FileVault"* ]]
[[ "$output" == *"Enable Touch ID for sudo"* ]]
[[ "$output" == *"Install Rosetta 2"* ]]
[[ "$output" == *"Low disk space (25GB free)"* ]]
[[ "$output" == *"AUTO_FLAG=true"* ]]
}
@test "ask_for_auto_fix accepts Enter" {
run bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/base.sh"
source "$PROJECT_ROOT/lib/manage/autofix.sh"
HAS_AUTO_FIX_SUGGESTIONS=true
read_key() { echo "ENTER"; return 0; }
ask_for_auto_fix
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"yes"* ]]
}
@test "ask_for_auto_fix rejects other keys" {
run bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/base.sh"
source "$PROJECT_ROOT/lib/manage/autofix.sh"
HAS_AUTO_FIX_SUGGESTIONS=true
read_key() { echo "ESC"; return 0; }
ask_for_auto_fix
EOF
[ "$status" -eq 1 ]
[[ "$output" == *"no"* ]]
}
@test "perform_auto_fix applies available actions and records summary" {
run bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/base.sh"
source "$PROJECT_ROOT/lib/manage/autofix.sh"
has_sudo_session() { return 0; }
ensure_sudo_session() { return 0; }
sudo() {
case "$1" in
defaults) return 0 ;;
bash) return 0 ;;
softwareupdate)
echo "Installing Rosetta 2 stub output"
return 0
;;
/usr/libexec/ApplicationFirewall/socketfilterfw) return 0 ;;
*) return 0 ;;
esac
}
export FIREWALL_DISABLED=true
export TOUCHID_NOT_CONFIGURED=true
export ROSETTA_NOT_INSTALLED=true
perform_auto_fix
echo "SUMMARY=${AUTO_FIX_SUMMARY}"
echo "DETAILS=${AUTO_FIX_DETAILS}"
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Firewall enabled"* ]]
[[ "$output" == *"Touch ID configured"* ]]
[[ "$output" == *"Rosetta 2 installed"* ]]
[[ "$output" == *"SUMMARY=Auto fixes applied: 3 issue(s)"* ]]
[[ "$output" == *"DETAILS"* ]]
}

View File

@@ -1,72 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
}
setup() {
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/core/sudo.sh"
}
@test "has_sudo_session returns 1 when no sudo session" {
# shellcheck disable=SC2329
sudo() { return 1; }
export -f sudo
run has_sudo_session
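# Either exit status is tolerated here; the check only guards against crashes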
[ "$status" -eq 0 ] || [ "$status" -eq 1 ]
}
@test "sudo keepalive functions don't crash" {
# shellcheck disable=SC2329
function sudo() {
return 1 # Simulate no sudo available
}
export -f sudo
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; source '$PROJECT_ROOT/lib/core/sudo.sh'; has_sudo_session"
[ "$status" -eq 1 ] # Expected: no sudo session
}
@test "_start_sudo_keepalive returns a PID" {
function sudo() {
case "$1" in
-n) return 0 ;; # Simulate valid sudo session
-v) return 0 ;; # Refresh succeeds
*) return 1 ;;
esac
}
export -f sudo
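# The keepalive runs in the background; capture its PID so it can be killed and reaped below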
local pid
pid=$(bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; source '$PROJECT_ROOT/lib/core/sudo.sh'; _start_sudo_keepalive")
[[ "$pid" =~ ^[0-9]+$ ]]
kill "$pid" 2>/dev/null || true
wait "$pid" 2>/dev/null || true
}
@test "_stop_sudo_keepalive handles invalid PID gracefully" {
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; source '$PROJECT_ROOT/lib/core/sudo.sh'; _stop_sudo_keepalive ''"
[ "$status" -eq 0 ]
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; source '$PROJECT_ROOT/lib/core/sudo.sh'; _stop_sudo_keepalive '99999'"
[ "$status" -eq 0 ]
}
@test "stop_sudo_session cleans up keepalive process" {
export MOLE_SUDO_KEEPALIVE_PID="99999"
run bash -c "export MOLE_SUDO_KEEPALIVE_PID=99999; source '$PROJECT_ROOT/lib/core/common.sh'; source '$PROJECT_ROOT/lib/core/sudo.sh'; stop_sudo_session"
[ "$status" -eq 0 ]
}
@test "sudo manager initializes global state correctly" {
result=$(bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; source '$PROJECT_ROOT/lib/core/sudo.sh'; echo \$MOLE_SUDO_ESTABLISHED")
[[ "$result" == "false" ]] || [[ -z "$result" ]]
}

View File

@@ -1,131 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-whitelist-home.XXXXXX")"
export HOME
mkdir -p "$HOME"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
setup() {
rm -rf "$HOME/.config"
mkdir -p "$HOME"
WHITELIST_PATH="$HOME/.config/mole/whitelist"
}
@test "patterns_equivalent treats paths with tilde expansion as equal" {
local status
if HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/manage/whitelist.sh'; patterns_equivalent '~/.cache/test' \"\$HOME/.cache/test\""; then
status=0
else
status=$?
fi
[ "$status" -eq 0 ]
}
@test "patterns_equivalent distinguishes different paths" {
local status
if HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/manage/whitelist.sh'; patterns_equivalent '~/.cache/test' \"\$HOME/.cache/other\""; then
status=0
else
status=$?
fi
[ "$status" -ne 0 ]
}
@test "save_whitelist_patterns keeps unique entries and preserves header" {
HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/manage/whitelist.sh'; save_whitelist_patterns \"\$HOME/.cache/foo\" \"\$HOME/.cache/foo\" \"\$HOME/.cache/bar\""
[[ -f "$WHITELIST_PATH" ]]
lines=()
while IFS= read -r line; do
lines+=("$line")
done < "$WHITELIST_PATH"
[ "${#lines[@]}" -ge 4 ]
occurrences=$(grep -c "$HOME/.cache/foo" "$WHITELIST_PATH")
[ "$occurrences" -eq 1 ]
}
@test "load_whitelist falls back to defaults when config missing" {
rm -f "$WHITELIST_PATH"
HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/manage/whitelist.sh'; rm -f \"\$HOME/.config/mole/whitelist\"; load_whitelist; printf '%s\n' \"\${CURRENT_WHITELIST_PATTERNS[@]}\"" > "$HOME/current_whitelist.txt"
HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/manage/whitelist.sh'; printf '%s\n' \"\${DEFAULT_WHITELIST_PATTERNS[@]}\"" > "$HOME/default_whitelist.txt"
current=()
while IFS= read -r line; do
current+=("$line")
done < "$HOME/current_whitelist.txt"
defaults=()
while IFS= read -r line; do
defaults+=("$line")
done < "$HOME/default_whitelist.txt"
[ "${#current[@]}" -eq "${#defaults[@]}" ]
[ "${current[0]}" = "${defaults[0]/\$HOME/$HOME}" ]
}
@test "is_whitelisted matches saved patterns exactly" {
local status
if HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/manage/whitelist.sh'; save_whitelist_patterns \"\$HOME/.cache/unique-pattern\"; load_whitelist; is_whitelisted \"\$HOME/.cache/unique-pattern\""; then
status=0
else
status=$?
fi
[ "$status" -eq 0 ]
if HOME="$HOME" bash --noprofile --norc -c "source '$PROJECT_ROOT/lib/manage/whitelist.sh'; save_whitelist_patterns \"\$HOME/.cache/unique-pattern\"; load_whitelist; is_whitelisted \"\$HOME/.cache/other-pattern\""; then
status=0
else
status=$?
fi
[ "$status" -ne 0 ]
}
@test "mo clean --whitelist persists selections" {
whitelist_file="$HOME/.config/mole/whitelist"
mkdir -p "$(dirname "$whitelist_file")"
run bash --noprofile --norc -c "cd '$PROJECT_ROOT'; printf \$'\\n' | HOME='$HOME' ./mo clean --whitelist"
[ "$status" -eq 0 ]
grep -q "\\.m2/repository" "$whitelist_file"
run bash --noprofile --norc -c "cd '$PROJECT_ROOT'; printf \$' \\n' | HOME='$HOME' ./mo clean --whitelist"
[ "$status" -eq 0 ]
run grep -q "\\.m2/repository" "$whitelist_file"
[ "$status" -eq 1 ]
run bash --noprofile --norc -c "cd '$PROJECT_ROOT'; printf \$'\\n' | HOME='$HOME' ./mo clean --whitelist"
[ "$status" -eq 0 ]
run grep -q "\\.m2/repository" "$whitelist_file"
[ "$status" -eq 1 ]
}
@test "is_path_whitelisted protects parent directories of whitelisted nested paths" {
local status
if HOME="$HOME" bash --noprofile --norc -c "
source '$PROJECT_ROOT/lib/core/base.sh'
source '$PROJECT_ROOT/lib/core/app_protection.sh'
WHITELIST_PATTERNS=(\"\$HOME/Library/Caches/org.R-project.R/R/renv\")
is_path_whitelisted \"\$HOME/Library/Caches/org.R-project.R\"
"; then
status=0
else
status=$?
fi
[ "$status" -eq 0 ]
}

View File

@@ -1,183 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-optimize.XXXXXX")"
export HOME
mkdir -p "$HOME"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
@test "needs_permissions_repair returns true when home not writable" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" USER="tester" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/optimize/tasks.sh"
stat() { echo "root"; }
export -f stat
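# With stat reporting root ownership for a home owned by "tester", a permissions repair should be flagged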
if needs_permissions_repair; then
echo "needs"
fi
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"needs"* ]]
}
@test "has_bluetooth_hid_connected detects HID" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/optimize/tasks.sh"
system_profiler() {
cat << 'OUT'
Bluetooth:
Apple Magic Mouse:
Connected: Yes
Type: Mouse
OUT
}
export -f system_profiler
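# The stubbed system_profiler reports a connected Magic Mouse, which should register as a Bluetooth HID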
if has_bluetooth_hid_connected; then
echo "hid"
fi
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"hid"* ]]
}
@test "is_ac_power detects AC power" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/optimize/tasks.sh"
pmset() { echo "AC Power"; }
export -f pmset
if is_ac_power; then
echo "ac"
fi
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"ac"* ]]
}
@test "is_memory_pressure_high detects warning" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/optimize/tasks.sh"
memory_pressure() { echo "warning"; }
export -f memory_pressure
if is_memory_pressure_high; then
echo "high"
fi
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"high"* ]]
}
@test "opt_system_maintenance reports DNS and Spotlight" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" MOLE_DRY_RUN=1 bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/optimize/tasks.sh"
flush_dns_cache() { return 0; }
mdutil() { echo "Indexing enabled."; }
opt_system_maintenance
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"DNS cache flushed"* ]]
[[ "$output" == *"Spotlight index verified"* ]]
}
@test "opt_network_optimization refreshes DNS" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" MOLE_DRY_RUN=1 bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/optimize/tasks.sh"
flush_dns_cache() { return 0; }
opt_network_optimization
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"DNS cache refreshed"* ]]
[[ "$output" == *"mDNSResponder restarted"* ]]
}
@test "opt_sqlite_vacuum reports sqlite3 unavailable" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" /bin/bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/optimize/tasks.sh"
export PATH="/nonexistent"
opt_sqlite_vacuum
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"sqlite3 unavailable"* ]]
}
@test "opt_font_cache_rebuild succeeds in dry-run" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" MOLE_DRY_RUN=1 bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/optimize/tasks.sh"
opt_font_cache_rebuild
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Font cache cleared"* ]]
}
@test "opt_dock_refresh clears cache files" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" MOLE_DRY_RUN=1 bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/optimize/tasks.sh"
mkdir -p "$HOME/Library/Application Support/Dock"
touch "$HOME/Library/Application Support/Dock/test.db"
safe_remove() { return 0; }
opt_dock_refresh
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Dock cache cleared"* ]]
[[ "$output" == *"Dock refreshed"* ]]
}
@test "execute_optimization dispatches actions" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/optimize/tasks.sh"
opt_dock_refresh() { echo "dock"; }
execute_optimization dock_refresh
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"dock"* ]]
}
@test "execute_optimization rejects unknown action" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/optimize/tasks.sh"
execute_optimization unknown_action
EOF
[ "$status" -eq 1 ]
[[ "$output" == *"Unknown action"* ]]
}

View File

@@ -1,643 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-purge-home.XXXXXX")"
export HOME
mkdir -p "$HOME"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
setup() {
mkdir -p "$HOME/www"
mkdir -p "$HOME/dev"
mkdir -p "$HOME/.cache/mole"
rm -rf "${HOME:?}/www"/* "${HOME:?}/dev"/*
}
@test "is_safe_project_artifact: rejects shallow paths (protection against accidents)" {
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_safe_project_artifact '$HOME/www/node_modules' '$HOME/www'; then
echo 'UNSAFE'
else
echo 'SAFE'
fi
")
[[ "$result" == "SAFE" ]]
}
@test "is_safe_project_artifact: allows proper project artifacts" {
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_safe_project_artifact '$HOME/www/myproject/node_modules' '$HOME/www'; then
echo 'ALLOWED'
else
echo 'BLOCKED'
fi
")
[[ "$result" == "ALLOWED" ]]
}
@test "is_safe_project_artifact: rejects non-absolute paths" {
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_safe_project_artifact 'relative/path/node_modules' '$HOME/www'; then
echo 'UNSAFE'
else
echo 'SAFE'
fi
")
[[ "$result" == "SAFE" ]]
}
@test "is_safe_project_artifact: validates depth calculation" {
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_safe_project_artifact '$HOME/www/project/subdir/node_modules' '$HOME/www'; then
echo 'ALLOWED'
else
echo 'BLOCKED'
fi
")
[[ "$result" == "ALLOWED" ]]
}
@test "filter_nested_artifacts: removes nested node_modules" {
mkdir -p "$HOME/www/project/node_modules/package/node_modules"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
printf '%s\n' '$HOME/www/project/node_modules' '$HOME/www/project/node_modules/package/node_modules' | \
filter_nested_artifacts | wc -l | tr -d ' '
")
[[ "$result" == "1" ]]
}
@test "filter_nested_artifacts: keeps independent artifacts" {
mkdir -p "$HOME/www/project1/node_modules"
mkdir -p "$HOME/www/project2/target"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
printf '%s\n' '$HOME/www/project1/node_modules' '$HOME/www/project2/target' | \
filter_nested_artifacts | wc -l | tr -d ' '
")
[[ "$result" == "2" ]]
}
# Vendor protection unit tests
@test "is_rails_project_root: detects valid Rails project" {
mkdir -p "$HOME/www/test-rails/config"
mkdir -p "$HOME/www/test-rails/bin"
touch "$HOME/www/test-rails/config/application.rb"
touch "$HOME/www/test-rails/Gemfile"
touch "$HOME/www/test-rails/bin/rails"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_rails_project_root '$HOME/www/test-rails'; then
echo 'YES'
else
echo 'NO'
fi
")
[[ "$result" == "YES" ]]
}
@test "is_rails_project_root: rejects non-Rails directory" {
mkdir -p "$HOME/www/not-rails"
touch "$HOME/www/not-rails/package.json"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_rails_project_root '$HOME/www/not-rails'; then
echo 'YES'
else
echo 'NO'
fi
")
[[ "$result" == "NO" ]]
}
@test "is_go_project_root: detects valid Go project" {
mkdir -p "$HOME/www/test-go"
touch "$HOME/www/test-go/go.mod"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_go_project_root '$HOME/www/test-go'; then
echo 'YES'
else
echo 'NO'
fi
")
[[ "$result" == "YES" ]]
}
@test "is_php_project_root: detects valid PHP Composer project" {
mkdir -p "$HOME/www/test-php"
touch "$HOME/www/test-php/composer.json"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_php_project_root '$HOME/www/test-php'; then
echo 'YES'
else
echo 'NO'
fi
")
[[ "$result" == "YES" ]]
}
@test "is_protected_vendor_dir: protects Rails vendor" {
mkdir -p "$HOME/www/rails-app/vendor"
mkdir -p "$HOME/www/rails-app/config"
touch "$HOME/www/rails-app/config/application.rb"
touch "$HOME/www/rails-app/Gemfile"
touch "$HOME/www/rails-app/config/environment.rb"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_protected_vendor_dir '$HOME/www/rails-app/vendor'; then
echo 'PROTECTED'
else
echo 'NOT_PROTECTED'
fi
")
[[ "$result" == "PROTECTED" ]]
}
@test "is_protected_vendor_dir: does not protect PHP vendor" {
mkdir -p "$HOME/www/php-app/vendor"
touch "$HOME/www/php-app/composer.json"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_protected_vendor_dir '$HOME/www/php-app/vendor'; then
echo 'PROTECTED'
else
echo 'NOT_PROTECTED'
fi
")
[[ "$result" == "NOT_PROTECTED" ]]
}
@test "is_project_container detects project indicators" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/project.sh"
mkdir -p "$HOME/Workspace2/project"
touch "$HOME/Workspace2/project/package.json"
if is_project_container "$HOME/Workspace2" 2; then
echo "yes"
fi
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"yes"* ]]
}
@test "discover_project_dirs includes detected containers" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/project.sh"
mkdir -p "$HOME/CustomProjects/app"
touch "$HOME/CustomProjects/app/go.mod"
discover_project_dirs | grep -q "$HOME/CustomProjects"
EOF
[ "$status" -eq 0 ]
}
@test "save_discovered_paths writes config with tilde" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/project.sh"
save_discovered_paths "$HOME/Projects"
grep -q "^~/" "$HOME/.config/mole/purge_paths"
EOF
[ "$status" -eq 0 ]
}
@test "select_purge_categories returns failure on empty input" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/clean/project.sh"
if select_purge_categories; then
exit 1
fi
EOF
[ "$status" -eq 0 ]
}
@test "is_protected_vendor_dir: protects Go vendor" {
mkdir -p "$HOME/www/go-app/vendor"
touch "$HOME/www/go-app/go.mod"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_protected_vendor_dir '$HOME/www/go-app/vendor'; then
echo 'PROTECTED'
else
echo 'NOT_PROTECTED'
fi
")
[[ "$result" == "PROTECTED" ]]
}
@test "is_protected_vendor_dir: protects unknown vendor (conservative)" {
mkdir -p "$HOME/www/unknown-app/vendor"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_protected_vendor_dir '$HOME/www/unknown-app/vendor'; then
echo 'PROTECTED'
else
echo 'NOT_PROTECTED'
fi
")
[[ "$result" == "PROTECTED" ]]
}
@test "is_protected_purge_artifact: handles vendor directories correctly" {
mkdir -p "$HOME/www/php-app/vendor"
touch "$HOME/www/php-app/composer.json"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_protected_purge_artifact '$HOME/www/php-app/vendor'; then
echo 'PROTECTED'
else
echo 'NOT_PROTECTED'
fi
")
# PHP vendor should not be protected
[[ "$result" == "NOT_PROTECTED" ]]
}
@test "is_protected_purge_artifact: returns false for non-vendor artifacts" {
mkdir -p "$HOME/www/app/node_modules"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_protected_purge_artifact '$HOME/www/app/node_modules'; then
echo 'PROTECTED'
else
echo 'NOT_PROTECTED'
fi
")
# node_modules is not in the protected list
[[ "$result" == "NOT_PROTECTED" ]]
}
# Integration tests
@test "scan_purge_targets: skips Rails vendor directory" {
mkdir -p "$HOME/www/rails-app/vendor/javascript"
mkdir -p "$HOME/www/rails-app/config"
touch "$HOME/www/rails-app/config/application.rb"
touch "$HOME/www/rails-app/Gemfile"
mkdir -p "$HOME/www/rails-app/bin"
touch "$HOME/www/rails-app/bin/rails"
local scan_output
scan_output="$(mktemp)"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
scan_purge_targets '$HOME/www' '$scan_output'
if grep -q '$HOME/www/rails-app/vendor' '$scan_output'; then
echo 'FOUND'
else
echo 'SKIPPED'
fi
")
rm -f "$scan_output"
[[ "$result" == "SKIPPED" ]]
}
@test "scan_purge_targets: cleans PHP Composer vendor directory" {
mkdir -p "$HOME/www/php-app/vendor"
touch "$HOME/www/php-app/composer.json"
local scan_output
scan_output="$(mktemp)"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
scan_purge_targets '$HOME/www' '$scan_output'
if grep -q '$HOME/www/php-app/vendor' '$scan_output'; then
echo 'FOUND'
else
echo 'MISSING'
fi
")
rm -f "$scan_output"
[[ "$result" == "FOUND" ]]
}
@test "scan_purge_targets: skips Go vendor directory" {
mkdir -p "$HOME/www/go-app/vendor"
touch "$HOME/www/go-app/go.mod"
touch "$HOME/www/go-app/go.sum"
local scan_output
scan_output="$(mktemp)"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
scan_purge_targets '$HOME/www' '$scan_output'
if grep -q '$HOME/www/go-app/vendor' '$scan_output'; then
echo 'FOUND'
else
echo 'SKIPPED'
fi
")
rm -f "$scan_output"
[[ "$result" == "SKIPPED" ]]
}
@test "scan_purge_targets: skips unknown vendor directory" {
# Create a vendor directory without any project file
mkdir -p "$HOME/www/unknown-app/vendor"
local scan_output
scan_output="$(mktemp)"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
scan_purge_targets '$HOME/www' '$scan_output'
if grep -q '$HOME/www/unknown-app/vendor' '$scan_output'; then
echo 'FOUND'
else
echo 'SKIPPED'
fi
")
rm -f "$scan_output"
# Unknown vendor should be protected (conservative approach)
[[ "$result" == "SKIPPED" ]]
}
@test "is_recently_modified: detects recent projects" {
mkdir -p "$HOME/www/project/node_modules"
touch "$HOME/www/project/package.json"
result=$(bash -c "
source '$PROJECT_ROOT/lib/core/common.sh'
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_recently_modified '$HOME/www/project/node_modules'; then
echo 'RECENT'
else
echo 'OLD'
fi
")
[[ "$result" == "RECENT" ]]
}
@test "is_recently_modified: marks old projects correctly" {
mkdir -p "$HOME/www/old-project/node_modules"
mkdir -p "$HOME/www/old-project"
bash -c "
source '$PROJECT_ROOT/lib/core/common.sh'
source '$PROJECT_ROOT/lib/clean/project.sh'
is_recently_modified '$HOME/www/old-project/node_modules' || true
"
local exit_code=$?
[ "$exit_code" -eq 0 ] || [ "$exit_code" -eq 1 ]
}
@test "purge targets are configured correctly" {
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
echo \"\${PURGE_TARGETS[@]}\"
")
[[ "$result" == *"node_modules"* ]]
[[ "$result" == *"target"* ]]
}
@test "get_dir_size_kb: calculates directory size" {
mkdir -p "$HOME/www/test-project/node_modules"
dd if=/dev/zero of="$HOME/www/test-project/node_modules/file.bin" bs=1024 count=1024 2>/dev/null
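# 1024 blocks of 1 KB is roughly 1 MB, so the reported size should land near 1024 KB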
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
get_dir_size_kb '$HOME/www/test-project/node_modules'
")
[[ "$result" -ge 1000 ]] && [[ "$result" -le 1100 ]]
}
@test "get_dir_size_kb: handles non-existent paths gracefully" {
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
get_dir_size_kb '$HOME/www/non-existent'
")
[[ "$result" == "0" ]]
}
@test "clean_project_artifacts: handles empty directory gracefully" {
run bash -c "
export HOME='$HOME'
source '$PROJECT_ROOT/lib/core/common.sh'
source '$PROJECT_ROOT/lib/clean/project.sh'
clean_project_artifacts
" < /dev/null
[[ "$status" -eq 0 ]] || [[ "$status" -eq 2 ]]
}
@test "clean_project_artifacts: scans and finds artifacts" {
if ! command -v gtimeout >/dev/null 2>&1 && ! command -v timeout >/dev/null 2>&1; then
skip "gtimeout/timeout not available"
fi
mkdir -p "$HOME/www/test-project/node_modules/package1"
echo "test data" > "$HOME/www/test-project/node_modules/package1/index.js"
mkdir -p "$HOME/www/test-project"
timeout_cmd="timeout"
command -v timeout >/dev/null 2>&1 || timeout_cmd="gtimeout"
run bash -c "
export HOME='$HOME'
$timeout_cmd 5 '$PROJECT_ROOT/bin/purge.sh' 2>&1 < /dev/null || true
"
[[ "$output" =~ "Scanning" ]] ||
[[ "$output" =~ "Purge complete" ]] ||
[[ "$output" =~ "No old" ]] ||
[[ "$output" =~ "Great" ]]
}
@test "mo purge: command exists and is executable" {
[ -x "$PROJECT_ROOT/mole" ]
[ -f "$PROJECT_ROOT/bin/purge.sh" ]
}
@test "mo purge: shows in help text" {
run env HOME="$HOME" "$PROJECT_ROOT/mole" --help
[ "$status" -eq 0 ]
[[ "$output" == *"mo purge"* ]]
}
@test "mo purge: accepts --debug flag" {
if ! command -v gtimeout >/dev/null 2>&1 && ! command -v timeout >/dev/null 2>&1; then
skip "gtimeout/timeout not available"
fi
timeout_cmd="timeout"
command -v timeout >/dev/null 2>&1 || timeout_cmd="gtimeout"
run bash -c "
export HOME='$HOME'
$timeout_cmd 2 '$PROJECT_ROOT/mole' purge --debug < /dev/null 2>&1 || true
"
true
}
@test "mo purge: creates cache directory for stats" {
if ! command -v gtimeout >/dev/null 2>&1 && ! command -v timeout >/dev/null 2>&1; then
skip "gtimeout/timeout not available"
fi
timeout_cmd="timeout"
command -v timeout >/dev/null 2>&1 || timeout_cmd="gtimeout"
bash -c "
export HOME='$HOME'
$timeout_cmd 2 '$PROJECT_ROOT/mole' purge < /dev/null 2>&1 || true
"
[ -d "$HOME/.cache/mole" ] || [ -d "${XDG_CACHE_HOME:-$HOME/.cache}/mole" ]
}
# .NET bin directory detection tests
@test "is_dotnet_bin_dir: finds .NET context in parent directory with Debug dir" {
mkdir -p "$HOME/www/dotnet-app/bin/Debug"
touch "$HOME/www/dotnet-app/MyProject.csproj"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_dotnet_bin_dir '$HOME/www/dotnet-app/bin'; then
echo 'FOUND'
else
echo 'NOT_FOUND'
fi
")
[[ "$result" == "FOUND" ]]
}
@test "is_dotnet_bin_dir: requires .csproj AND Debug/Release" {
mkdir -p "$HOME/www/dotnet-app/bin"
touch "$HOME/www/dotnet-app/MyProject.csproj"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_dotnet_bin_dir '$HOME/www/dotnet-app/bin'; then
echo 'FOUND'
else
echo 'NOT_FOUND'
fi
")
# Should not find it because Debug/Release directories don't exist
[[ "$result" == "NOT_FOUND" ]]
}
@test "is_dotnet_bin_dir: rejects non-bin directories" {
mkdir -p "$HOME/www/dotnet-app/obj"
touch "$HOME/www/dotnet-app/MyProject.csproj"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
if is_dotnet_bin_dir '$HOME/www/dotnet-app/obj'; then
echo 'FOUND'
else
echo 'NOT_FOUND'
fi
")
[[ "$result" == "NOT_FOUND" ]]
}
# Integration test for bin scanning
@test "scan_purge_targets: includes .NET bin directories with Debug/Release" {
mkdir -p "$HOME/www/dotnet-app/bin/Debug"
touch "$HOME/www/dotnet-app/MyProject.csproj"
local scan_output
scan_output="$(mktemp)"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
scan_purge_targets '$HOME/www' '$scan_output'
if grep -q '$HOME/www/dotnet-app/bin' '$scan_output'; then
echo 'FOUND'
else
echo 'MISSING'
fi
")
rm -f "$scan_output"
[[ "$result" == "FOUND" ]]
}
@test "scan_purge_targets: skips generic bin directories (non-.NET)" {
mkdir -p "$HOME/www/ruby-app/bin"
touch "$HOME/www/ruby-app/Gemfile"
local scan_output
scan_output="$(mktemp)"
result=$(bash -c "
source '$PROJECT_ROOT/lib/clean/project.sh'
scan_purge_targets '$HOME/www' '$scan_output'
if grep -q '$HOME/www/ruby-app/bin' '$scan_output'; then
echo 'FOUND'
else
echo 'SKIPPED'
fi
")
rm -f "$scan_output"
[[ "$result" == "SKIPPED" ]]
}

View File

@@ -1,115 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-purge-config.XXXXXX")"
export HOME
mkdir -p "$HOME"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
setup() {
rm -rf "$HOME/.config"
mkdir -p "$HOME/.config/mole"
}
@test "load_purge_config loads default paths when config file is missing" {
run env HOME="$HOME" bash -c "source '$PROJECT_ROOT/lib/clean/project.sh'; echo \"\${PURGE_SEARCH_PATHS[*]}\""
[ "$status" -eq 0 ]
[[ "$output" == *"$HOME/Projects"* ]]
[[ "$output" == *"$HOME/GitHub"* ]]
[[ "$output" == *"$HOME/dev"* ]]
}
@test "load_purge_config loads custom paths from config file" {
local config_file="$HOME/.config/mole/purge_paths"
cat > "$config_file" << EOF
$HOME/custom/projects
$HOME/work
EOF
run env HOME="$HOME" bash -c "source '$PROJECT_ROOT/lib/clean/project.sh'; echo \"\${PURGE_SEARCH_PATHS[*]}\""
[ "$status" -eq 0 ]
[[ "$output" == *"$HOME/custom/projects"* ]]
[[ "$output" == *"$HOME/work"* ]]
[[ "$output" != *"$HOME/GitHub"* ]]
}
@test "load_purge_config expands tilde in paths" {
local config_file="$HOME/.config/mole/purge_paths"
cat > "$config_file" << EOF
~/tilde/expanded
~/another/one
EOF
run env HOME="$HOME" bash -c "source '$PROJECT_ROOT/lib/clean/project.sh'; echo \"\${PURGE_SEARCH_PATHS[*]}\""
[ "$status" -eq 0 ]
[[ "$output" == *"$HOME/tilde/expanded"* ]]
[[ "$output" == *"$HOME/another/one"* ]]
[[ "$output" != *"~"* ]]
}
@test "load_purge_config ignores comments and empty lines" {
local config_file="$HOME/.config/mole/purge_paths"
cat > "$config_file" << EOF
# comment lines should be ignored
$HOME/valid/path

$HOME/another/path
EOF
run env HOME="$HOME" bash -c "source '$PROJECT_ROOT/lib/clean/project.sh'; echo \"\${#PURGE_SEARCH_PATHS[@]}\"; echo \"\${PURGE_SEARCH_PATHS[*]}\""
[ "$status" -eq 0 ]
local lines
read -r -a lines <<< "$output"
local count="${lines[0]}"
[ "$count" -eq 2 ]
[[ "$output" == *"$HOME/valid/path"* ]]
[[ "$output" == *"$HOME/another/path"* ]]
}
@test "load_purge_config falls back to defaults if config file is empty" {
local config_file="$HOME/.config/mole/purge_paths"
touch "$config_file"
run env HOME="$HOME" bash -c "source '$PROJECT_ROOT/lib/clean/project.sh'; echo \"\${PURGE_SEARCH_PATHS[*]}\""
[ "$status" -eq 0 ]
[[ "$output" == *"$HOME/Projects"* ]]
}
@test "load_purge_config falls back to defaults if config file has only comments" {
local config_file="$HOME/.config/mole/purge_paths"
echo "# Just a comment" > "$config_file"
run env HOME="$HOME" bash -c "source '$PROJECT_ROOT/lib/clean/project.sh'; echo \"\${PURGE_SEARCH_PATHS[*]}\""
[ "$status" -eq 0 ]
[[ "$output" == *"$HOME/Projects"* ]]
}

View File

@@ -1,158 +0,0 @@
#!/usr/bin/env bats
setup() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
export HOME="$BATS_TEST_TMPDIR/home"
mkdir -p "$HOME/.config/mole"
}
@test "find with non-existent directory doesn't cause script exit (pipefail bug)" {
result=$(bash -c '
set -euo pipefail
find /non/existent/dir -name "*.cache" 2>/dev/null || true
echo "survived"
')
[[ "$result" == "survived" ]]
}
@test "browser directory check pattern is safe when directories don't exist" {
result=$(bash -c '
set -euo pipefail
search_dirs=()
[[ -d "/non/existent/chrome" ]] && search_dirs+=("/non/existent/chrome")
[[ -d "/tmp" ]] && search_dirs+=("/tmp")
if [[ ${#search_dirs[@]} -gt 0 ]]; then
find "${search_dirs[@]}" -maxdepth 1 -type f 2>/dev/null || true
fi
echo "survived"
')
[[ "$result" == "survived" ]]
}
@test "empty array doesn't cause unbound variable error" {
result=$(bash -c '
set -euo pipefail
search_dirs=()
if [[ ${#search_dirs[@]} -gt 0 ]]; then
echo "should not reach here"
fi
echo "survived"
')
[[ "$result" == "survived" ]]
}
@test "version comparison works correctly" {
result=$(bash -c '
v1="1.11.8"
v2="1.11.9"
if [[ "$(printf "%s\n" "$v1" "$v2" | sort -V | head -1)" == "$v1" && "$v1" != "$v2" ]]; then
echo "update_needed"
fi
')
[[ "$result" == "update_needed" ]]
}
@test "version comparison with same versions" {
result=$(bash -c '
v1="1.11.8"
v2="1.11.8"
if [[ "$(printf "%s\n" "$v1" "$v2" | sort -V | head -1)" == "$v1" && "$v1" != "$v2" ]]; then
echo "update_needed"
else
echo "up_to_date"
fi
')
[[ "$result" == "up_to_date" ]]
}
@test "version prefix v/V is stripped correctly" {
result=$(bash -c '
version="v1.11.9"
clean=${version#v}
clean=${clean#V}
echo "$clean"
')
[[ "$result" == "1.11.9" ]]
}
@test "network timeout prevents hanging (simulated)" {
if ! command -v gtimeout >/dev/null 2>&1 && ! command -v timeout >/dev/null 2>&1; then
skip "gtimeout/timeout not available"
fi
timeout_cmd="timeout"
command -v timeout >/dev/null 2>&1 || timeout_cmd="gtimeout"
# shellcheck disable=SC2016
result=$($timeout_cmd 5 bash -c '
result=$(curl -fsSL --connect-timeout 1 --max-time 2 "http://192.0.2.1:12345/test" 2>/dev/null || echo "failed")
if [[ "$result" == "failed" ]]; then
echo "timeout_works"
fi
')
[[ "$result" == "timeout_works" ]]
}
@test "empty version string is handled gracefully" {
result=$(bash -c '
latest=""
if [[ -z "$latest" ]]; then
echo "handled"
fi
')
[[ "$result" == "handled" ]]
}
@test "grep with no match doesn't cause exit in pipefail mode" {
result=$(bash -c '
set -euo pipefail
echo "test" | grep "nonexistent" || true
echo "survived"
')
[[ "$result" == "survived" ]]
}
@test "command substitution failure is handled with || true" {
result=$(bash -c '
set -euo pipefail
output=$(false) || true
echo "survived"
')
[[ "$result" == "survived" ]]
}
@test "arithmetic on zero doesn't cause exit" {
result=$(bash -c '
set -euo pipefail
count=0
((count++)) || true
echo "$count"
')
[[ "$result" == "1" ]]
}
@test "safe_remove pattern doesn't fail on non-existent path" {
result=$(bash -c "
set -euo pipefail
source '$PROJECT_ROOT/lib/core/common.sh'
safe_remove '$HOME/non/existent/path' true > /dev/null 2>&1 || true
echo 'survived'
")
[[ "$result" == "survived" ]]
}
@test "module loading doesn't fail" {
result=$(bash -c "
set -euo pipefail
source '$PROJECT_ROOT/lib/core/common.sh'
echo 'loaded'
")
[[ "$result" == "loaded" ]]
}

View File

@@ -1,80 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-scripts-home.XXXXXX")"
export HOME
mkdir -p "$HOME"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
setup() {
export TERM="dumb"
rm -rf "${HOME:?}"/*
mkdir -p "$HOME"
}
@test "check.sh --help shows usage information" {
run "$PROJECT_ROOT/scripts/check.sh" --help
[ "$status" -eq 0 ]
[[ "$output" == *"Usage"* ]]
[[ "$output" == *"--format"* ]]
[[ "$output" == *"--no-format"* ]]
}
@test "check.sh script exists and is valid" {
[ -f "$PROJECT_ROOT/scripts/check.sh" ]
[ -x "$PROJECT_ROOT/scripts/check.sh" ]
run bash -c "grep -q 'Mole Check' '$PROJECT_ROOT/scripts/check.sh'"
[ "$status" -eq 0 ]
}
@test "test.sh script exists and is valid" {
[ -f "$PROJECT_ROOT/scripts/test.sh" ]
[ -x "$PROJECT_ROOT/scripts/test.sh" ]
run bash -c "grep -q 'Mole Test Runner' '$PROJECT_ROOT/scripts/test.sh'"
[ "$status" -eq 0 ]
}
@test "test.sh includes test lint step" {
run bash -c "grep -q 'Test script lint' '$PROJECT_ROOT/scripts/test.sh'"
[ "$status" -eq 0 ]
}
@test "Makefile has build target for Go binaries" {
run bash -c "grep -q 'go build' '$PROJECT_ROOT/Makefile'"
[ "$status" -eq 0 ]
}
@test "setup-quick-launchers.sh has detect_mo function" {
run bash -c "grep -q 'detect_mo()' '$PROJECT_ROOT/scripts/setup-quick-launchers.sh'"
[ "$status" -eq 0 ]
}
@test "setup-quick-launchers.sh has Raycast script generation" {
run bash -c "grep -q 'create_raycast_commands' '$PROJECT_ROOT/scripts/setup-quick-launchers.sh'"
[ "$status" -eq 0 ]
run bash -c "grep -q 'write_raycast_script' '$PROJECT_ROOT/scripts/setup-quick-launchers.sh'"
[ "$status" -eq 0 ]
}
@test "install.sh supports dev branch installs" {
run bash -c "grep -q 'refs/heads/dev.tar.gz' '$PROJECT_ROOT/install.sh'"
[ "$status" -eq 0 ]
run bash -c "grep -q 'MOLE_VERSION=\"dev\"' '$PROJECT_ROOT/install.sh'"
[ "$status" -eq 0 ]
}

View File

@@ -1,267 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${BATS_TMPDIR:-}" # Use BATS_TMPDIR as original HOME if set by bats
if [[ -z "$ORIGINAL_HOME" ]]; then
ORIGINAL_HOME="${HOME:-}"
fi
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-uninstall-home.XXXXXX")"
export HOME
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
setup() {
export TERM="dumb"
rm -rf "${HOME:?}"/*
mkdir -p "$HOME"
}
create_app_artifacts() {
mkdir -p "$HOME/Applications/TestApp.app"
mkdir -p "$HOME/Library/Application Support/TestApp"
mkdir -p "$HOME/Library/Caches/TestApp"
mkdir -p "$HOME/Library/Containers/com.example.TestApp"
mkdir -p "$HOME/Library/Preferences"
touch "$HOME/Library/Preferences/com.example.TestApp.plist"
mkdir -p "$HOME/Library/Preferences/ByHost"
touch "$HOME/Library/Preferences/ByHost/com.example.TestApp.ABC123.plist"
mkdir -p "$HOME/Library/Saved Application State/com.example.TestApp.savedState"
}
@test "find_app_files discovers user-level leftovers" {
create_app_artifacts
result="$(
HOME="$HOME" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
find_app_files "com.example.TestApp" "TestApp"
EOF
)"
[[ "$result" == *"Application Support/TestApp"* ]]
[[ "$result" == *"Caches/TestApp"* ]]
[[ "$result" == *"Preferences/com.example.TestApp.plist"* ]]
[[ "$result" == *"Saved Application State/com.example.TestApp.savedState"* ]]
[[ "$result" == *"Containers/com.example.TestApp"* ]]
}
@test "calculate_total_size returns aggregate kilobytes" {
mkdir -p "$HOME/sized"
dd if=/dev/zero of="$HOME/sized/file1" bs=1024 count=1 > /dev/null 2>&1
dd if=/dev/zero of="$HOME/sized/file2" bs=1024 count=2 > /dev/null 2>&1
result="$(
HOME="$HOME" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
files="$(printf '%s
%s
' "$HOME/sized/file1" "$HOME/sized/file2")"
calculate_total_size "$files"
EOF
)"
[ "$result" -ge 3 ]
}
@test "batch_uninstall_applications removes selected app data" {
create_app_artifacts
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/uninstall/batch.sh"
request_sudo_access() { return 0; }
start_inline_spinner() { :; }
stop_inline_spinner() { :; }
enter_alt_screen() { :; }
leave_alt_screen() { :; }
hide_cursor() { :; }
show_cursor() { :; }
remove_apps_from_dock() { :; }
pgrep() { return 1; }
pkill() { return 0; }
sudo() { return 0; } # Mock sudo command
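# UI helpers and privileged commands are stubbed so the uninstall flow runs non-interactively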
app_bundle="$HOME/Applications/TestApp.app"
mkdir -p "$app_bundle" # Ensure this is created in the temp HOME
related="$(find_app_files "com.example.TestApp" "TestApp")"
encoded_related=$(printf '%s' "$related" | base64 | tr -d '\n')
selected_apps=()
selected_apps+=("0|$app_bundle|TestApp|com.example.TestApp|0|Never")
files_cleaned=0
total_items=0
total_size_cleaned=0
batch_uninstall_applications
[[ ! -d "$app_bundle" ]] || exit 1
[[ ! -d "$HOME/Library/Application Support/TestApp" ]] || exit 1
[[ ! -d "$HOME/Library/Caches/TestApp" ]] || exit 1
[[ ! -f "$HOME/Library/Preferences/com.example.TestApp.plist" ]] || exit 1
EOF
[ "$status" -eq 0 ]
}
@test "safe_remove can remove a simple directory" {
mkdir -p "$HOME/test_dir"
touch "$HOME/test_dir/file.txt"
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
safe_remove "$HOME/test_dir"
[[ ! -d "$HOME/test_dir" ]] || exit 1
EOF
[ "$status" -eq 0 ]
}
@test "decode_file_list validates base64 encoding" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/uninstall/batch.sh"
valid_data=$(printf '/path/one
/path/two' | base64)
result=$(decode_file_list "$valid_data" "TestApp")
[[ -n "$result" ]] || exit 1
EOF
[ "$status" -eq 0 ]
}
@test "decode_file_list rejects invalid base64" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/uninstall/batch.sh"
if result=$(decode_file_list "not-valid-base64!!!" "TestApp" 2>/dev/null); then
[[ -z "$result" ]]
else
true
fi
EOF
[ "$status" -eq 0 ]
}
@test "decode_file_list handles empty input" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/uninstall/batch.sh"
empty_data=$(printf '' | base64)
result=$(decode_file_list "$empty_data" "TestApp" 2>/dev/null) || true
[[ -z "$result" ]]
EOF
[ "$status" -eq 0 ]
}
@test "decode_file_list rejects non-absolute paths" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/uninstall/batch.sh"
bad_data=$(printf 'relative/path' | base64)
if result=$(decode_file_list "$bad_data" "TestApp" 2>/dev/null); then
[[ -z "$result" ]]
else
true
fi
EOF
[ "$status" -eq 0 ]
}
@test "decode_file_list handles both BSD and GNU base64 formats" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/uninstall/batch.sh"
test_paths="/path/to/file1
/path/to/file2"
encoded_data=$(printf '%s' "$test_paths" | base64 | tr -d '\n')
result=$(decode_file_list "$encoded_data" "TestApp")
[[ "$result" == *"/path/to/file1"* ]] || exit 1
[[ "$result" == *"/path/to/file2"* ]] || exit 1
[[ -n "$result" ]] || exit 1
EOF
[ "$status" -eq 0 ]
}
@test "remove_mole deletes manual binaries and caches" {
mkdir -p "$HOME/.local/bin"
touch "$HOME/.local/bin/mole"
touch "$HOME/.local/bin/mo"
mkdir -p "$HOME/.config/mole" "$HOME/.cache/mole"
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" PATH="/usr/bin:/bin" bash --noprofile --norc << 'EOF'
set -euo pipefail
start_inline_spinner() { :; }
stop_inline_spinner() { :; }
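# Override rm so only paths under the temp HOME are ever deleted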
rm() {
local -a flags=()
local -a paths=()
local arg
for arg in "$@"; do
if [[ "$arg" == -* ]]; then
flags+=("$arg")
else
paths+=("$arg")
fi
done
local path
for path in "${paths[@]}"; do
if [[ "$path" == "$HOME" || "$path" == "$HOME/"* ]]; then
/bin/rm "${flags[@]}" "$path"
fi
done
return 0
}
sudo() {
if [[ "$1" == "rm" ]]; then
shift
rm "$@"
return 0
fi
return 0
}
export -f start_inline_spinner stop_inline_spinner rm sudo
printf '\n' | "$PROJECT_ROOT/mole" remove
EOF
[ "$status" -eq 0 ]
[ ! -f "$HOME/.local/bin/mole" ]
[ ! -f "$HOME/.local/bin/mo" ]
[ ! -d "$HOME/.config/mole" ]
[ ! -d "$HOME/.cache/mole" ]
}

View File

@@ -1,235 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
CURRENT_VERSION="$(grep '^VERSION=' "$PROJECT_ROOT/mole" | head -1 | sed 's/VERSION=\"\\(.*\\)\"/\\1/')"
export CURRENT_VERSION
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-update-manager.XXXXXX")"
export HOME
mkdir -p "${HOME}/.cache/mole"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
setup() {
BREW_OUTDATED_COUNT=0
BREW_FORMULA_OUTDATED_COUNT=0
BREW_CASK_OUTDATED_COUNT=0
APPSTORE_UPDATE_COUNT=0
MACOS_UPDATE_AVAILABLE=false
MOLE_UPDATE_AVAILABLE=false
export MOCK_BIN_DIR="$BATS_TMPDIR/mole-mocks-$$"
mkdir -p "$MOCK_BIN_DIR"
export PATH="$MOCK_BIN_DIR:$PATH"
}
teardown() {
rm -rf "$MOCK_BIN_DIR"
}
read_key() {
echo "ESC"
return 0
}
@test "ask_for_updates returns 1 when no updates available" {
run bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/manage/update.sh"
BREW_OUTDATED_COUNT=0
APPSTORE_UPDATE_COUNT=0
MACOS_UPDATE_AVAILABLE=false
MOLE_UPDATE_AVAILABLE=false
ask_for_updates
EOF
[ "$status" -eq 1 ]
}
@test "ask_for_updates shows updates and waits for input" {
run bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/manage/update.sh"
BREW_OUTDATED_COUNT=5
BREW_FORMULA_OUTDATED_COUNT=3
BREW_CASK_OUTDATED_COUNT=2
APPSTORE_UPDATE_COUNT=1
MACOS_UPDATE_AVAILABLE=true
MOLE_UPDATE_AVAILABLE=true
read_key() { echo "ESC"; return 0; }
ask_for_updates
EOF
[ "$status" -eq 1 ] # ESC cancels
[[ "$output" == *"Homebrew (5 updates)"* ]]
[[ "$output" == *"App Store (1 apps)"* ]]
[[ "$output" == *"macOS system"* ]]
[[ "$output" == *"Mole"* ]]
}
@test "ask_for_updates accepts Enter when updates exist" {
run bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/manage/update.sh"
BREW_OUTDATED_COUNT=2
BREW_FORMULA_OUTDATED_COUNT=2
MOLE_UPDATE_AVAILABLE=true
read_key() { echo "ENTER"; return 0; }
ask_for_updates
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"AVAILABLE UPDATES"* ]]
[[ "$output" == *"yes"* ]]
}
@test "format_brew_update_label lists formula and cask counts" {
run bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/manage/update.sh"
BREW_OUTDATED_COUNT=5
BREW_FORMULA_OUTDATED_COUNT=3
BREW_CASK_OUTDATED_COUNT=2
format_brew_update_label
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"3 formula"* ]]
[[ "$output" == *"2 cask"* ]]
}
@test "perform_updates handles Homebrew success and Mole update" {
run bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/manage/update.sh"
BREW_FORMULA_OUTDATED_COUNT=1
BREW_CASK_OUTDATED_COUNT=0
MOLE_UPDATE_AVAILABLE=true
FAKE_DIR="$HOME/fake-script-dir"
mkdir -p "$FAKE_DIR/lib/manage"
cat > "$FAKE_DIR/mole" <<'SCRIPT'
#!/usr/bin/env bash
echo "Already on latest version"
SCRIPT
chmod +x "$FAKE_DIR/mole"
SCRIPT_DIR="$FAKE_DIR/lib/manage"
brew_has_outdated() { return 0; }
start_inline_spinner() { :; }
stop_inline_spinner() { :; }
reset_brew_cache() { echo "BREW_CACHE_RESET"; }
reset_mole_cache() { echo "MOLE_CACHE_RESET"; }
has_sudo_session() { return 1; }
ensure_sudo_session() { echo "ensure_sudo_session_called"; return 1; }
brew() {
if [[ "$1" == "upgrade" ]]; then
echo "Upgrading formula"
return 0
fi
return 0
}
get_appstore_update_labels() { return 0; }
get_macos_update_labels() { return 0; }
perform_updates
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Updating Mole"* ]]
[[ "$output" == *"Mole updated"* ]]
[[ "$output" == *"MOLE_CACHE_RESET"* ]]
[[ "$output" == *"All updates completed"* ]]
}
@test "update_via_homebrew reports already on latest version" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc << 'EOF'
set -euo pipefail
MOLE_TEST_BREW_UPDATE_OUTPUT="Updated 0 formulae"
MOLE_TEST_BREW_UPGRADE_OUTPUT="Warning: mole 1.7.9 already installed"
MOLE_TEST_BREW_LIST_OUTPUT="mole 1.7.9"
start_inline_spinner() { :; }
stop_inline_spinner() { :; }
brew() {
case "$1" in
update) echo "$MOLE_TEST_BREW_UPDATE_OUTPUT";;
upgrade) echo "$MOLE_TEST_BREW_UPGRADE_OUTPUT";;
list) if [[ "$2" == "--versions" ]]; then echo "$MOLE_TEST_BREW_LIST_OUTPUT"; fi ;;
esac
}
export -f brew start_inline_spinner stop_inline_spinner
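# brew output is faked to report no changes, so the updater should conclude it is already current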
source "$PROJECT_ROOT/lib/core/common.sh"
update_via_homebrew "1.7.9"
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Already on latest version"* ]]
}
@test "update_mole skips download when already latest" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" CURRENT_VERSION="$CURRENT_VERSION" PATH="$HOME/fake-bin:/usr/bin:/bin" TERM="dumb" bash --noprofile --norc << 'EOF'
set -euo pipefail
curl() {
local out=""
local url=""
while [[ $# -gt 0 ]]; do
case "$1" in
-o)
out="$2"
shift 2
;;
http*://*)
url="$1"
shift
;;
*)
shift
;;
esac
done
if [[ -n "$out" ]]; then
echo "Installer executed" > "$out"
return 0
fi
if [[ "$url" == *"api.github.com"* ]]; then
echo "{\"tag_name\":\"$CURRENT_VERSION\"}"
else
echo "VERSION=\"$CURRENT_VERSION\""
fi
}
export -f curl
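# curl is stubbed to report the current version from the release API, so no real download should occur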
brew() { exit 1; }
export -f brew
"$PROJECT_ROOT/mole" update
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Already on latest version"* ]]
}

View File

@@ -1,214 +0,0 @@
#!/usr/bin/env bats
setup_file() {
PROJECT_ROOT="$(cd "${BATS_TEST_DIRNAME}/.." && pwd)"
export PROJECT_ROOT
ORIGINAL_HOME="${HOME:-}"
export ORIGINAL_HOME
HOME="$(mktemp -d "${BATS_TEST_DIRNAME}/tmp-userfile.XXXXXX")"
export HOME
mkdir -p "$HOME"
}
teardown_file() {
rm -rf "$HOME"
if [[ -n "${ORIGINAL_HOME:-}" ]]; then
export HOME="$ORIGINAL_HOME"
fi
}
setup() {
rm -rf "$HOME/.config" "$HOME/.cache"
mkdir -p "$HOME"
}
@test "get_darwin_major returns numeric version on macOS" {
result=$(bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; get_darwin_major")
[[ "$result" =~ ^[0-9]+$ ]]
}
@test "get_darwin_major returns 999 on failure (mock uname failure)" {
result=$(bash -c "
uname() { return 1; }
export -f uname
source '$PROJECT_ROOT/lib/core/base.sh'
get_darwin_major
")
[ "$result" = "999" ]
}
@test "is_darwin_ge correctly compares versions" {
run bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; is_darwin_ge 1"
[ "$status" -eq 0 ]
result=$(bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; is_darwin_ge 100 && echo 'yes' || echo 'no'")
[[ -n "$result" ]]
}
@test "is_root_user detects non-root correctly" {
result=$(bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; is_root_user && echo 'root' || echo 'not-root'")
[ "$result" = "not-root" ]
}
@test "get_invoking_user returns current user when not sudo" {
result=$(bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; get_invoking_user")
[ -n "$result" ]
[ "$result" = "${USER:-$(whoami)}" ]
}
@test "get_invoking_uid returns numeric UID" {
result=$(bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; get_invoking_uid")
[[ "$result" =~ ^[0-9]+$ ]]
}
@test "get_invoking_gid returns numeric GID" {
result=$(bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; get_invoking_gid")
[[ "$result" =~ ^[0-9]+$ ]]
}
@test "get_invoking_home returns home directory" {
result=$(bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; get_invoking_home")
[ -n "$result" ]
[ -d "$result" ]
}
@test "get_user_home returns home for valid user" {
current_user="${USER:-$(whoami)}"
result=$(bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; get_user_home '$current_user'")
[ -n "$result" ]
[ -d "$result" ]
}
@test "get_user_home returns empty for invalid user" {
result=$(bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; get_user_home 'nonexistent_user_12345'")
[ -z "$result" ] || [ "$result" = "~nonexistent_user_12345" ]
}
@test "ensure_user_dir creates simple directory" {
test_dir="$HOME/.cache/test"
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_dir '$test_dir'"
[ -d "$test_dir" ]
}
@test "ensure_user_dir creates nested directory" {
test_dir="$HOME/.config/mole/deep/nested/path"
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_dir '$test_dir'"
[ -d "$test_dir" ]
}
@test "ensure_user_dir handles tilde expansion" {
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_dir '~/.cache/tilde-test'"
[ -d "$HOME/.cache/tilde-test" ]
}
@test "ensure_user_dir is idempotent" {
test_dir="$HOME/.cache/idempotent"
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_dir '$test_dir'"
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_dir '$test_dir'"
[ -d "$test_dir" ]
}
@test "ensure_user_dir handles empty path gracefully" {
run bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_dir ''"
[ "$status" -eq 0 ]
}
@test "ensure_user_dir preserves ownership for non-root users" {
test_dir="$HOME/.cache/ownership-test"
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_dir '$test_dir'"
current_uid=$(id -u)
dir_uid=$(/usr/bin/stat -f%u "$test_dir")
[ "$dir_uid" = "$current_uid" ]
}
@test "ensure_user_file creates file and parent directories" {
test_file="$HOME/.config/mole/test.log"
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_file '$test_file'"
[ -f "$test_file" ]
[ -d "$(dirname "$test_file")" ]
}
@test "ensure_user_file handles tilde expansion" {
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_file '~/.cache/tilde-file.txt'"
[ -f "$HOME/.cache/tilde-file.txt" ]
}
@test "ensure_user_file is idempotent" {
test_file="$HOME/.cache/idempotent.txt"
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_file '$test_file'"
echo "content" > "$test_file"
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_file '$test_file'"
[ -f "$test_file" ]
[ "$(cat "$test_file")" = "content" ]
}
@test "ensure_user_file handles empty path gracefully" {
run bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_file ''"
[ "$status" -eq 0 ]
}
@test "ensure_user_file creates deeply nested files" {
test_file="$HOME/.config/deep/very/nested/structure/file.log"
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_file '$test_file'"
[ -f "$test_file" ]
}
@test "ensure_user_file preserves ownership for non-root users" {
test_file="$HOME/.cache/file-ownership-test.txt"
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_file '$test_file'"
current_uid=$(id -u)
file_uid=$(/usr/bin/stat -f%u "$test_file")
[ "$file_uid" = "$current_uid" ]
}
@test "ensure_user_dir early stop optimization works" {
test_dir="$HOME/.cache/perf/test/nested"
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_dir '$test_dir'"
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_dir '$test_dir'"
[ -d "$test_dir" ]
current_uid=$(id -u)
dir_uid=$(/usr/bin/stat -f%u "$test_dir")
[ "$dir_uid" = "$current_uid" ]
}
@test "ensure_user_dir and ensure_user_file work together" {
cache_dir="$HOME/.cache/mole"
cache_file="$cache_dir/integration_test.log"
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_dir '$cache_dir'"
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'; ensure_user_file '$cache_file'"
[ -d "$cache_dir" ]
[ -f "$cache_file" ]
}
@test "multiple ensure_user_file calls in same directory" {
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'
ensure_user_file '$HOME/.config/mole/file1.txt'
ensure_user_file '$HOME/.config/mole/file2.txt'
ensure_user_file '$HOME/.config/mole/file3.txt'
"
[ -f "$HOME/.config/mole/file1.txt" ]
[ -f "$HOME/.config/mole/file2.txt" ]
[ -f "$HOME/.config/mole/file3.txt" ]
}
@test "ensure functions handle concurrent calls safely" {
bash -c "source '$PROJECT_ROOT/lib/core/base.sh'
ensure_user_dir '$HOME/.cache/concurrent' &
ensure_user_dir '$HOME/.cache/concurrent' &
wait
"
[ -d "$HOME/.cache/concurrent" ]
}