diff --git a/cmd/analyze/scanner.go b/cmd/analyze/scanner.go
index 0d2bb6c..982d497 100644
--- a/cmd/analyze/scanner.go
+++ b/cmd/analyze/scanner.go
@@ -119,6 +119,16 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 			size := getActualFileSize(fullPath, info)
 			atomic.AddInt64(&total, size)
 
+			// NOTE(review): this timer is created per entry (not reused), so the
+			// fast path still allocates; consider hoisting it out of the loop.
+			// NewTimer(0) fires immediately — stop and drain before first use.
+			timer := time.NewTimer(0)
+			if !timer.Stop() {
+				select {
+				case <-timer.C:
+				default:
+				}
+			}
 			select {
 			case entryChan <- dirEntry{
 				Name:       child.Name() + " →",
@@ -127,10 +137,26 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 				IsDir:      isDir,
 				LastAccess: getLastAccessTimeFromInfo(info),
 			}:
-			case <-time.After(100 * time.Millisecond):
-				// Skip if channel is blocked
+			default:
+				// If channel is full, use timer to wait with timeout
+				timer.Reset(100 * time.Millisecond)
+				select {
+				case entryChan <- dirEntry{
+					Name:       child.Name() + " →",
+					Path:       fullPath,
+					Size:       size,
+					IsDir:      isDir,
+					LastAccess: getLastAccessTimeFromInfo(info),
+				}:
+					if !timer.Stop() {
+						<-timer.C
+					}
+				case <-timer.C:
+					// Skip if channel is blocked
+				}
 			}
 			continue
+
 		}
 
 		if child.IsDir() {
@@ -162,6 +188,7 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 				atomic.AddInt64(&total, size)
 				atomic.AddInt64(dirsScanned, 1)
 
+				timer := time.NewTimer(100 * time.Millisecond)
 				select {
 				case entryChan <- dirEntry{
 					Name:       name,
@@ -170,7 +197,10 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 					IsDir:      true,
 					LastAccess: time.Time{},
 				}:
-				case <-time.After(100 * time.Millisecond):
+					if !timer.Stop() {
+						<-timer.C
+					}
+				case <-timer.C:
 				}
 			}(child.Name(), fullPath)
 			continue
@@ -195,6 +225,7 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 				atomic.AddInt64(&total, size)
 				atomic.AddInt64(dirsScanned, 1)
 
+				timer := time.NewTimer(100 * time.Millisecond)
 				select {
 				case entryChan <- dirEntry{
 					Name:       name,
@@ -203,7 +234,10 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 					IsDir:      true,
 					LastAccess: time.Time{},
 				}:
-				case <-time.After(100 * time.Millisecond):
+					if !timer.Stop() {
+						<-timer.C
+					}
+				case <-timer.C:
 				}
 			}(child.Name(), fullPath)
 			continue
@@ -219,6 +253,7 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 				atomic.AddInt64(&total, size)
 				atomic.AddInt64(dirsScanned, 1)
 
+				timer := time.NewTimer(100 * time.Millisecond)
 				select {
 				case entryChan <- dirEntry{
 					Name:       name,
@@ -227,7 +262,10 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 					IsDir:      true,
 					LastAccess: time.Time{},
 				}:
-				case <-time.After(100 * time.Millisecond):
+					if !timer.Stop() {
+						<-timer.C
+					}
+				case <-timer.C:
 				}
 			}(child.Name(), fullPath)
 			continue
@@ -243,6 +281,9 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 		atomic.AddInt64(filesScanned, 1)
 		atomic.AddInt64(bytesScanned, size)
 
+		// Stop-and-drain (below) releases the timer promptly after a
+		// successful send, instead of letting it linger as time.After would.
+		timer := time.NewTimer(100 * time.Millisecond)
 		select {
 		case entryChan <- dirEntry{
 			Name:       child.Name(),
@@ -251,16 +292,23 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 			IsDir:      false,
 			LastAccess: getLastAccessTimeFromInfo(info),
 		}:
-		case <-time.After(100 * time.Millisecond):
+			if !timer.Stop() {
+				<-timer.C
+			}
+		case <-timer.C:
 		}
 
 		// Track large files only.
 		if !shouldSkipFileForLargeTracking(fullPath) {
 			minSize := atomic.LoadInt64(&largeFileMinSize)
 			if size >= minSize {
+				timer.Reset(100 * time.Millisecond)
 				select {
 				case largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}:
-				case <-time.After(100 * time.Millisecond):
+					if !timer.Stop() {
+						<-timer.C
+					}
+				case <-timer.C:
 				}
 			}
 		}
@@ -471,6 +519,15 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, lar
 	maxConcurrent := min(runtime.NumCPU()*2, maxDirWorkers)
 	sem := make(chan struct{}, maxConcurrent)
 
+	// Reuse timer for large file sends
+	timer := time.NewTimer(0)
+	if !timer.Stop() {
+		select {
+		case <-timer.C:
+		default:
+		}
+	}
+
 	for _, child := range children {
 		fullPath := filepath.Join(root, child.Name())
 
@@ -536,9 +593,13 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, lar
 		if !shouldSkipFileForLargeTracking(fullPath) && largeFileMinSize != nil {
 			minSize := atomic.LoadInt64(largeFileMinSize)
 			if size >= minSize {
+				timer.Reset(100 * time.Millisecond)
 				select {
 				case largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}:
-				case <-time.After(100 * time.Millisecond):
+					if !timer.Stop() {
+						<-timer.C
+					}
+				case <-timer.C:
 				}
 			}
 		}
diff --git a/lib/core/app_protection.sh b/lib/core/app_protection.sh
index 7fae723..e260037 100755
--- a/lib/core/app_protection.sh
+++ b/lib/core/app_protection.sh
@@ -545,24 +545,53 @@ bundle_matches_pattern() {
     return 1
 }
 
+# Helper to build regex from array (Bash 3.2 compatible - no namerefs)
+# $1: Variable name to store result
+# $2...: Array elements (passed as expanded list)
+build_regex_var() {
+    local var_name="$1"
+    shift
+    local regex=""
+    for pattern in "$@"; do
+        # Escape dots . -> \. (other regex metacharacters are left as-is)
+        local p="${pattern//./\\.}"
+        # Convert * to .*
+        p="${p//\*/.*}"
+        # Start and end anchors
+        p="^${p}$"
+
+        if [[ -z "$regex" ]]; then
+            regex="$p"
+        else
+            regex="$regex|$p"
+        fi
+    done
+    eval "$var_name=\"\$regex\""
+}
+
+# Generate Regex strings once
+APPLE_UNINSTALLABLE_REGEX=""
+build_regex_var APPLE_UNINSTALLABLE_REGEX "${APPLE_UNINSTALLABLE_APPS[@]}"
+
+SYSTEM_CRITICAL_REGEX=""
+build_regex_var SYSTEM_CRITICAL_REGEX "${SYSTEM_CRITICAL_BUNDLES[@]}"
+
+DATA_PROTECTED_REGEX=""
+build_regex_var DATA_PROTECTED_REGEX "${DATA_PROTECTED_BUNDLES[@]}"
+
 # Check if application is a protected system component
 should_protect_from_uninstall() {
     local bundle_id="$1"
 
     # First check if it's an uninstallable Apple app
-    # These apps have com.apple.* bundle IDs but are NOT system-critical
-    for pattern in "${APPLE_UNINSTALLABLE_APPS[@]}"; do
-        if bundle_matches_pattern "$bundle_id" "$pattern"; then
-            return 1 # Can be uninstalled
-        fi
-    done
+    if [[ -n "$APPLE_UNINSTALLABLE_REGEX" && "$bundle_id" =~ $APPLE_UNINSTALLABLE_REGEX ]]; then
+        return 1 # Can be uninstalled
+    fi
 
     # Then check system-critical components
-    for pattern in "${SYSTEM_CRITICAL_BUNDLES[@]}"; do
-        if bundle_matches_pattern "$bundle_id" "$pattern"; then
-            return 0 # Protected
-        fi
-    done
+    if [[ -n "$SYSTEM_CRITICAL_REGEX" && "$bundle_id" =~ $SYSTEM_CRITICAL_REGEX ]]; then
+        return 0 # Protected
+    fi
 
     return 1
 }
@@ -570,12 +599,17 @@ should_protect_from_uninstall() {
 
 # Check if application data should be protected during cleanup
 should_protect_data() {
     local bundle_id="$1"
-    # Protect both system critical and data protected bundles during cleanup
-    for pattern in "${SYSTEM_CRITICAL_BUNDLES[@]}" "${DATA_PROTECTED_BUNDLES[@]}"; do
-        if bundle_matches_pattern "$bundle_id" "$pattern"; then
-            return 0
-        fi
-    done
+
+    # Check system critical (guard: an empty regex would match everything)
+    if [[ -n "$SYSTEM_CRITICAL_REGEX" && "$bundle_id" =~ $SYSTEM_CRITICAL_REGEX ]]; then
+        return 0
+    fi
+
+    # Check data protected
+    if [[ -n "$DATA_PROTECTED_REGEX" && "$bundle_id" =~ $DATA_PROTECTED_REGEX ]]; then
+        return 0
+    fi
+
     return 1
 }