mirror of https://github.com/tw93/Mole.git synced 2026-02-04 11:31:46 +00:00

improve analyze scanning performance, UI responsiveness, and symlink navigation

Tw93
2025-12-12 15:01:17 +08:00
parent dbdc9cdb45
commit 749ed2230b
9 changed files with 103 additions and 28 deletions

Binary file not shown.

Binary file not shown.

View File

@@ -10,17 +10,17 @@ const (
defaultViewport = 12 // Default viewport when terminal height is unknown
overviewCacheTTL = 7 * 24 * time.Hour // 7 days
overviewCacheFile = "overview_sizes.json"
-duTimeout = 60 * time.Second // Increased for large directories
+duTimeout = 30 * time.Second // Fail faster to fall back to the concurrent scan
mdlsTimeout = 5 * time.Second
-maxConcurrentOverview = 3 // Scan up to 3 overview dirs concurrently
+maxConcurrentOverview = 8 // Increased parallel overview scans
batchUpdateSize = 100 // Batch atomic updates every N items
cacheModTimeGrace = 30 * time.Minute // Ignore minor directory mtime bumps
// Worker pool configuration
-minWorkers = 8 // Minimum workers for better I/O throughput
-maxWorkers = 64 // Maximum workers to avoid excessive goroutines
-cpuMultiplier = 2 // Worker multiplier per CPU core for I/O-bound operations
-maxDirWorkers = 16 // Maximum concurrent subdirectory scans
+minWorkers = 16 // Safe baseline for older machines
+maxWorkers = 64 // Cap at 64 to avoid OS resource contention
+cpuMultiplier = 4 // Balanced CPU usage
+maxDirWorkers = 32 // Limit concurrent subdirectory scans
openCommandTimeout = 10 * time.Second // Timeout for open/reveal commands
)
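
For reference, a minimal Go sketch of how the tuned constants above could be combined into an actual worker count. The repository's own helper is not shown in this diff, so the function name and clamping logic here are assumptions, not the project's implementation:

package main

import (
	"fmt"
	"runtime"
)

// Mirror of the tuned values above, repeated here so the sketch is self-contained.
const (
	minWorkers    = 16 // Safe baseline for older machines
	maxWorkers    = 64 // Cap to avoid OS resource contention
	cpuMultiplier = 4  // Balanced CPU usage
)

// workerCount is a hypothetical helper: scale the worker pool with the CPU
// count for I/O-bound scanning, then clamp it to the configured bounds.
func workerCount() int {
	n := runtime.NumCPU() * cpuMultiplier
	if n < minWorkers {
		n = minWorkers
	}
	if n > maxWorkers {
		n = maxWorkers
	}
	return n
}

func main() {
	fmt.Println("scan workers:", workerCount())
}

Under this scheme, raising cpuMultiplier from 2 to 4 and minWorkers from 8 to 16 roughly doubles the pool on a typical 8-core machine, in line with the commit's aim of better scan throughput.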

View File

@@ -376,7 +376,7 @@ func (m model) scanCmd(path string) tea.Cmd {
}
func tickCmd() tea.Cmd {
-return tea.Tick(time.Millisecond*120, func(t time.Time) tea.Msg {
+return tea.Tick(time.Millisecond*80, func(t time.Time) tea.Msg {
return tickMsg(t)
})
}
@@ -429,7 +429,14 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
m.status = fmt.Sprintf("Scan failed: %v", msg.err)
return m, nil
}
-m.entries = msg.result.Entries
+// Filter out 0-byte items for cleaner view
+filteredEntries := make([]dirEntry, 0, len(msg.result.Entries))
+for _, e := range msg.result.Entries {
+if e.Size > 0 {
+filteredEntries = append(filteredEntries, e)
+}
+}
+m.entries = filteredEntries
m.largeFiles = msg.result.LargeFiles
m.totalSize = msg.result.TotalSize
m.status = fmt.Sprintf("Scanned %s", humanizeBytes(m.totalSize))
@@ -639,7 +646,24 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
m.scanning = false
return m, nil
case "r":
-// Invalidate cache before rescanning to ensure fresh data
+if m.inOverviewMode() {
+// In overview mode, clear cache and re-scan known entries
+m.overviewSizeCache = make(map[string]int64)
+m.overviewScanningSet = make(map[string]bool)
+m.hydrateOverviewEntries() // Reset sizes to pending
+// Reset all entries to pending state for visual feedback
+for i := range m.entries {
+m.entries[i].Size = -1
+}
+m.totalSize = 0
+m.status = "Refreshing..."
+m.overviewScanning = true
+return m, tea.Batch(m.scheduleOverviewScans(), tickCmd())
+}
+// Normal mode: Invalidate cache before rescanning
invalidateCache(m.path)
m.status = "Refreshing..."
m.scanning = true

View File

@@ -97,7 +97,17 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
// Skip symlinks to avoid following them into unexpected locations
// Use Type() instead of IsDir() to check without following symlinks
if child.Type()&fs.ModeSymlink != 0 {
-// For symlinks, get their target info but mark them specially
+// For symlinks, check if they point to a directory
+targetInfo, err := os.Stat(fullPath)
+isDir := false
+if err == nil && targetInfo.IsDir() {
+isDir = true
+}
+// Count only the link itself toward the parent: the existing logic sizes it via
+// getActualFileSize on the link's own info, so the target's size is never
+// double-counted. The os.Stat above is used purely to decide whether navigation
+// into the symlink should be allowed; child.Info() already gives the link's own metadata.
info, err := child.Info()
if err != nil {
continue
@@ -109,7 +119,7 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
Name: child.Name() + " →", // Add arrow to indicate symlink
Path: fullPath,
Size: size,
-IsDir: false, // Don't allow navigation into symlinks
+IsDir: isDir, // Allow navigation if the target is a directory
LastAccess: getLastAccessTimeFromInfo(info),
}
continue
@@ -287,7 +297,7 @@ func calculateDirSizeFast(root string, filesScanned, dirsScanned, bytesScanned *
default:
}
-if currentPath != nil {
+if currentPath != nil && atomic.LoadInt64(filesScanned)%int64(batchUpdateSize) == 0 {
*currentPath = dirPath
}
@@ -496,8 +506,9 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, fil
largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}
}
-// Update current path
-if currentPath != nil {
+// Update current path occasionally to prevent UI jitter
+if currentPath != nil && atomic.LoadInt64(filesScanned)%int64(batchUpdateSize) == 0 {
*currentPath = fullPath
}
}
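
The symlink changes in this file hinge on the difference between stat-ing a link's target and stat-ing the link itself. Below is a standalone Go sketch of that distinction; the helper name and example path are illustrative, not taken from the repository:

package main

import (
	"fmt"
	"os"
)

// describeSymlink contrasts the two kinds of stat used by the scanner:
// os.Stat follows the link and reports on its target (used above to decide
// whether navigation into the entry is allowed), while os.Lstat, like
// fs.DirEntry.Info on a symlink, reports on the link itself, whose small
// size is what gets counted toward the parent directory.
func describeSymlink(path string) {
	if target, err := os.Stat(path); err == nil {
		fmt.Println("target is a directory:", target.IsDir())
	}
	if link, err := os.Lstat(path); err == nil {
		fmt.Println("link size in bytes:", link.Size())
	}
}

func main() {
	describeSymlink("/tmp/example-link") // hypothetical path for illustration
}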

View File

@@ -323,18 +323,18 @@ func (m model) View() string {
if m.inOverviewMode() {
// Show ← Back if there's history (entered from a parent directory)
if len(m.history) > 0 {
fmt.Fprintf(&b, "%s↑↓←→ | Enter | R Refresh | O Open | F File | ← Back | Q Quit%s\n", colorGray, colorReset)
fmt.Fprintf(&b, "%s↑↓←→ | Enter | R Refresh | O Open | F File | ← Back | Q Quit%s\n", colorGray, colorReset)
} else {
fmt.Fprintf(&b, "%s↑↓→ | Enter | R Refresh | O Open | F File | Q Quit%s\n", colorGray, colorReset)
fmt.Fprintf(&b, "%s↑↓→ | Enter | R Refresh | O Open | F File | Q Quit%s\n", colorGray, colorReset)
}
} else if m.showLargeFiles {
fmt.Fprintf(&b, "%s↑↓← | R Refresh | O Open | F File | ⌫ Del | ← Back | Q Quit%s\n", colorGray, colorReset)
fmt.Fprintf(&b, "%s↑↓← | R Refresh | O Open | F File | ⌫ Del | ← Back | Q Quit%s\n", colorGray, colorReset)
} else {
largeFileCount := len(m.largeFiles)
if largeFileCount > 0 {
fmt.Fprintf(&b, "%s↑↓←→ | Enter | R Refresh | O Open | F File | ⌫ Del | T Top(%d) | Q Quit%s\n", colorGray, largeFileCount, colorReset)
fmt.Fprintf(&b, "%s↑↓←→ | Enter | R Refresh | O Open | F File | ⌫ Del | T Top(%d) | Q Quit%s\n", colorGray, largeFileCount, colorReset)
} else {
fmt.Fprintf(&b, "%s↑↓←→ | Enter | R Refresh | O Open | F File | ⌫ Del | Q Quit%s\n", colorGray, colorReset)
fmt.Fprintf(&b, "%s↑↓←→ | Enter | R Refresh | O Open | F File | ⌫ Del | Q Quit%s\n", colorGray, colorReset)
}
}
if m.deleteConfirm && m.deleteTarget != nil {

View File

@@ -130,7 +130,7 @@ EOF
items+=('mail_downloads|Mail Downloads|Clear old mail attachments (> 30 days)|true')
items+=('swap_cleanup|Swap Refresh|Reset swap files and dynamic pager|true')
items+=('spotlight_cache_cleanup|Spotlight Cache|Clear user-level Spotlight indexes|true')
-items+=('developer_cleanup|Developer Cleanup|Clear Xcode DerivedData & DeviceSupport|false')
+items+=('developer_cleanup|Developer Cleanup|Clear Xcode DerivedData & DeviceSupport|true')
# Output items as JSON
local first=true

View File

@@ -510,10 +510,10 @@ paginated_multi_select() {
"${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN}${NC}"
"${GRAY}Space Select${NC}"
"${GRAY}Enter${NC}"
"${GRAY}F Refresh${NC}"
"${GRAY}R Refresh${NC}"
"${GRAY}${filter_text}${NC}"
"${GRAY}S ${sort_status}${NC}"
"${GRAY}R ${reverse_arrow}${NC}"
"${GRAY}O ${reverse_arrow}${NC}"
"${GRAY}Q Exit${NC}"
)
_print_wrapped_controls "$sep" "${_segs_all[@]}"
@@ -641,20 +641,60 @@ paginated_multi_select() {
rebuild_view
fi
;;
"CHAR:j")
if [[ "$filter_mode" != "true" ]]; then
# Down navigation
if [[ ${#view_indices[@]} -gt 0 ]]; then
local absolute_index=$((top_index + cursor_pos))
local last_index=$((${#view_indices[@]} - 1))
if [[ $absolute_index -lt $last_index ]]; then
local visible_count=$((${#view_indices[@]} - top_index))
[[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
if [[ $cursor_pos -lt $((visible_count - 1)) ]]; then
((cursor_pos++))
elif [[ $((top_index + visible_count)) -lt ${#view_indices[@]} ]]; then
((top_index++))
fi
fi
fi
else
filter_query+="j"
fi
;;
"CHAR:k")
if [[ "$filter_mode" != "true" ]]; then
# Up navigation
if [[ ${#view_indices[@]} -gt 0 ]]; then
if [[ $cursor_pos -gt 0 ]]; then
((cursor_pos--))
elif [[ $top_index -gt 0 ]]; then
((top_index--))
fi
fi
else
filter_query+="k"
fi
;;
"CHAR:f" | "CHAR:F")
if [[ "$filter_mode" == "true" ]]; then
filter_query+="${key#CHAR:}"
+fi
+# F is currently unbound in normal mode to avoid conflict with Refresh (R)
+;;
+"CHAR:r" | "CHAR:R")
+if [[ "$filter_mode" == "true" ]]; then
+filter_query+="${key#CHAR:}"
else
-# Trigger Refresh signal
+# Trigger Refresh signal (Unified with Analyze)
cleanup
return 10
fi
;;
"CHAR:r")
# lower-case r: behave like reverse when NOT in filter mode
"CHAR:o" | "CHAR:O")
if [[ "$filter_mode" == "true" ]]; then
filter_query+="r"
else
filter_query+="${key#CHAR:}"
elif [[ "$has_metadata" == "true" ]]; then
# O toggles reverse order (Unified Sort Order)
if [[ "$sort_reverse" == "true" ]]; then
sort_reverse="false"
else

View File

@@ -212,7 +212,7 @@ paginated_multi_select() {
# Clear any remaining lines at bottom
printf "${clear_line}\n" >&2
printf "${clear_line}${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN} | Space | Enter | Q Exit${NC}\n" >&2
printf "${clear_line}${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN} | Space | Enter | Q Exit${NC}\n" >&2
# Clear one more line to ensure no artifacts
printf "${clear_line}" >&2