mirror of https://github.com/tw93/Mole.git synced 2026-02-10 21:49:16 +00:00

chore: restructure windows branch (move windows/ content to root, remove macos files)

Tw93
2026-01-10 13:23:29 +08:00
parent e84a457c2f
commit edf5ed09a9
140 changed files with 1472 additions and 34059 deletions

BIN  cmd/analyze/analyze.exe (Normal file)
Binary file not shown.


@@ -1,360 +0,0 @@
package main
import (
"encoding/gob"
"os"
"path/filepath"
"strings"
"sync/atomic"
"testing"
"time"
)
func resetOverviewSnapshotForTest() {
overviewSnapshotMu.Lock()
overviewSnapshotCache = nil
overviewSnapshotLoaded = false
overviewSnapshotMu.Unlock()
}
func TestScanPathConcurrentBasic(t *testing.T) {
root := t.TempDir()
rootFile := filepath.Join(root, "root.txt")
if err := os.WriteFile(rootFile, []byte("root-data"), 0o644); err != nil {
t.Fatalf("write root file: %v", err)
}
nested := filepath.Join(root, "nested")
if err := os.MkdirAll(nested, 0o755); err != nil {
t.Fatalf("create nested dir: %v", err)
}
fileOne := filepath.Join(nested, "a.bin")
if err := os.WriteFile(fileOne, []byte("alpha"), 0o644); err != nil {
t.Fatalf("write file one: %v", err)
}
fileTwo := filepath.Join(nested, "b.bin")
if err := os.WriteFile(fileTwo, []byte(strings.Repeat("b", 32)), 0o644); err != nil {
t.Fatalf("write file two: %v", err)
}
linkPath := filepath.Join(root, "link-to-a")
if err := os.Symlink(fileOne, linkPath); err != nil {
t.Fatalf("create symlink: %v", err)
}
var filesScanned, dirsScanned, bytesScanned int64
current := ""
result, err := scanPathConcurrent(root, &filesScanned, &dirsScanned, &bytesScanned, &current)
if err != nil {
t.Fatalf("scanPathConcurrent returned error: %v", err)
}
linkInfo, err := os.Lstat(linkPath)
if err != nil {
t.Fatalf("stat symlink: %v", err)
}
expectedDirSize := int64(len("alpha") + len(strings.Repeat("b", 32)))
expectedRootFileSize := int64(len("root-data"))
expectedLinkSize := getActualFileSize(linkPath, linkInfo)
expectedTotal := expectedDirSize + expectedRootFileSize + expectedLinkSize
if result.TotalSize != expectedTotal {
t.Fatalf("expected total size %d, got %d", expectedTotal, result.TotalSize)
}
if got := atomic.LoadInt64(&filesScanned); got != 3 {
t.Fatalf("expected 3 files scanned, got %d", got)
}
if dirs := atomic.LoadInt64(&dirsScanned); dirs == 0 {
t.Fatalf("expected directory scan count to increase")
}
if bytes := atomic.LoadInt64(&bytesScanned); bytes == 0 {
t.Fatalf("expected byte counter to increase")
}
foundSymlink := false
for _, entry := range result.Entries {
if strings.HasSuffix(entry.Name, " →") {
foundSymlink = true
if entry.IsDir {
t.Fatalf("symlink entry should not be marked as directory")
}
}
}
if !foundSymlink {
t.Fatalf("expected symlink entry to be present in scan result")
}
}
func TestDeletePathWithProgress(t *testing.T) {
// Skip in CI environments where Finder may not be available.
if os.Getenv("CI") != "" {
t.Skip("Skipping Finder-dependent test in CI")
}
parent := t.TempDir()
target := filepath.Join(parent, "target")
if err := os.MkdirAll(target, 0o755); err != nil {
t.Fatalf("create target: %v", err)
}
files := []string{
filepath.Join(target, "one.txt"),
filepath.Join(target, "two.txt"),
}
for _, f := range files {
if err := os.WriteFile(f, []byte("content"), 0o644); err != nil {
t.Fatalf("write %s: %v", f, err)
}
}
var counter int64
count, err := trashPathWithProgress(target, &counter)
if err != nil {
t.Fatalf("trashPathWithProgress returned error: %v", err)
}
if count != int64(len(files)) {
t.Fatalf("expected %d files trashed, got %d", len(files), count)
}
if _, err := os.Stat(target); !os.IsNotExist(err) {
t.Fatalf("expected target to be moved to Trash, stat err=%v", err)
}
}
func TestOverviewStoreAndLoad(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
resetOverviewSnapshotForTest()
t.Cleanup(resetOverviewSnapshotForTest)
path := filepath.Join(home, "project")
want := int64(123456)
if err := storeOverviewSize(path, want); err != nil {
t.Fatalf("storeOverviewSize: %v", err)
}
got, err := loadStoredOverviewSize(path)
if err != nil {
t.Fatalf("loadStoredOverviewSize: %v", err)
}
if got != want {
t.Fatalf("snapshot mismatch: want %d, got %d", want, got)
}
// Reload from disk and ensure value persists.
resetOverviewSnapshotForTest()
got, err = loadStoredOverviewSize(path)
if err != nil {
t.Fatalf("loadStoredOverviewSize after reset: %v", err)
}
if got != want {
t.Fatalf("snapshot mismatch after reset: want %d, got %d", want, got)
}
}
func TestCacheSaveLoadRoundTrip(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
target := filepath.Join(home, "cache-target")
if err := os.MkdirAll(target, 0o755); err != nil {
t.Fatalf("create target dir: %v", err)
}
result := scanResult{
Entries: []dirEntry{
{Name: "alpha", Path: filepath.Join(target, "alpha"), Size: 10, IsDir: true},
},
LargeFiles: []fileEntry{
{Name: "big.bin", Path: filepath.Join(target, "big.bin"), Size: 2048},
},
TotalSize: 42,
}
if err := saveCacheToDisk(target, result); err != nil {
t.Fatalf("saveCacheToDisk: %v", err)
}
cache, err := loadCacheFromDisk(target)
if err != nil {
t.Fatalf("loadCacheFromDisk: %v", err)
}
if cache.TotalSize != result.TotalSize {
t.Fatalf("total size mismatch: want %d, got %d", result.TotalSize, cache.TotalSize)
}
if len(cache.Entries) != len(result.Entries) {
t.Fatalf("entry count mismatch: want %d, got %d", len(result.Entries), len(cache.Entries))
}
if len(cache.LargeFiles) != len(result.LargeFiles) {
t.Fatalf("large file count mismatch: want %d, got %d", len(result.LargeFiles), len(cache.LargeFiles))
}
}
func TestMeasureOverviewSize(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
resetOverviewSnapshotForTest()
t.Cleanup(resetOverviewSnapshotForTest)
target := filepath.Join(home, "measure")
if err := os.MkdirAll(target, 0o755); err != nil {
t.Fatalf("create target: %v", err)
}
content := []byte(strings.Repeat("x", 2048))
if err := os.WriteFile(filepath.Join(target, "data.bin"), content, 0o644); err != nil {
t.Fatalf("write file: %v", err)
}
size, err := measureOverviewSize(target)
if err != nil {
t.Fatalf("measureOverviewSize: %v", err)
}
if size <= 0 {
t.Fatalf("expected positive size, got %d", size)
}
// Ensure snapshot stored.
cached, err := loadStoredOverviewSize(target)
if err != nil {
t.Fatalf("loadStoredOverviewSize: %v", err)
}
if cached != size {
t.Fatalf("snapshot mismatch: want %d, got %d", size, cached)
}
}
func TestIsCleanableDir(t *testing.T) {
if !isCleanableDir("/Users/test/project/node_modules") {
t.Fatalf("expected node_modules to be cleanable")
}
if isCleanableDir("/Users/test/Library/Caches/AppCache") {
t.Fatalf("Library caches should be handled by mo clean")
}
if isCleanableDir("") {
t.Fatalf("empty path should not be cleanable")
}
}
func TestHasUsefulVolumeMounts(t *testing.T) {
root := t.TempDir()
if hasUsefulVolumeMounts(root) {
t.Fatalf("empty directory should not report useful mounts")
}
hidden := filepath.Join(root, ".hidden")
if err := os.Mkdir(hidden, 0o755); err != nil {
t.Fatalf("create hidden dir: %v", err)
}
if hasUsefulVolumeMounts(root) {
t.Fatalf("hidden entries should not count as useful mounts")
}
mount := filepath.Join(root, "ExternalDrive")
if err := os.Mkdir(mount, 0o755); err != nil {
t.Fatalf("create mount dir: %v", err)
}
if !hasUsefulVolumeMounts(root) {
t.Fatalf("expected useful mount when real directory exists")
}
}
func TestLoadCacheExpiresWhenDirectoryChanges(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)
target := filepath.Join(home, "change-target")
if err := os.MkdirAll(target, 0o755); err != nil {
t.Fatalf("create target: %v", err)
}
result := scanResult{TotalSize: 5}
if err := saveCacheToDisk(target, result); err != nil {
t.Fatalf("saveCacheToDisk: %v", err)
}
// Advance mtime beyond grace period.
time.Sleep(time.Millisecond * 10)
if err := os.Chtimes(target, time.Now(), time.Now()); err != nil {
t.Fatalf("chtimes: %v", err)
}
// Simulate older cache entry to exceed grace window.
cachePath, err := getCachePath(target)
if err != nil {
t.Fatalf("getCachePath: %v", err)
}
if _, err := os.Stat(cachePath); err != nil {
t.Fatalf("stat cache: %v", err)
}
oldTime := time.Now().Add(-cacheModTimeGrace - time.Minute)
if err := os.Chtimes(cachePath, oldTime, oldTime); err != nil {
t.Fatalf("chtimes cache: %v", err)
}
file, err := os.Open(cachePath)
if err != nil {
t.Fatalf("open cache: %v", err)
}
var entry cacheEntry
if err := gob.NewDecoder(file).Decode(&entry); err != nil {
t.Fatalf("decode cache: %v", err)
}
_ = file.Close()
entry.ScanTime = time.Now().Add(-8 * 24 * time.Hour)
tmp := cachePath + ".tmp"
f, err := os.Create(tmp)
if err != nil {
t.Fatalf("create tmp cache: %v", err)
}
if err := gob.NewEncoder(f).Encode(&entry); err != nil {
t.Fatalf("encode tmp cache: %v", err)
}
_ = f.Close()
if err := os.Rename(tmp, cachePath); err != nil {
t.Fatalf("rename tmp cache: %v", err)
}
if _, err := loadCacheFromDisk(target); err == nil {
t.Fatalf("expected cache load to fail after stale scan time")
}
}
func TestScanPathPermissionError(t *testing.T) {
root := t.TempDir()
lockedDir := filepath.Join(root, "locked")
if err := os.Mkdir(lockedDir, 0o755); err != nil {
t.Fatalf("create locked dir: %v", err)
}
// Create a file before locking.
if err := os.WriteFile(filepath.Join(lockedDir, "secret.txt"), []byte("shh"), 0o644); err != nil {
t.Fatalf("write secret: %v", err)
}
// Remove permissions.
if err := os.Chmod(lockedDir, 0o000); err != nil {
t.Fatalf("chmod 000: %v", err)
}
defer func() {
// Restore permissions for cleanup.
_ = os.Chmod(lockedDir, 0o755)
}()
var files, dirs, bytes int64
current := ""
// Scanning the locked dir itself should fail.
_, err := scanPathConcurrent(lockedDir, &files, &dirs, &bytes, &current)
if err == nil {
t.Fatalf("expected error scanning locked directory, got nil")
}
if !os.IsPermission(err) {
t.Logf("unexpected error type: %v", err)
}
}


@@ -1,346 +0,0 @@
package main
import (
"context"
"encoding/gob"
"encoding/json"
"fmt"
"os"
"path/filepath"
"sync"
"time"
"github.com/cespare/xxhash/v2"
)
type overviewSizeSnapshot struct {
Size int64 `json:"size"`
Updated time.Time `json:"updated"`
}
var (
overviewSnapshotMu sync.Mutex
overviewSnapshotCache map[string]overviewSizeSnapshot
overviewSnapshotLoaded bool
)
func snapshotFromModel(m model) historyEntry {
return historyEntry{
Path: m.path,
Entries: cloneDirEntries(m.entries),
LargeFiles: cloneFileEntries(m.largeFiles),
TotalSize: m.totalSize,
TotalFiles: m.totalFiles,
Selected: m.selected,
EntryOffset: m.offset,
LargeSelected: m.largeSelected,
LargeOffset: m.largeOffset,
IsOverview: m.isOverview,
}
}
func cacheSnapshot(m model) historyEntry {
entry := snapshotFromModel(m)
entry.Dirty = false
return entry
}
func cloneDirEntries(entries []dirEntry) []dirEntry {
if len(entries) == 0 {
return nil
}
copied := make([]dirEntry, len(entries))
copy(copied, entries) //nolint:all
return copied
}
func cloneFileEntries(files []fileEntry) []fileEntry {
if len(files) == 0 {
return nil
}
copied := make([]fileEntry, len(files))
copy(copied, files) //nolint:all
return copied
}
func ensureOverviewSnapshotCacheLocked() error {
if overviewSnapshotLoaded {
return nil
}
storePath, err := getOverviewSizeStorePath()
if err != nil {
return err
}
data, err := os.ReadFile(storePath)
if err != nil {
if os.IsNotExist(err) {
overviewSnapshotCache = make(map[string]overviewSizeSnapshot)
overviewSnapshotLoaded = true
return nil
}
return err
}
if len(data) == 0 {
overviewSnapshotCache = make(map[string]overviewSizeSnapshot)
overviewSnapshotLoaded = true
return nil
}
var snapshots map[string]overviewSizeSnapshot
if err := json.Unmarshal(data, &snapshots); err != nil || snapshots == nil {
backupPath := storePath + ".corrupt"
_ = os.Rename(storePath, backupPath)
overviewSnapshotCache = make(map[string]overviewSizeSnapshot)
overviewSnapshotLoaded = true
return nil
}
overviewSnapshotCache = snapshots
overviewSnapshotLoaded = true
return nil
}
func getOverviewSizeStorePath() (string, error) {
cacheDir, err := getCacheDir()
if err != nil {
return "", err
}
return filepath.Join(cacheDir, overviewCacheFile), nil
}
func loadStoredOverviewSize(path string) (int64, error) {
if path == "" {
return 0, fmt.Errorf("empty path")
}
overviewSnapshotMu.Lock()
defer overviewSnapshotMu.Unlock()
if err := ensureOverviewSnapshotCacheLocked(); err != nil {
return 0, err
}
if overviewSnapshotCache == nil {
return 0, fmt.Errorf("snapshot cache unavailable")
}
if snapshot, ok := overviewSnapshotCache[path]; ok && snapshot.Size > 0 {
if time.Since(snapshot.Updated) < overviewCacheTTL {
return snapshot.Size, nil
}
return 0, fmt.Errorf("snapshot expired")
}
return 0, fmt.Errorf("snapshot not found")
}
func storeOverviewSize(path string, size int64) error {
if path == "" || size <= 0 {
return fmt.Errorf("invalid overview size")
}
overviewSnapshotMu.Lock()
defer overviewSnapshotMu.Unlock()
if err := ensureOverviewSnapshotCacheLocked(); err != nil {
return err
}
if overviewSnapshotCache == nil {
overviewSnapshotCache = make(map[string]overviewSizeSnapshot)
}
overviewSnapshotCache[path] = overviewSizeSnapshot{
Size: size,
Updated: time.Now(),
}
return persistOverviewSnapshotLocked()
}
func persistOverviewSnapshotLocked() error {
storePath, err := getOverviewSizeStorePath()
if err != nil {
return err
}
tmpPath := storePath + ".tmp"
data, err := json.MarshalIndent(overviewSnapshotCache, "", " ")
if err != nil {
return err
}
if err := os.WriteFile(tmpPath, data, 0644); err != nil {
return err
}
return os.Rename(tmpPath, storePath)
}
func loadOverviewCachedSize(path string) (int64, error) {
if path == "" {
return 0, fmt.Errorf("empty path")
}
if snapshot, err := loadStoredOverviewSize(path); err == nil {
return snapshot, nil
}
cacheEntry, err := loadCacheFromDisk(path)
if err != nil {
return 0, err
}
_ = storeOverviewSize(path, cacheEntry.TotalSize)
return cacheEntry.TotalSize, nil
}
func getCacheDir() (string, error) {
home, err := os.UserHomeDir()
if err != nil {
return "", err
}
cacheDir := filepath.Join(home, ".cache", "mole")
if err := os.MkdirAll(cacheDir, 0755); err != nil {
return "", err
}
return cacheDir, nil
}
func getCachePath(path string) (string, error) {
cacheDir, err := getCacheDir()
if err != nil {
return "", err
}
hash := xxhash.Sum64String(path)
filename := fmt.Sprintf("%x.cache", hash)
return filepath.Join(cacheDir, filename), nil
}
func loadCacheFromDisk(path string) (*cacheEntry, error) {
cachePath, err := getCachePath(path)
if err != nil {
return nil, err
}
file, err := os.Open(cachePath)
if err != nil {
return nil, err
}
defer file.Close() //nolint:errcheck
var entry cacheEntry
decoder := gob.NewDecoder(file)
if err := decoder.Decode(&entry); err != nil {
return nil, err
}
info, err := os.Stat(path)
if err != nil {
return nil, err
}
if info.ModTime().After(entry.ModTime) {
// Allow grace window.
if cacheModTimeGrace <= 0 || info.ModTime().Sub(entry.ModTime) > cacheModTimeGrace {
return nil, fmt.Errorf("cache expired: directory modified")
}
}
if time.Since(entry.ScanTime) > 7*24*time.Hour {
return nil, fmt.Errorf("cache expired: too old")
}
return &entry, nil
}
func saveCacheToDisk(path string, result scanResult) error {
cachePath, err := getCachePath(path)
if err != nil {
return err
}
info, err := os.Stat(path)
if err != nil {
return err
}
entry := cacheEntry{
Entries: result.Entries,
LargeFiles: result.LargeFiles,
TotalSize: result.TotalSize,
TotalFiles: result.TotalFiles,
ModTime: info.ModTime(),
ScanTime: time.Now(),
}
file, err := os.Create(cachePath)
if err != nil {
return err
}
defer file.Close() //nolint:errcheck
encoder := gob.NewEncoder(file)
return encoder.Encode(entry)
}
// peekCacheTotalFiles attempts to read the total file count from cache,
// ignoring expiration. Used for initial scan progress estimates.
func peekCacheTotalFiles(path string) (int64, error) {
cachePath, err := getCachePath(path)
if err != nil {
return 0, err
}
file, err := os.Open(cachePath)
if err != nil {
return 0, err
}
defer file.Close() //nolint:errcheck
var entry cacheEntry
decoder := gob.NewDecoder(file)
if err := decoder.Decode(&entry); err != nil {
return 0, err
}
return entry.TotalFiles, nil
}
func invalidateCache(path string) {
cachePath, err := getCachePath(path)
if err == nil {
_ = os.Remove(cachePath)
}
removeOverviewSnapshot(path)
}
func removeOverviewSnapshot(path string) {
if path == "" {
return
}
overviewSnapshotMu.Lock()
defer overviewSnapshotMu.Unlock()
if err := ensureOverviewSnapshotCacheLocked(); err != nil {
return
}
if overviewSnapshotCache == nil {
return
}
if _, ok := overviewSnapshotCache[path]; ok {
delete(overviewSnapshotCache, path)
_ = persistOverviewSnapshotLocked()
}
}
// prefetchOverviewCache warms overview cache in background.
func prefetchOverviewCache(ctx context.Context) {
entries := createOverviewEntries()
var needScan []string
for _, entry := range entries {
if size, err := loadStoredOverviewSize(entry.Path); err == nil && size > 0 {
continue
}
needScan = append(needScan, entry.Path)
}
if len(needScan) == 0 {
return
}
for _, path := range needScan {
select {
case <-ctx.Done():
return
default:
}
size, err := measureOverviewSize(path)
if err == nil && size > 0 {
_ = storeOverviewSize(path, size)
}
}
}


@@ -1,107 +0,0 @@
package main
import (
"path/filepath"
"strings"
)
// isCleanableDir marks paths safe to delete manually (not handled by mo clean).
func isCleanableDir(path string) bool {
if path == "" {
return false
}
// Exclude paths mo clean already handles.
if isHandledByMoClean(path) {
return false
}
baseName := filepath.Base(path)
// Project dependencies and build outputs are safe.
if projectDependencyDirs[baseName] {
return true
}
return false
}
// isHandledByMoClean checks if a path is cleaned by mo clean.
func isHandledByMoClean(path string) bool {
cleanPaths := []string{
"/Library/Caches/",
"/Library/Logs/",
"/Library/Saved Application State/",
"/.Trash/",
"/Library/DiagnosticReports/",
}
for _, p := range cleanPaths {
if strings.Contains(path, p) {
return true
}
}
return false
}
// Project dependency and build directories.
var projectDependencyDirs = map[string]bool{
// JavaScript/Node.
"node_modules": true,
"bower_components": true,
".yarn": true,
".pnpm-store": true,
// Python.
"venv": true,
".venv": true,
"virtualenv": true,
"__pycache__": true,
".pytest_cache": true,
".mypy_cache": true,
".ruff_cache": true,
".tox": true,
".eggs": true,
"htmlcov": true,
".ipynb_checkpoints": true,
// Ruby.
"vendor": true,
".bundle": true,
// Java/Kotlin/Scala.
".gradle": true,
"out": true,
// Build outputs.
"build": true,
"dist": true,
"target": true,
".next": true,
".nuxt": true,
".output": true,
".parcel-cache": true,
".turbo": true,
".vite": true,
".nx": true,
"coverage": true,
".coverage": true,
".nyc_output": true,
// Frontend framework outputs.
".angular": true,
".svelte-kit": true,
".astro": true,
".docusaurus": true,
// Apple dev.
"DerivedData": true,
"Pods": true,
".build": true,
"Carthage": true,
".dart_tool": true,
// Other tools.
".terraform": true,
}


@@ -1,248 +0,0 @@
package main
import "time"
const (
maxEntries = 30
maxLargeFiles = 30
barWidth = 24
minLargeFileSize = 100 << 20
defaultViewport = 12
overviewCacheTTL = 7 * 24 * time.Hour
overviewCacheFile = "overview_sizes.json"
duTimeout = 30 * time.Second
mdlsTimeout = 5 * time.Second
maxConcurrentOverview = 8
batchUpdateSize = 100
cacheModTimeGrace = 30 * time.Minute
// Worker pool limits.
minWorkers = 16
maxWorkers = 64
cpuMultiplier = 4
maxDirWorkers = 32
openCommandTimeout = 10 * time.Second
)
var foldDirs = map[string]bool{
// VCS.
".git": true,
".svn": true,
".hg": true,
// JavaScript/Node.
"node_modules": true,
".npm": true,
"_npx": true,
"_cacache": true,
"_logs": true,
"_locks": true,
"_quick": true,
"_libvips": true,
"_prebuilds": true,
"_update-notifier-last-checked": true,
".yarn": true,
".pnpm-store": true,
".next": true,
".nuxt": true,
"bower_components": true,
".vite": true,
".turbo": true,
".parcel-cache": true,
".nx": true,
".rush": true,
"tnpm": true,
".tnpm": true,
".bun": true,
".deno": true,
// Python.
"__pycache__": true,
".pytest_cache": true,
".mypy_cache": true,
".ruff_cache": true,
"venv": true,
".venv": true,
"virtualenv": true,
".tox": true,
"site-packages": true,
".eggs": true,
"*.egg-info": true,
".pyenv": true,
".poetry": true,
".pip": true,
".pipx": true,
// Ruby/Go/PHP (vendor), Java/Kotlin/Scala/Rust (target).
"vendor": true,
".bundle": true,
"gems": true,
".rbenv": true,
"target": true,
".gradle": true,
".m2": true,
".ivy2": true,
"out": true,
"pkg": true,
"composer.phar": true,
".composer": true,
".cargo": true,
// Build outputs.
"build": true,
"dist": true,
".output": true,
"coverage": true,
".coverage": true,
// IDE.
".idea": true,
".vscode": true,
".vs": true,
".fleet": true,
// Cache directories.
".cache": true,
"__MACOSX": true,
".DS_Store": true,
".Trash": true,
"Caches": true,
".Spotlight-V100": true,
".fseventsd": true,
".DocumentRevisions-V100": true,
".TemporaryItems": true,
"$RECYCLE.BIN": true,
".temp": true,
".tmp": true,
"_temp": true,
"_tmp": true,
".Homebrew": true,
".rustup": true,
".sdkman": true,
".nvm": true,
// macOS.
"Application Scripts": true,
"Saved Application State": true,
// iCloud.
"Mobile Documents": true,
// Containers.
".docker": true,
".containerd": true,
// Mobile development.
"Pods": true,
"DerivedData": true,
".build": true,
"xcuserdata": true,
"Carthage": true,
".dart_tool": true,
// Web frameworks.
".angular": true,
".svelte-kit": true,
".astro": true,
".solid": true,
// Databases.
".mysql": true,
".postgres": true,
"mongodb": true,
// Other.
".terraform": true,
".vagrant": true,
"tmp": true,
"temp": true,
}
var skipSystemDirs = map[string]bool{
"dev": true,
"tmp": true,
"private": true,
"cores": true,
"net": true,
"home": true,
"System": true,
"sbin": true,
"bin": true,
"etc": true,
"var": true,
"opt": false,
"usr": false,
"Volumes": true,
"Network": true,
".vol": true,
".Spotlight-V100": true,
".fseventsd": true,
".DocumentRevisions-V100": true,
".TemporaryItems": true,
".MobileBackups": true,
}
var defaultSkipDirs = map[string]bool{
"nfs": true,
"PHD": true,
"Permissions": true,
}
var skipExtensions = map[string]bool{
".go": true,
".js": true,
".ts": true,
".tsx": true,
".jsx": true,
".json": true,
".md": true,
".txt": true,
".yml": true,
".yaml": true,
".xml": true,
".html": true,
".css": true,
".scss": true,
".sass": true,
".less": true,
".py": true,
".rb": true,
".java": true,
".kt": true,
".rs": true,
".swift": true,
".m": true,
".mm": true,
".c": true,
".cpp": true,
".h": true,
".hpp": true,
".cs": true,
".sql": true,
".db": true,
".lock": true,
".gradle": true,
".mjs": true,
".cjs": true,
".coffee": true,
".dart": true,
".svelte": true,
".vue": true,
".nim": true,
".hx": true,
}
var spinnerFrames = []string{"|", "/", "-", "\\", "|", "/", "-", "\\"}
const (
colorPurple = "\033[0;35m"
colorPurpleBold = "\033[1;35m"
colorGray = "\033[0;90m"
colorRed = "\033[0;31m"
colorYellow = "\033[0;33m"
colorGreen = "\033[0;32m"
colorBlue = "\033[0;34m"
colorCyan = "\033[0;36m"
colorReset = "\033[0m"
colorBold = "\033[1m"
)


@@ -1,146 +0,0 @@
package main
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"sync/atomic"
"time"
tea "github.com/charmbracelet/bubbletea"
)
const trashTimeout = 30 * time.Second
func deletePathCmd(path string, counter *int64) tea.Cmd {
return func() tea.Msg {
count, err := trashPathWithProgress(path, counter)
return deleteProgressMsg{
done: true,
err: err,
count: count,
path: path,
}
}
}
// deleteMultiplePathsCmd moves paths to Trash and aggregates results.
func deleteMultiplePathsCmd(paths []string, counter *int64) tea.Cmd {
return func() tea.Msg {
var totalCount int64
var errors []string
// Process deeper paths first to avoid parent/child conflicts.
pathsToDelete := append([]string(nil), paths...)
sort.Slice(pathsToDelete, func(i, j int) bool {
return strings.Count(pathsToDelete[i], string(filepath.Separator)) > strings.Count(pathsToDelete[j], string(filepath.Separator))
})
for _, path := range pathsToDelete {
count, err := trashPathWithProgress(path, counter)
totalCount += count
if err != nil {
if os.IsNotExist(err) {
continue
}
errors = append(errors, err.Error())
}
}
var resultErr error
if len(errors) > 0 {
resultErr = &multiDeleteError{errors: errors}
}
return deleteProgressMsg{
done: true,
err: resultErr,
count: totalCount,
path: "",
}
}
}
// multiDeleteError holds multiple deletion errors.
type multiDeleteError struct {
errors []string
}
func (e *multiDeleteError) Error() string {
if len(e.errors) == 1 {
return e.errors[0]
}
return strings.Join(e.errors[:min(3, len(e.errors))], "; ")
}
// trashPathWithProgress moves a path to Trash using Finder.
// This allows users to recover accidentally deleted files.
func trashPathWithProgress(root string, counter *int64) (int64, error) {
// Verify path exists (use Lstat to handle broken symlinks).
info, err := os.Lstat(root)
if err != nil {
return 0, err
}
// Count items for progress reporting.
var count int64
if info.IsDir() {
_ = filepath.WalkDir(root, func(_ string, d os.DirEntry, err error) error {
if err != nil {
return nil
}
if !d.IsDir() {
count++
if counter != nil {
atomic.StoreInt64(counter, count)
}
}
return nil
})
} else {
count = 1
if counter != nil {
atomic.StoreInt64(counter, 1)
}
}
// Move to Trash using Finder AppleScript.
if err := moveToTrash(root); err != nil {
return 0, err
}
return count, nil
}
// moveToTrash uses macOS Finder to move a file/directory to Trash.
// This is the safest method as it uses the system's native trash mechanism.
func moveToTrash(path string) error {
absPath, err := filepath.Abs(path)
if err != nil {
return fmt.Errorf("failed to resolve path: %w", err)
}
// Escape path for AppleScript (handle quotes and backslashes).
escapedPath := strings.ReplaceAll(absPath, "\\", "\\\\")
escapedPath = strings.ReplaceAll(escapedPath, "\"", "\\\"")
script := fmt.Sprintf(`tell application "Finder" to delete POSIX file "%s"`, escapedPath)
ctx, cancel := context.WithTimeout(context.Background(), trashTimeout)
defer cancel()
cmd := exec.CommandContext(ctx, "osascript", "-e", script)
output, err := cmd.CombinedOutput()
if err != nil {
if ctx.Err() == context.DeadlineExceeded {
return fmt.Errorf("timeout moving to Trash")
}
return fmt.Errorf("failed to move to Trash: %s", strings.TrimSpace(string(output)))
}
return nil
}


@@ -1,87 +0,0 @@
package main
import (
"os"
"path/filepath"
"testing"
)
func TestTrashPathWithProgress(t *testing.T) {
// Skip in CI environments where Finder may not be available.
if os.Getenv("CI") != "" {
t.Skip("Skipping Finder-dependent test in CI")
}
parent := t.TempDir()
target := filepath.Join(parent, "target")
if err := os.MkdirAll(target, 0o755); err != nil {
t.Fatalf("create target: %v", err)
}
files := []string{
filepath.Join(target, "one.txt"),
filepath.Join(target, "two.txt"),
}
for _, f := range files {
if err := os.WriteFile(f, []byte("content"), 0o644); err != nil {
t.Fatalf("write %s: %v", f, err)
}
}
var counter int64
count, err := trashPathWithProgress(target, &counter)
if err != nil {
t.Fatalf("trashPathWithProgress returned error: %v", err)
}
if count != int64(len(files)) {
t.Fatalf("expected %d files trashed, got %d", len(files), count)
}
if _, err := os.Stat(target); !os.IsNotExist(err) {
t.Fatalf("expected target to be moved to Trash, stat err=%v", err)
}
}
func TestDeleteMultiplePathsCmdHandlesParentChild(t *testing.T) {
// Skip in CI environments where Finder may not be available.
if os.Getenv("CI") != "" {
t.Skip("Skipping Finder-dependent test in CI")
}
base := t.TempDir()
parent := filepath.Join(base, "parent")
child := filepath.Join(parent, "child")
// Structure: parent/fileA, parent/child/fileC.
if err := os.MkdirAll(child, 0o755); err != nil {
t.Fatalf("mkdir: %v", err)
}
if err := os.WriteFile(filepath.Join(parent, "fileA"), []byte("a"), 0o644); err != nil {
t.Fatalf("write fileA: %v", err)
}
if err := os.WriteFile(filepath.Join(child, "fileC"), []byte("c"), 0o644); err != nil {
t.Fatalf("write fileC: %v", err)
}
var counter int64
msg := deleteMultiplePathsCmd([]string{parent, child}, &counter)()
progress, ok := msg.(deleteProgressMsg)
if !ok {
t.Fatalf("expected deleteProgressMsg, got %T", msg)
}
if progress.err != nil {
t.Fatalf("unexpected error: %v", progress.err)
}
if progress.count != 2 {
t.Fatalf("expected 2 files trashed, got %d", progress.count)
}
if _, err := os.Stat(parent); !os.IsNotExist(err) {
t.Fatalf("expected parent to be moved to Trash, err=%v", err)
}
}
func TestMoveToTrashNonExistent(t *testing.T) {
err := moveToTrash("/nonexistent/path/that/does/not/exist")
if err == nil {
t.Fatal("expected error for non-existent path")
}
}


@@ -1,247 +0,0 @@
package main
import (
"fmt"
"os"
"strings"
"time"
)
func displayPath(path string) string {
home, err := os.UserHomeDir()
if err != nil || home == "" {
return path
}
if strings.HasPrefix(path, home) {
return strings.Replace(path, home, "~", 1)
}
return path
}
// truncateMiddle trims the middle, keeping head and tail.
func truncateMiddle(s string, maxWidth int) string {
runes := []rune(s)
currentWidth := displayWidth(s)
if currentWidth <= maxWidth {
return s
}
if maxWidth < 10 {
width := 0
for i, r := range runes {
width += runeWidth(r)
if width > maxWidth {
return string(runes[:i])
}
}
return s
}
targetHeadWidth := (maxWidth - 3) / 3
targetTailWidth := maxWidth - 3 - targetHeadWidth
headWidth := 0
headIdx := 0
for i, r := range runes {
w := runeWidth(r)
if headWidth+w > targetHeadWidth {
break
}
headWidth += w
headIdx = i + 1
}
tailWidth := 0
tailIdx := len(runes)
for i := len(runes) - 1; i >= 0; i-- {
w := runeWidth(runes[i])
if tailWidth+w > targetTailWidth {
break
}
tailWidth += w
tailIdx = i
}
return string(runes[:headIdx]) + "..." + string(runes[tailIdx:])
}
func formatNumber(n int64) string {
if n < 1000 {
return fmt.Sprintf("%d", n)
}
if n < 1000000 {
return fmt.Sprintf("%.1fk", float64(n)/1000)
}
return fmt.Sprintf("%.1fM", float64(n)/1000000)
}
func humanizeBytes(size int64) string {
if size < 0 {
return "0 B"
}
const unit = 1024
if size < unit {
return fmt.Sprintf("%d B", size)
}
div, exp := int64(unit), 0
for n := size / unit; n >= unit; n /= unit {
div *= unit
exp++
}
value := float64(size) / float64(div)
return fmt.Sprintf("%.1f %cB", value, "KMGTPE"[exp])
}
func coloredProgressBar(value, maxValue int64, percent float64) string {
if maxValue <= 0 {
return colorGray + strings.Repeat("░", barWidth) + colorReset
}
filled := min(int((value*int64(barWidth))/maxValue), barWidth)
var barColor string
if percent >= 50 {
barColor = colorRed
} else if percent >= 20 {
barColor = colorYellow
} else if percent >= 5 {
barColor = colorBlue
} else {
barColor = colorGreen
}
var bar strings.Builder
bar.WriteString(barColor)
for i := range barWidth {
if i < filled {
if i < filled-1 {
bar.WriteString("█")
} else {
remainder := (value * int64(barWidth)) % maxValue
if remainder > maxValue/2 {
bar.WriteString("█")
} else if remainder > maxValue/4 {
bar.WriteString("▓")
} else {
bar.WriteString("▒")
}
}
} else {
bar.WriteString(colorGray + "░" + barColor)
}
}
return bar.String() + colorReset
}
// runeWidth returns display width for wide characters and emoji.
func runeWidth(r rune) int {
if r >= 0x4E00 && r <= 0x9FFF || // CJK Unified Ideographs
r >= 0x3400 && r <= 0x4DBF || // CJK Extension A
r >= 0x20000 && r <= 0x2A6DF || // CJK Extension B
r >= 0x2A700 && r <= 0x2B73F || // CJK Extension C
r >= 0x2B740 && r <= 0x2B81F || // CJK Extension D
r >= 0x2B820 && r <= 0x2CEAF || // CJK Extension E
r >= 0x3040 && r <= 0x30FF || // Hiragana and Katakana
r >= 0x31F0 && r <= 0x31FF || // Katakana Phonetic Extensions
r >= 0xAC00 && r <= 0xD7AF || // Hangul Syllables
r >= 0xFF00 && r <= 0xFFEF || // Fullwidth Forms
r >= 0x1F300 && r <= 0x1F6FF || // Miscellaneous Symbols and Pictographs (includes Transport)
r >= 0x1F900 && r <= 0x1F9FF || // Supplemental Symbols and Pictographs
r >= 0x2600 && r <= 0x26FF || // Miscellaneous Symbols
r >= 0x2700 && r <= 0x27BF || // Dingbats
r >= 0xFE10 && r <= 0xFE1F || // Vertical Forms
r >= 0x1F000 && r <= 0x1F02F { // Mahjong Tiles
return 2
}
return 1
}
func displayWidth(s string) int {
width := 0
for _, r := range s {
width += runeWidth(r)
}
return width
}
// calculateNameWidth computes name column width from terminal width.
func calculateNameWidth(termWidth int) int {
const fixedWidth = 61
available := termWidth - fixedWidth
if available < 24 {
return 24
}
if available > 60 {
return 60
}
return available
}
func trimNameWithWidth(name string, maxWidth int) string {
const (
ellipsis = "..."
ellipsisWidth = 3
)
runes := []rune(name)
widths := make([]int, len(runes))
for i, r := range runes {
widths[i] = runeWidth(r)
}
currentWidth := 0
for i, w := range widths {
if currentWidth+w > maxWidth {
subWidth := currentWidth
j := i
for j > 0 && subWidth+ellipsisWidth > maxWidth {
j--
subWidth -= widths[j]
}
if j == 0 {
return ellipsis
}
return string(runes[:j]) + ellipsis
}
currentWidth += w
}
return name
}
func padName(name string, targetWidth int) string {
currentWidth := displayWidth(name)
if currentWidth >= targetWidth {
return name
}
return name + strings.Repeat(" ", targetWidth-currentWidth)
}
// formatUnusedTime formats time since last access.
func formatUnusedTime(lastAccess time.Time) string {
if lastAccess.IsZero() {
return ""
}
duration := time.Since(lastAccess)
days := int(duration.Hours() / 24)
if days < 90 {
return ""
}
months := days / 30
years := days / 365
if years >= 2 {
return fmt.Sprintf(">%dyr", years)
} else if years >= 1 {
return ">1yr"
} else if months >= 3 {
return fmt.Sprintf(">%dmo", months)
}
return ""
}


@@ -1,309 +0,0 @@
package main
import (
"strings"
"testing"
)
func TestRuneWidth(t *testing.T) {
tests := []struct {
name string
input rune
want int
}{
{"ASCII letter", 'a', 1},
{"ASCII digit", '5', 1},
{"Chinese character", '中', 2},
{"Japanese hiragana", 'あ', 2},
{"Korean hangul", '한', 2},
{"CJK ideograph", '語', 2},
{"Full-width number", '', 2},
{"ASCII space", ' ', 1},
{"Tab", '\t', 1},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := runeWidth(tt.input); got != tt.want {
t.Errorf("runeWidth(%q) = %d, want %d", tt.input, got, tt.want)
}
})
}
}
func TestDisplayWidth(t *testing.T) {
tests := []struct {
name string
input string
want int
}{
{"Empty string", "", 0},
{"ASCII only", "hello", 5},
{"Chinese only", "你好", 4},
{"Mixed ASCII and CJK", "hello世界", 9}, // 5 + 4
{"Path with CJK", "/Users/张三/文件", 16}, // 7 (ASCII) + 4 (张三) + 4 (文件) + 1 (/) = 16
{"Full-width chars", "", 6},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := displayWidth(tt.input); got != tt.want {
t.Errorf("displayWidth(%q) = %d, want %d", tt.input, got, tt.want)
}
})
}
}
func TestHumanizeBytes(t *testing.T) {
tests := []struct {
input int64
want string
}{
{-100, "0 B"},
{0, "0 B"},
{512, "512 B"},
{1023, "1023 B"},
{1024, "1.0 KB"},
{1536, "1.5 KB"},
{10240, "10.0 KB"},
{1048576, "1.0 MB"},
{1572864, "1.5 MB"},
{1073741824, "1.0 GB"},
{1099511627776, "1.0 TB"},
{1125899906842624, "1.0 PB"},
}
for _, tt := range tests {
got := humanizeBytes(tt.input)
if got != tt.want {
t.Errorf("humanizeBytes(%d) = %q, want %q", tt.input, got, tt.want)
}
}
}
func TestFormatNumber(t *testing.T) {
tests := []struct {
input int64
want string
}{
{0, "0"},
{500, "500"},
{999, "999"},
{1000, "1.0k"},
{1500, "1.5k"},
{999999, "1000.0k"},
{1000000, "1.0M"},
{1500000, "1.5M"},
}
for _, tt := range tests {
got := formatNumber(tt.input)
if got != tt.want {
t.Errorf("formatNumber(%d) = %q, want %q", tt.input, got, tt.want)
}
}
}
func TestTruncateMiddle(t *testing.T) {
tests := []struct {
name string
input string
maxWidth int
check func(t *testing.T, result string)
}{
{
name: "No truncation needed",
input: "short",
maxWidth: 10,
check: func(t *testing.T, result string) {
if result != "short" {
t.Errorf("Should not truncate short string, got %q", result)
}
},
},
{
name: "Truncate long ASCII",
input: "verylongfilename.txt",
maxWidth: 15,
check: func(t *testing.T, result string) {
if !strings.Contains(result, "...") {
t.Errorf("Truncated string should contain '...', got %q", result)
}
if displayWidth(result) > 15 {
t.Errorf("Truncated width %d exceeds max %d", displayWidth(result), 15)
}
},
},
{
name: "Truncate with CJK characters",
input: "非常长的中文文件名称.txt",
maxWidth: 20,
check: func(t *testing.T, result string) {
if !strings.Contains(result, "...") {
t.Errorf("Should truncate CJK string, got %q", result)
}
if displayWidth(result) > 20 {
t.Errorf("Truncated width %d exceeds max %d", displayWidth(result), 20)
}
},
},
{
name: "Very small width",
input: "longname",
maxWidth: 5,
check: func(t *testing.T, result string) {
if displayWidth(result) > 5 {
t.Errorf("Width %d exceeds max %d", displayWidth(result), 5)
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := truncateMiddle(tt.input, tt.maxWidth)
tt.check(t, result)
})
}
}
func TestDisplayPath(t *testing.T) {
tests := []struct {
name string
setup func() string
check func(t *testing.T, result string)
}{
{
name: "Replace home directory",
setup: func() string {
home := t.TempDir()
t.Setenv("HOME", home)
return home + "/Documents/file.txt"
},
check: func(t *testing.T, result string) {
if !strings.HasPrefix(result, "~/") {
t.Errorf("Expected path to start with ~/, got %q", result)
}
if !strings.HasSuffix(result, "Documents/file.txt") {
t.Errorf("Expected path to end with Documents/file.txt, got %q", result)
}
},
},
{
name: "Keep absolute path outside home",
setup: func() string {
t.Setenv("HOME", "/Users/test")
return "/var/log/system.log"
},
check: func(t *testing.T, result string) {
if result != "/var/log/system.log" {
t.Errorf("Expected unchanged path, got %q", result)
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
path := tt.setup()
result := displayPath(path)
tt.check(t, result)
})
}
}
func TestPadName(t *testing.T) {
tests := []struct {
name string
input string
targetWidth int
wantWidth int
}{
{"Pad ASCII", "test", 10, 10},
{"No padding needed", "longname", 5, 8},
{"Pad CJK", "中文", 10, 10},
{"Mixed CJK and ASCII", "hello世", 15, 15},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := padName(tt.input, tt.targetWidth)
gotWidth := displayWidth(result)
if gotWidth < tt.wantWidth && displayWidth(tt.input) < tt.targetWidth {
t.Errorf("padName(%q, %d) width = %d, want >= %d", tt.input, tt.targetWidth, gotWidth, tt.wantWidth)
}
})
}
}
func TestTrimNameWithWidth(t *testing.T) {
tests := []struct {
name string
input string
maxWidth int
check func(t *testing.T, result string)
}{
{
name: "Trim ASCII name",
input: "verylongfilename.txt",
maxWidth: 10,
check: func(t *testing.T, result string) {
if displayWidth(result) > 10 {
t.Errorf("Width exceeds max: %d > 10", displayWidth(result))
}
if !strings.HasSuffix(result, "...") {
t.Errorf("Expected ellipsis, got %q", result)
}
},
},
{
name: "Trim CJK name",
input: "很长的文件名称.txt",
maxWidth: 12,
check: func(t *testing.T, result string) {
if displayWidth(result) > 12 {
t.Errorf("Width exceeds max: %d > 12", displayWidth(result))
}
},
},
{
name: "No trimming needed",
input: "short.txt",
maxWidth: 20,
check: func(t *testing.T, result string) {
if result != "short.txt" {
t.Errorf("Should not trim, got %q", result)
}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := trimNameWithWidth(tt.input, tt.maxWidth)
tt.check(t, result)
})
}
}
func TestCalculateNameWidth(t *testing.T) {
tests := []struct {
termWidth int
wantMin int
wantMax int
}{
{80, 19, 60}, // 80 - 61 = 19
{120, 59, 60}, // 120 - 61 = 59
{200, 60, 60}, // Capped at 60
{70, 24, 60}, // Below minimum, use 24
{50, 24, 60}, // Very small, use minimum
}
for _, tt := range tests {
got := calculateNameWidth(tt.termWidth)
if got < tt.wantMin || got > tt.wantMax {
t.Errorf("calculateNameWidth(%d) = %d, want between %d and %d",
tt.termWidth, got, tt.wantMin, tt.wantMax)
}
}
}


@@ -1,39 +0,0 @@
package main
// entryHeap is a min-heap of dirEntry used to keep Top N largest entries.
type entryHeap []dirEntry
func (h entryHeap) Len() int { return len(h) }
func (h entryHeap) Less(i, j int) bool { return h[i].Size < h[j].Size }
func (h entryHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h *entryHeap) Push(x any) {
*h = append(*h, x.(dirEntry))
}
func (h *entryHeap) Pop() any {
old := *h
n := len(old)
x := old[n-1]
*h = old[0 : n-1]
return x
}
// largeFileHeap is a min-heap for fileEntry.
type largeFileHeap []fileEntry
func (h largeFileHeap) Len() int { return len(h) }
func (h largeFileHeap) Less(i, j int) bool { return h[i].Size < h[j].Size }
func (h largeFileHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h *largeFileHeap) Push(x any) {
*h = append(*h, x.(fileEntry))
}
func (h *largeFileHeap) Pop() any {
old := *h
n := len(old)
x := old[n-1]
*h = old[0 : n-1]
return x
}

File diff suppressed because it is too large

cmd/analyze/main_test.go (Normal file, 164 lines added)

@@ -0,0 +1,164 @@
//go:build windows

package main
import (
"os"
"path/filepath"
"testing"
)
func TestFormatBytes(t *testing.T) {
tests := []struct {
input int64
expected string
}{
{0, "0 B"},
{512, "512 B"},
{1024, "1.0 KB"},
{1536, "1.5 KB"},
{1048576, "1.0 MB"},
{1073741824, "1.0 GB"},
{1099511627776, "1.0 TB"},
}
for _, test := range tests {
result := formatBytes(test.input)
if result != test.expected {
t.Errorf("formatBytes(%d) = %s, expected %s", test.input, result, test.expected)
}
}
}
func TestTruncatePath(t *testing.T) {
tests := []struct {
input string
maxLen int
expected string
}{
{"C:\\short", 20, "C:\\short"},
{"C:\\this\\is\\a\\very\\long\\path\\that\\should\\be\\truncated", 30, "...ong\\path\\that\\should\\be\\truncated"},
}
for _, test := range tests {
result := truncatePath(test.input, test.maxLen)
if len(result) > test.maxLen && test.maxLen < len(test.input) {
// For truncated paths, just verify length constraint
if len(result) > test.maxLen+10 { // Allow some flexibility
t.Errorf("truncatePath(%s, %d) length = %d, expected <= %d", test.input, test.maxLen, len(result), test.maxLen)
}
}
}
}
func TestCleanablePatterns(t *testing.T) {
expectedCleanable := []string{
"node_modules",
"vendor",
".venv",
"venv",
"__pycache__",
"target",
"build",
"dist",
}
for _, pattern := range expectedCleanable {
if !cleanablePatterns[pattern] {
t.Errorf("Expected %s to be in cleanablePatterns", pattern)
}
}
}
func TestSkipPatterns(t *testing.T) {
expectedSkip := []string{
"$Recycle.Bin",
"System Volume Information",
"Windows",
"Program Files",
}
for _, pattern := range expectedSkip {
if !skipPatterns[pattern] {
t.Errorf("Expected %s to be in skipPatterns", pattern)
}
}
}
func TestCalculateDirSize(t *testing.T) {
// Create a temp directory with known content
tmpDir, err := os.MkdirTemp("", "mole_test_*")
if err != nil {
t.Fatalf("Failed to create temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
// Create a test file with known size
testFile := filepath.Join(tmpDir, "test.txt")
content := []byte("Hello, World!") // 13 bytes
if err := os.WriteFile(testFile, content, 0644); err != nil {
t.Fatalf("Failed to write test file: %v", err)
}
size := calculateDirSize(tmpDir)
if size != int64(len(content)) {
t.Errorf("calculateDirSize() = %d, expected %d", size, len(content))
}
}
func TestNewModel(t *testing.T) {
model := newModel("C:\\")
if model.path != "C:\\" {
t.Errorf("newModel path = %s, expected C:\\", model.path)
}
if !model.scanning {
t.Error("newModel should start in scanning state")
}
if model.multiSelected == nil {
t.Error("newModel multiSelected should be initialized")
}
if model.cache == nil {
t.Error("newModel cache should be initialized")
}
}
func TestScanDirectory(t *testing.T) {
// Create a temp directory with known structure
tmpDir, err := os.MkdirTemp("", "mole_scan_test_*")
if err != nil {
t.Fatalf("Failed to create temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
// Create subdirectory
subDir := filepath.Join(tmpDir, "subdir")
if err := os.Mkdir(subDir, 0755); err != nil {
t.Fatalf("Failed to create subdir: %v", err)
}
// Create test files
testFile1 := filepath.Join(tmpDir, "file1.txt")
testFile2 := filepath.Join(subDir, "file2.txt")
os.WriteFile(testFile1, []byte("content1"), 0644)
os.WriteFile(testFile2, []byte("content2"), 0644)
entries, largeFiles, totalSize, err := scanDirectory(tmpDir)
if err != nil {
t.Fatalf("scanDirectory error: %v", err)
}
if len(entries) != 2 { // subdir + file1.txt
t.Errorf("Expected 2 entries, got %d", len(entries))
}
if totalSize == 0 {
t.Error("totalSize should be greater than 0")
}
// No large files in this test
_ = largeFiles
}


@@ -1,663 +0,0 @@
package main
import (
"bytes"
"container/heap"
"context"
"fmt"
"io/fs"
"os"
"os/exec"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"golang.org/x/sync/singleflight"
)
var scanGroup singleflight.Group
func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *int64, currentPath *string) (scanResult, error) {
children, err := os.ReadDir(root)
if err != nil {
return scanResult{}, err
}
var total int64
// Keep Top N heaps.
entriesHeap := &entryHeap{}
heap.Init(entriesHeap)
largeFilesHeap := &largeFileHeap{}
heap.Init(largeFilesHeap)
// Worker pool sized for I/O-bound scanning.
numWorkers := max(runtime.NumCPU()*cpuMultiplier, minWorkers)
if numWorkers > maxWorkers {
numWorkers = maxWorkers
}
if numWorkers > len(children) {
numWorkers = len(children)
}
if numWorkers < 1 {
numWorkers = 1
}
sem := make(chan struct{}, numWorkers)
var wg sync.WaitGroup
// Collect results via channels.
entryChan := make(chan dirEntry, len(children))
largeFileChan := make(chan fileEntry, maxLargeFiles*2)
var collectorWg sync.WaitGroup
collectorWg.Add(2)
go func() {
defer collectorWg.Done()
for entry := range entryChan {
if entriesHeap.Len() < maxEntries {
heap.Push(entriesHeap, entry)
} else if entry.Size > (*entriesHeap)[0].Size {
heap.Pop(entriesHeap)
heap.Push(entriesHeap, entry)
}
}
}()
go func() {
defer collectorWg.Done()
for file := range largeFileChan {
if largeFilesHeap.Len() < maxLargeFiles {
heap.Push(largeFilesHeap, file)
} else if file.Size > (*largeFilesHeap)[0].Size {
heap.Pop(largeFilesHeap)
heap.Push(largeFilesHeap, file)
}
}
}()
isRootDir := root == "/"
home := os.Getenv("HOME")
isHomeDir := home != "" && root == home
for _, child := range children {
fullPath := filepath.Join(root, child.Name())
// Skip symlinks to avoid following unexpected targets.
if child.Type()&fs.ModeSymlink != 0 {
targetInfo, err := os.Stat(fullPath)
isDir := false
if err == nil && targetInfo.IsDir() {
isDir = true
}
// Count link size only to avoid double-counting targets.
info, err := child.Info()
if err != nil {
continue
}
size := getActualFileSize(fullPath, info)
atomic.AddInt64(&total, size)
entryChan <- dirEntry{
Name: child.Name() + " →",
Path: fullPath,
Size: size,
IsDir: isDir,
LastAccess: getLastAccessTimeFromInfo(info),
}
continue
}
if child.IsDir() {
if defaultSkipDirs[child.Name()] {
continue
}
// Skip system dirs at root.
if isRootDir && skipSystemDirs[child.Name()] {
continue
}
// ~/Library is scanned separately; reuse cache when possible.
if isHomeDir && child.Name() == "Library" {
wg.Add(1)
go func(name, path string) {
defer wg.Done()
sem <- struct{}{}
defer func() { <-sem }()
var size int64
if cached, err := loadStoredOverviewSize(path); err == nil && cached > 0 {
size = cached
} else if cached, err := loadCacheFromDisk(path); err == nil {
size = cached.TotalSize
} else {
size = calculateDirSizeConcurrent(path, largeFileChan, filesScanned, dirsScanned, bytesScanned, currentPath)
}
atomic.AddInt64(&total, size)
atomic.AddInt64(dirsScanned, 1)
entryChan <- dirEntry{
Name: name,
Path: path,
Size: size,
IsDir: true,
LastAccess: time.Time{},
}
}(child.Name(), fullPath)
continue
}
// Folded dirs: fast size without expanding.
if shouldFoldDirWithPath(child.Name(), fullPath) {
wg.Add(1)
go func(name, path string) {
defer wg.Done()
sem <- struct{}{}
defer func() { <-sem }()
size, err := getDirectorySizeFromDu(path)
if err != nil || size <= 0 {
size = calculateDirSizeFast(path, filesScanned, dirsScanned, bytesScanned, currentPath)
}
atomic.AddInt64(&total, size)
atomic.AddInt64(dirsScanned, 1)
entryChan <- dirEntry{
Name: name,
Path: path,
Size: size,
IsDir: true,
LastAccess: time.Time{},
}
}(child.Name(), fullPath)
continue
}
wg.Add(1)
go func(name, path string) {
defer wg.Done()
sem <- struct{}{}
defer func() { <-sem }()
size := calculateDirSizeConcurrent(path, largeFileChan, filesScanned, dirsScanned, bytesScanned, currentPath)
atomic.AddInt64(&total, size)
atomic.AddInt64(dirsScanned, 1)
entryChan <- dirEntry{
Name: name,
Path: path,
Size: size,
IsDir: true,
LastAccess: time.Time{},
}
}(child.Name(), fullPath)
continue
}
info, err := child.Info()
if err != nil {
continue
}
// Actual disk usage for sparse/cloud files.
size := getActualFileSize(fullPath, info)
atomic.AddInt64(&total, size)
atomic.AddInt64(filesScanned, 1)
atomic.AddInt64(bytesScanned, size)
entryChan <- dirEntry{
Name: child.Name(),
Path: fullPath,
Size: size,
IsDir: false,
LastAccess: getLastAccessTimeFromInfo(info),
}
// Track large files only.
if !shouldSkipFileForLargeTracking(fullPath) && size >= minLargeFileSize {
largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}
}
}
wg.Wait()
// Close channels and wait for collectors.
close(entryChan)
close(largeFileChan)
collectorWg.Wait()
// Convert heaps to sorted slices (descending).
entries := make([]dirEntry, entriesHeap.Len())
for i := len(entries) - 1; i >= 0; i-- {
entries[i] = heap.Pop(entriesHeap).(dirEntry)
}
largeFiles := make([]fileEntry, largeFilesHeap.Len())
for i := len(largeFiles) - 1; i >= 0; i-- {
largeFiles[i] = heap.Pop(largeFilesHeap).(fileEntry)
}
// Use Spotlight for large files when available.
if spotlightFiles := findLargeFilesWithSpotlight(root, minLargeFileSize); len(spotlightFiles) > 0 {
largeFiles = spotlightFiles
}
return scanResult{
Entries: entries,
LargeFiles: largeFiles,
TotalSize: total,
TotalFiles: atomic.LoadInt64(filesScanned),
}, nil
}
func shouldFoldDirWithPath(name, path string) bool {
if foldDirs[name] {
return true
}
// Handle npm cache structure.
if strings.Contains(path, "/.npm/") || strings.Contains(path, "/.tnpm/") {
parent := filepath.Base(filepath.Dir(path))
if parent == ".npm" || parent == ".tnpm" || strings.HasPrefix(parent, "_") {
return true
}
if len(name) == 1 {
return true
}
}
return false
}
func shouldSkipFileForLargeTracking(path string) bool {
ext := strings.ToLower(filepath.Ext(path))
return skipExtensions[ext]
}
// calculateDirSizeFast performs concurrent dir sizing using os.ReadDir.
func calculateDirSizeFast(root string, filesScanned, dirsScanned, bytesScanned *int64, currentPath *string) int64 {
var total int64
var wg sync.WaitGroup
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
concurrency := min(runtime.NumCPU()*4, 64)
sem := make(chan struct{}, concurrency)
var walk func(string)
walk = func(dirPath string) {
select {
case <-ctx.Done():
return
default:
}
if currentPath != nil && atomic.LoadInt64(filesScanned)%int64(batchUpdateSize) == 0 {
*currentPath = dirPath
}
entries, err := os.ReadDir(dirPath)
if err != nil {
return
}
var localBytes, localFiles int64
for _, entry := range entries {
if entry.IsDir() {
wg.Add(1)
subDir := filepath.Join(dirPath, entry.Name())
go func(p string) {
defer wg.Done()
sem <- struct{}{}
defer func() { <-sem }()
walk(p)
}(subDir)
atomic.AddInt64(dirsScanned, 1)
} else {
info, err := entry.Info()
if err == nil {
size := getActualFileSize(filepath.Join(dirPath, entry.Name()), info)
localBytes += size
localFiles++
}
}
}
if localBytes > 0 {
atomic.AddInt64(&total, localBytes)
atomic.AddInt64(bytesScanned, localBytes)
}
if localFiles > 0 {
atomic.AddInt64(filesScanned, localFiles)
}
}
walk(root)
wg.Wait()
return total
}
// Use Spotlight (mdfind) to quickly find large files.
func findLargeFilesWithSpotlight(root string, minSize int64) []fileEntry {
query := fmt.Sprintf("kMDItemFSSize >= %d", minSize)
ctx, cancel := context.WithTimeout(context.Background(), mdlsTimeout)
defer cancel()
cmd := exec.CommandContext(ctx, "mdfind", "-onlyin", root, query)
output, err := cmd.Output()
if err != nil {
return nil
}
var files []fileEntry
for line := range strings.Lines(strings.TrimSpace(string(output))) {
if line == "" {
continue
}
// Filter code files first (cheap).
if shouldSkipFileForLargeTracking(line) {
continue
}
// Filter folded directories (cheap string check).
if isInFoldedDir(line) {
continue
}
info, err := os.Lstat(line)
if err != nil {
continue
}
if info.IsDir() || info.Mode()&os.ModeSymlink != 0 {
continue
}
// Actual disk usage for sparse/cloud files.
actualSize := getActualFileSize(line, info)
files = append(files, fileEntry{
Name: filepath.Base(line),
Path: line,
Size: actualSize,
})
}
// Sort by size (descending).
sort.Slice(files, func(i, j int) bool {
return files[i].Size > files[j].Size
})
if len(files) > maxLargeFiles {
files = files[:maxLargeFiles]
}
return files
}
// isInFoldedDir checks if a path is inside a folded directory.
func isInFoldedDir(path string) bool {
parts := strings.SplitSeq(path, string(os.PathSeparator))
for part := range parts {
if foldDirs[part] {
return true
}
}
return false
}
func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, filesScanned, dirsScanned, bytesScanned *int64, currentPath *string) int64 {
children, err := os.ReadDir(root)
if err != nil {
return 0
}
var total int64
var wg sync.WaitGroup
// Limit concurrent subdirectory scans.
maxConcurrent := min(runtime.NumCPU()*2, maxDirWorkers)
sem := make(chan struct{}, maxConcurrent)
for _, child := range children {
fullPath := filepath.Join(root, child.Name())
if child.Type()&fs.ModeSymlink != 0 {
info, err := child.Info()
if err != nil {
continue
}
size := getActualFileSize(fullPath, info)
total += size
atomic.AddInt64(filesScanned, 1)
atomic.AddInt64(bytesScanned, size)
continue
}
if child.IsDir() {
if shouldFoldDirWithPath(child.Name(), fullPath) {
wg.Add(1)
go func(path string) {
defer wg.Done()
size, err := getDirectorySizeFromDu(path)
if err == nil && size > 0 {
atomic.AddInt64(&total, size)
atomic.AddInt64(bytesScanned, size)
atomic.AddInt64(dirsScanned, 1)
}
}(fullPath)
continue
}
wg.Add(1)
go func(path string) {
defer wg.Done()
sem <- struct{}{}
defer func() { <-sem }()
size := calculateDirSizeConcurrent(path, largeFileChan, filesScanned, dirsScanned, bytesScanned, currentPath)
atomic.AddInt64(&total, size)
atomic.AddInt64(dirsScanned, 1)
}(fullPath)
continue
}
info, err := child.Info()
if err != nil {
continue
}
size := getActualFileSize(fullPath, info)
total += size
atomic.AddInt64(filesScanned, 1)
atomic.AddInt64(bytesScanned, size)
if !shouldSkipFileForLargeTracking(fullPath) && size >= minLargeFileSize {
largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}
}
// Update current path occasionally to prevent UI jitter.
if currentPath != nil && atomic.LoadInt64(filesScanned)%int64(batchUpdateSize) == 0 {
*currentPath = fullPath
}
}
wg.Wait()
return total
}
// measureOverviewSize calculates the size of a directory using multiple strategies.
// When scanning Home, it excludes ~/Library to avoid duplicate counting.
func measureOverviewSize(path string) (int64, error) {
if path == "" {
return 0, fmt.Errorf("empty path")
}
path = filepath.Clean(path)
if !filepath.IsAbs(path) {
return 0, fmt.Errorf("path must be absolute: %s", path)
}
if _, err := os.Stat(path); err != nil {
return 0, fmt.Errorf("cannot access path: %v", err)
}
// Determine if we should exclude ~/Library (when scanning Home)
home := os.Getenv("HOME")
excludePath := ""
if home != "" && path == home {
excludePath = filepath.Join(home, "Library")
}
if cached, err := loadStoredOverviewSize(path); err == nil && cached > 0 {
return cached, nil
}
if duSize, err := getDirectorySizeFromDuWithExclude(path, excludePath); err == nil && duSize > 0 {
_ = storeOverviewSize(path, duSize)
return duSize, nil
}
if logicalSize, err := getDirectoryLogicalSizeWithExclude(path, excludePath); err == nil && logicalSize > 0 {
_ = storeOverviewSize(path, logicalSize)
return logicalSize, nil
}
if cached, err := loadCacheFromDisk(path); err == nil {
_ = storeOverviewSize(path, cached.TotalSize)
return cached.TotalSize, nil
}
return 0, fmt.Errorf("unable to measure directory size with fast methods")
}
func getDirectorySizeFromDu(path string) (int64, error) {
return getDirectorySizeFromDuWithExclude(path, "")
}
func getDirectorySizeFromDuWithExclude(path string, excludePath string) (int64, error) {
runDuSize := func(target string) (int64, error) {
if _, err := os.Stat(target); err != nil {
return 0, err
}
ctx, cancel := context.WithTimeout(context.Background(), duTimeout)
defer cancel()
cmd := exec.CommandContext(ctx, "du", "-sk", target)
var stdout, stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
if ctx.Err() == context.DeadlineExceeded {
return 0, fmt.Errorf("du timeout after %v", duTimeout)
}
if stderr.Len() > 0 {
return 0, fmt.Errorf("du failed: %v (%s)", err, stderr.String())
}
return 0, fmt.Errorf("du failed: %v", err)
}
fields := strings.Fields(stdout.String())
if len(fields) == 0 {
return 0, fmt.Errorf("du output empty")
}
kb, err := strconv.ParseInt(fields[0], 10, 64)
if err != nil {
return 0, fmt.Errorf("failed to parse du output: %v", err)
}
if kb <= 0 {
return 0, fmt.Errorf("du size invalid: %d", kb)
}
return kb * 1024, nil
}
// When excluding a path (e.g., ~/Library), subtract only that exact directory instead of ignoring every "Library"
if excludePath != "" {
totalSize, err := runDuSize(path)
if err != nil {
return 0, err
}
excludeSize, err := runDuSize(excludePath)
if err != nil {
if !os.IsNotExist(err) {
return 0, err
}
excludeSize = 0
}
if excludeSize > totalSize {
excludeSize = 0
}
return totalSize - excludeSize, nil
}
return runDuSize(path)
}
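// Illustrative arithmetic for the exclude-by-subtraction approach above (numbers are invented):
// if `du -sk ~` reports 52428800 KB (50 GiB) and `du -sk ~/Library` reports 20971520 KB (20 GiB),
// the function returns (52428800 - 20971520) * 1024 = 32212254720 bytes, i.e. the 30 GiB of Home
// usage that lives outside ~/Library.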
// getDirectoryLogicalSizeWithExclude walks the tree rooted at path and sums the size of every
// readable regular file (via getActualFileSize), skipping excludePath entirely.
func getDirectoryLogicalSizeWithExclude(path string, excludePath string) (int64, error) {
var total int64
err := filepath.WalkDir(path, func(p string, d fs.DirEntry, err error) error {
if err != nil {
if os.IsPermission(err) {
return filepath.SkipDir
}
return nil
}
// Skip excluded path
if excludePath != "" && p == excludePath {
return filepath.SkipDir
}
if d.IsDir() {
return nil
}
info, err := d.Info()
if err != nil {
return nil
}
total += getActualFileSize(p, info)
return nil
})
if err != nil && err != filepath.SkipDir {
return 0, err
}
return total, nil
}
// getActualFileSize returns the smaller of the allocated size (512-byte blocks) and the logical
// size, falling back to the logical size when block information is unavailable, so sparse or
// cloned files are not over-counted.
func getActualFileSize(_ string, info fs.FileInfo) int64 {
stat, ok := info.Sys().(*syscall.Stat_t)
if !ok {
return info.Size()
}
actualSize := stat.Blocks * 512
if actualSize < info.Size() {
return actualSize
}
return info.Size()
}
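// sparseFileExample is an illustrative sketch (not part of the original file) of why the allocated
// size matters: on file systems with sparse-file support (APFS included), a file whose logical size
// was set with Truncate can occupy far fewer blocks, and getActualFileSize reports the smaller value.
func sparseFileExample(path string) {
	f, err := os.Create(path)
	if err != nil {
		return
	}
	_ = f.Truncate(1 << 30) // 1 GiB logical size, typically almost no allocated blocks
	_ = f.Close()
	if info, err := os.Lstat(path); err == nil {
		fmt.Printf("logical %d bytes, counted %d bytes\n", info.Size(), getActualFileSize(path, info))
	}
}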
// getLastAccessTime returns the last access time of path, or the zero time if it cannot be stat'd.
func getLastAccessTime(path string) time.Time {
info, err := os.Stat(path)
if err != nil {
return time.Time{}
}
return getLastAccessTimeFromInfo(info)
}
// getLastAccessTimeFromInfo extracts the access time from a FileInfo backed by syscall.Stat_t.
func getLastAccessTimeFromInfo(info fs.FileInfo) time.Time {
stat, ok := info.Sys().(*syscall.Stat_t)
if !ok {
return time.Time{}
}
return time.Unix(stat.Atimespec.Sec, stat.Atimespec.Nsec)
}

View File

@@ -1,45 +0,0 @@
package main
import (
"os"
"path/filepath"
"testing"
)
func writeFileWithSize(t *testing.T, path string, size int) {
t.Helper()
if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
t.Fatalf("mkdir %s: %v", path, err)
}
content := make([]byte, size)
if err := os.WriteFile(path, content, 0o644); err != nil {
t.Fatalf("write %s: %v", path, err)
}
}
func TestGetDirectoryLogicalSizeWithExclude(t *testing.T) {
base := t.TempDir()
homeFile := filepath.Join(base, "fileA")
libFile := filepath.Join(base, "Library", "fileB")
projectLibFile := filepath.Join(base, "Projects", "Library", "fileC")
writeFileWithSize(t, homeFile, 100)
writeFileWithSize(t, libFile, 200)
writeFileWithSize(t, projectLibFile, 300)
total, err := getDirectoryLogicalSizeWithExclude(base, "")
if err != nil {
t.Fatalf("getDirectoryLogicalSizeWithExclude (no exclude) error: %v", err)
}
if total != 600 {
t.Fatalf("expected total 600 bytes, got %d", total)
}
excluding, err := getDirectoryLogicalSizeWithExclude(base, filepath.Join(base, "Library"))
if err != nil {
t.Fatalf("getDirectoryLogicalSizeWithExclude (exclude Library) error: %v", err)
}
if excluding != 400 {
t.Fatalf("expected 400 bytes when excluding top-level Library, got %d", excluding)
}
}
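// A possible companion test sketch for the du-backed variant (not in the original file). It needs
// the additional "os/exec" import and a working `du` binary, so it skips when du is unavailable,
// and it only asserts an inequality because du sizes include allocation overhead.
func TestGetDirectorySizeFromDuWithExclude(t *testing.T) {
	if _, err := exec.LookPath("du"); err != nil {
		t.Skip("du not available")
	}
	base := t.TempDir()
	writeFileWithSize(t, filepath.Join(base, "fileA"), 4096)
	writeFileWithSize(t, filepath.Join(base, "Library", "fileB"), 8192)
	total, err := getDirectorySizeFromDuWithExclude(base, "")
	if err != nil {
		t.Fatalf("du total: %v", err)
	}
	excluding, err := getDirectorySizeFromDuWithExclude(base, filepath.Join(base, "Library"))
	if err != nil {
		t.Fatalf("du excluding Library: %v", err)
	}
	if excluding <= 0 || excluding > total {
		t.Fatalf("expected 0 < excluding <= total, got excluding=%d total=%d", excluding, total)
	}
}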

View File

@@ -1,428 +0,0 @@
//go:build darwin
package main
import (
"fmt"
"strings"
"sync/atomic"
)
// View renders the TUI.
func (m model) View() string {
var b strings.Builder
fmt.Fprintln(&b)
if m.inOverviewMode() {
fmt.Fprintf(&b, "%sAnalyze Disk%s\n", colorPurpleBold, colorReset)
if m.overviewScanning {
allPending := true
for _, entry := range m.entries {
if entry.Size >= 0 {
allPending = false
break
}
}
if allPending {
fmt.Fprintf(&b, "%s%s%s%s Analyzing disk usage, please wait...%s\n",
colorCyan, colorBold,
spinnerFrames[m.spinner],
colorReset, colorReset)
return b.String()
} else {
fmt.Fprintf(&b, "%sSelect a location to explore:%s ", colorGray, colorReset)
fmt.Fprintf(&b, "%s%s%s%s Scanning...\n\n", colorCyan, colorBold, spinnerFrames[m.spinner], colorReset)
}
} else {
hasPending := false
for _, entry := range m.entries {
if entry.Size < 0 {
hasPending = true
break
}
}
if hasPending {
fmt.Fprintf(&b, "%sSelect a location to explore:%s ", colorGray, colorReset)
fmt.Fprintf(&b, "%s%s%s%s Scanning...\n\n", colorCyan, colorBold, spinnerFrames[m.spinner], colorReset)
} else {
fmt.Fprintf(&b, "%sSelect a location to explore:%s\n\n", colorGray, colorReset)
}
}
} else {
fmt.Fprintf(&b, "%sAnalyze Disk%s %s%s%s", colorPurpleBold, colorReset, colorGray, displayPath(m.path), colorReset)
if !m.scanning {
fmt.Fprintf(&b, " | Total: %s", humanizeBytes(m.totalSize))
}
fmt.Fprintf(&b, "\n\n")
}
if m.deleting {
count := int64(0)
if m.deleteCount != nil {
count = atomic.LoadInt64(m.deleteCount)
}
fmt.Fprintf(&b, "%s%s%s%s Deleting: %s%s items%s removed, please wait...\n",
colorCyan, colorBold,
spinnerFrames[m.spinner],
colorReset,
colorYellow, formatNumber(count), colorReset)
return b.String()
}
if m.scanning {
filesScanned, dirsScanned, bytesScanned := m.getScanProgress()
progressPrefix := ""
if m.lastTotalFiles > 0 {
percent := float64(filesScanned) / float64(m.lastTotalFiles) * 100
// While scanning, cap the displayed progress at 99% to avoid "100% but still working" confusion.
if percent > 99 {
percent = 99
}
progressPrefix = fmt.Sprintf(" %s(%.0f%%)%s", colorCyan, percent, colorReset)
}
fmt.Fprintf(&b, "%s%s%s%s Scanning%s: %s%s files%s, %s%s dirs%s, %s%s%s\n",
colorCyan, colorBold,
spinnerFrames[m.spinner],
colorReset,
progressPrefix,
colorYellow, formatNumber(filesScanned), colorReset,
colorYellow, formatNumber(dirsScanned), colorReset,
colorGreen, humanizeBytes(bytesScanned), colorReset)
if m.currentPath != nil {
currentPath := *m.currentPath
if currentPath != "" {
shortPath := displayPath(currentPath)
shortPath = truncateMiddle(shortPath, 50)
fmt.Fprintf(&b, "%s%s%s\n", colorGray, shortPath, colorReset)
}
}
return b.String()
}
if m.showLargeFiles {
if len(m.largeFiles) == 0 {
fmt.Fprintln(&b, " No large files found (>=100MB)")
} else {
viewport := calculateViewport(m.height, true)
start := max(m.largeOffset, 0)
end := min(start+viewport, len(m.largeFiles))
maxLargeSize := int64(1)
for _, file := range m.largeFiles {
if file.Size > maxLargeSize {
maxLargeSize = file.Size
}
}
nameWidth := calculateNameWidth(m.width)
for idx := start; idx < end; idx++ {
file := m.largeFiles[idx]
shortPath := displayPath(file.Path)
shortPath = truncateMiddle(shortPath, nameWidth)
paddedPath := padName(shortPath, nameWidth)
entryPrefix := " "
nameColor := ""
sizeColor := colorGray
numColor := ""
isMultiSelected := m.largeMultiSelected != nil && m.largeMultiSelected[file.Path]
selectIcon := "○"
if isMultiSelected {
selectIcon = fmt.Sprintf("%s●%s", colorGreen, colorReset)
nameColor = colorGreen
}
if idx == m.largeSelected {
entryPrefix = fmt.Sprintf(" %s%s▶%s ", colorCyan, colorBold, colorReset)
if !isMultiSelected {
nameColor = colorCyan
}
sizeColor = colorCyan
numColor = colorCyan
}
size := humanizeBytes(file.Size)
bar := coloredProgressBar(file.Size, maxLargeSize, 0)
fmt.Fprintf(&b, "%s%s %s%2d.%s %s | 📄 %s%s%s %s%10s%s\n",
entryPrefix, selectIcon, numColor, idx+1, colorReset, bar, nameColor, paddedPath, colorReset, sizeColor, size, colorReset)
}
}
} else {
if len(m.entries) == 0 {
fmt.Fprintln(&b, " Empty directory")
} else {
if m.inOverviewMode() {
maxSize := int64(1)
for _, entry := range m.entries {
if entry.Size > maxSize {
maxSize = entry.Size
}
}
totalSize := m.totalSize
// Overview paths are short; fixed width keeps layout stable.
nameWidth := 20
for idx, entry := range m.entries {
icon := "📁"
sizeVal := entry.Size
barValue := max(sizeVal, 0)
var percent float64
if totalSize > 0 && sizeVal >= 0 {
percent = float64(sizeVal) / float64(totalSize) * 100
} else {
percent = 0
}
percentStr := fmt.Sprintf("%5.1f%%", percent)
if totalSize == 0 || sizeVal < 0 {
percentStr = " -- "
}
bar := coloredProgressBar(barValue, maxSize, percent)
sizeText := "pending.."
if sizeVal >= 0 {
sizeText = humanizeBytes(sizeVal)
}
sizeColor := colorGray
if sizeVal >= 0 && totalSize > 0 {
switch {
case percent >= 50:
sizeColor = colorRed
case percent >= 20:
sizeColor = colorYellow
case percent >= 5:
sizeColor = colorBlue
default:
sizeColor = colorGray
}
}
entryPrefix := " "
name := trimNameWithWidth(entry.Name, nameWidth)
paddedName := padName(name, nameWidth)
nameSegment := fmt.Sprintf("%s %s", icon, paddedName)
numColor := ""
percentColor := ""
if idx == m.selected {
entryPrefix = fmt.Sprintf(" %s%s▶%s ", colorCyan, colorBold, colorReset)
nameSegment = fmt.Sprintf("%s%s %s%s", colorCyan, icon, paddedName, colorReset)
numColor = colorCyan
percentColor = colorCyan
sizeColor = colorCyan
}
displayIndex := idx + 1
var hintLabel string
if entry.IsDir && isCleanableDir(entry.Path) {
hintLabel = fmt.Sprintf("%s🧹%s", colorYellow, colorReset)
} else {
lastAccess := entry.LastAccess
if lastAccess.IsZero() && entry.Path != "" {
lastAccess = getLastAccessTime(entry.Path)
}
if unusedTime := formatUnusedTime(lastAccess); unusedTime != "" {
hintLabel = fmt.Sprintf("%s%s%s", colorGray, unusedTime, colorReset)
}
}
if hintLabel == "" {
fmt.Fprintf(&b, "%s%s%2d.%s %s %s%s%s | %s %s%10s%s\n",
entryPrefix, numColor, displayIndex, colorReset, bar, percentColor, percentStr, colorReset,
nameSegment, sizeColor, sizeText, colorReset)
} else {
fmt.Fprintf(&b, "%s%s%2d.%s %s %s%s%s | %s %s%10s%s %s\n",
entryPrefix, numColor, displayIndex, colorReset, bar, percentColor, percentStr, colorReset,
nameSegment, sizeColor, sizeText, colorReset, hintLabel)
}
}
} else {
maxSize := int64(1)
for _, entry := range m.entries {
if entry.Size > maxSize {
maxSize = entry.Size
}
}
viewport := calculateViewport(m.height, false)
nameWidth := calculateNameWidth(m.width)
start := max(m.offset, 0)
end := min(start+viewport, len(m.entries))
for idx := start; idx < end; idx++ {
entry := m.entries[idx]
icon := "📄"
if entry.IsDir {
icon = "📁"
}
size := humanizeBytes(entry.Size)
name := trimNameWithWidth(entry.Name, nameWidth)
paddedName := padName(name, nameWidth)
percent := 0.0
if m.totalSize > 0 {
percent = float64(entry.Size) / float64(m.totalSize) * 100
}
percentStr := fmt.Sprintf("%5.1f%%", percent)
bar := coloredProgressBar(entry.Size, maxSize, percent)
var sizeColor string
if percent >= 50 {
sizeColor = colorRed
} else if percent >= 20 {
sizeColor = colorYellow
} else if percent >= 5 {
sizeColor = colorBlue
} else {
sizeColor = colorGray
}
isMultiSelected := m.multiSelected != nil && m.multiSelected[entry.Path]
selectIcon := "○"
nameColor := ""
if isMultiSelected {
selectIcon = fmt.Sprintf("%s●%s", colorGreen, colorReset)
nameColor = colorGreen
}
entryPrefix := " "
nameSegment := fmt.Sprintf("%s %s", icon, paddedName)
if nameColor != "" {
nameSegment = fmt.Sprintf("%s%s %s%s", nameColor, icon, paddedName, colorReset)
}
numColor := ""
percentColor := ""
if idx == m.selected {
entryPrefix = fmt.Sprintf(" %s%s▶%s ", colorCyan, colorBold, colorReset)
if !isMultiSelected {
nameSegment = fmt.Sprintf("%s%s %s%s", colorCyan, icon, paddedName, colorReset)
}
numColor = colorCyan
percentColor = colorCyan
sizeColor = colorCyan
}
displayIndex := idx + 1
var hintLabel string
if entry.IsDir && isCleanableDir(entry.Path) {
hintLabel = fmt.Sprintf("%s🧹%s", colorYellow, colorReset)
} else {
lastAccess := entry.LastAccess
if lastAccess.IsZero() && entry.Path != "" {
lastAccess = getLastAccessTime(entry.Path)
}
if unusedTime := formatUnusedTime(lastAccess); unusedTime != "" {
hintLabel = fmt.Sprintf("%s%s%s", colorGray, unusedTime, colorReset)
}
}
if hintLabel == "" {
fmt.Fprintf(&b, "%s%s %s%2d.%s %s %s%s%s | %s %s%10s%s\n",
entryPrefix, selectIcon, numColor, displayIndex, colorReset, bar, percentColor, percentStr, colorReset,
nameSegment, sizeColor, size, colorReset)
} else {
fmt.Fprintf(&b, "%s%s %s%2d.%s %s %s%s%s | %s %s%10s%s %s\n",
entryPrefix, selectIcon, numColor, displayIndex, colorReset, bar, percentColor, percentStr, colorReset,
nameSegment, sizeColor, size, colorReset, hintLabel)
}
}
}
}
}
fmt.Fprintln(&b)
if m.inOverviewMode() {
if len(m.history) > 0 {
fmt.Fprintf(&b, "%s↑↓←→ | Enter | R Refresh | O Open | F File | ← Back | Q Quit%s\n", colorGray, colorReset)
} else {
fmt.Fprintf(&b, "%s↑↓→ | Enter | R Refresh | O Open | F File | Q Quit%s\n", colorGray, colorReset)
}
} else if m.showLargeFiles {
selectCount := len(m.largeMultiSelected)
if selectCount > 0 {
fmt.Fprintf(&b, "%s↑↓← | Space Select | R Refresh | O Open | F File | ⌫ Del(%d) | ← Back | Q Quit%s\n", colorGray, selectCount, colorReset)
} else {
fmt.Fprintf(&b, "%s↑↓← | Space Select | R Refresh | O Open | F File | ⌫ Del | ← Back | Q Quit%s\n", colorGray, colorReset)
}
} else {
largeFileCount := len(m.largeFiles)
selectCount := len(m.multiSelected)
if selectCount > 0 {
if largeFileCount > 0 {
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del(%d) | T Top(%d) | Q Quit%s\n", colorGray, selectCount, largeFileCount, colorReset)
} else {
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del(%d) | Q Quit%s\n", colorGray, selectCount, colorReset)
}
} else {
if largeFileCount > 0 {
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del | T Top(%d) | Q Quit%s\n", colorGray, largeFileCount, colorReset)
} else {
fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del | Q Quit%s\n", colorGray, colorReset)
}
}
}
if m.deleteConfirm && m.deleteTarget != nil {
fmt.Fprintln(&b)
var deleteCount int
var totalDeleteSize int64
if m.showLargeFiles && len(m.largeMultiSelected) > 0 {
deleteCount = len(m.largeMultiSelected)
for path := range m.largeMultiSelected {
for _, file := range m.largeFiles {
if file.Path == path {
totalDeleteSize += file.Size
break
}
}
}
} else if !m.showLargeFiles && len(m.multiSelected) > 0 {
deleteCount = len(m.multiSelected)
for path := range m.multiSelected {
for _, entry := range m.entries {
if entry.Path == path {
totalDeleteSize += entry.Size
break
}
}
}
}
if deleteCount > 1 {
fmt.Fprintf(&b, "%sDelete:%s %d items (%s) %sPress Enter to confirm | ESC cancel%s\n",
colorRed, colorReset,
deleteCount, humanizeBytes(totalDeleteSize),
colorGray, colorReset)
} else {
fmt.Fprintf(&b, "%sDelete:%s %s (%s) %sPress Enter to confirm | ESC cancel%s\n",
colorRed, colorReset,
m.deleteTarget.Name, humanizeBytes(m.deleteTarget.Size),
colorGray, colorReset)
}
}
return b.String()
}
// calculateViewport returns visible rows for the current terminal height.
func calculateViewport(termHeight int, isLargeFiles bool) int {
if termHeight <= 0 {
return defaultViewport
}
reserved := 6 // Header + footer
if isLargeFiles {
reserved = 5
}
available := termHeight - reserved
if available < 1 {
return 1
}
if available > 30 {
return 30
}
return available
}
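// A possible table-driven test sketch for calculateViewport (not part of the original file; it
// would live in a _test.go file and assumes the defaultViewport constant defined elsewhere in
// this package).
func TestCalculateViewport(t *testing.T) {
	cases := []struct {
		height       int
		isLargeFiles bool
		want         int
	}{
		{0, false, defaultViewport}, // unknown terminal height falls back to the default
		{10, false, 4},              // 10 rows minus the 6 reserved for header and footer
		{10, true, 5},               // large-files view reserves one row less
		{50, false, 30},             // tall terminals are capped at 30 visible rows
	}
	for _, c := range cases {
		if got := calculateViewport(c.height, c.isLargeFiles); got != c.want {
			t.Fatalf("calculateViewport(%d, %v) = %d, want %d", c.height, c.isLargeFiles, got, c.want)
		}
	}
}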