tw93/Mole (https://github.com/tw93/Mole.git)
fix: Use du -P for accurate size calculation and add timeouts to channel sends to prevent blocking.

@@ -119,12 +119,16 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 			size := getActualFileSize(fullPath, info)
 			atomic.AddInt64(&total, size)
 
-			entryChan <- dirEntry{
+			select {
+			case entryChan <- dirEntry{
 				Name:       child.Name() + " →",
 				Path:       fullPath,
 				Size:       size,
 				IsDir:      isDir,
 				LastAccess: getLastAccessTimeFromInfo(info),
+			}:
+			case <-time.After(100 * time.Millisecond):
+				// Skip if channel is blocked
 			}
 			continue
 		}

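The change repeated in this and the following Go hunks swaps a bare channel send, which blocks for as long as the receiver lags, for a select that abandons the send after 100 ms. Below is a minimal, self-contained sketch of that pattern; the names entry and sendWithTimeout are invented for the sketch and are not the project's dirEntry/entryChan types.

package main

import (
	"fmt"
	"time"
)

type entry struct {
	name string
	size int64
}

// sendWithTimeout tries to deliver e on ch, but gives up once the timeout
// elapses so a slow or stalled receiver can never block the sender.
// It reports whether the value was actually delivered.
func sendWithTimeout(ch chan<- entry, e entry, timeout time.Duration) bool {
	select {
	case ch <- e:
		return true
	case <-time.After(timeout):
		return false // receiver did not accept the value in time; drop it
	}
}

func main() {
	ch := make(chan entry, 1)

	// The first send fits in the one-slot buffer; the second finds the
	// buffer full with nobody reading, so it times out instead of hanging.
	fmt.Println(sendWithTimeout(ch, entry{name: "a", size: 1}, 100*time.Millisecond)) // true
	fmt.Println(sendWithTimeout(ch, entry{name: "b", size: 2}, 100*time.Millisecond)) // false
}

Running the sketch prints true then false: the first send lands in the one-slot buffer, while the second finds the buffer full with nobody reading and gives up after the timeout instead of hanging.
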
@@ -158,12 +162,15 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 				atomic.AddInt64(&total, size)
 				atomic.AddInt64(dirsScanned, 1)
 
-				entryChan <- dirEntry{
+				select {
+				case entryChan <- dirEntry{
 					Name:       name,
 					Path:       path,
 					Size:       size,
 					IsDir:      true,
 					LastAccess: time.Time{},
+				}:
+				case <-time.After(100 * time.Millisecond):
 				}
 			}(child.Name(), fullPath)
 			continue

@@ -188,12 +195,15 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 				atomic.AddInt64(&total, size)
 				atomic.AddInt64(dirsScanned, 1)
 
-				entryChan <- dirEntry{
+				select {
+				case entryChan <- dirEntry{
 					Name:       name,
 					Path:       path,
 					Size:       size,
 					IsDir:      true,
 					LastAccess: time.Time{},
+				}:
+				case <-time.After(100 * time.Millisecond):
 				}
 			}(child.Name(), fullPath)
 			continue

@@ -209,12 +219,15 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 				atomic.AddInt64(&total, size)
 				atomic.AddInt64(dirsScanned, 1)
 
-				entryChan <- dirEntry{
+				select {
+				case entryChan <- dirEntry{
 					Name:       name,
 					Path:       path,
 					Size:       size,
 					IsDir:      true,
 					LastAccess: time.Time{},
+				}:
+				case <-time.After(100 * time.Millisecond):
 				}
 			}(child.Name(), fullPath)
 			continue

@@ -230,18 +243,25 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 			atomic.AddInt64(filesScanned, 1)
 			atomic.AddInt64(bytesScanned, size)
 
-			entryChan <- dirEntry{
+			select {
+			case entryChan <- dirEntry{
 				Name:       child.Name(),
 				Path:       fullPath,
 				Size:       size,
 				IsDir:      false,
 				LastAccess: getLastAccessTimeFromInfo(info),
+			}:
+			case <-time.After(100 * time.Millisecond):
 			}
+
 			// Track large files only.
 			if !shouldSkipFileForLargeTracking(fullPath) {
 				minSize := atomic.LoadInt64(&largeFileMinSize)
 				if size >= minSize {
-					largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}
+					select {
+					case largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}:
+					case <-time.After(100 * time.Millisecond):
+					}
 				}
 			}
 		}

@@ -516,7 +536,10 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, lar
 			if !shouldSkipFileForLargeTracking(fullPath) && largeFileMinSize != nil {
 				minSize := atomic.LoadInt64(largeFileMinSize)
 				if size >= minSize {
-					largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}
+					select {
+					case largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}:
+					case <-time.After(100 * time.Millisecond):
+					}
 				}
 			}
 

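A note on the trade-off these Go hunks accept: a send that cannot complete within 100 ms is dropped rather than retried, so a consumer stalled for longer than that will miss some directory entries or large-file candidates, while the scanning goroutines can no longer wedge on a full channel. Each attempt also starts a fresh timer via time.After, which is cheap at this scale; a non-blocking send (select with a default case) or a context-based cancel would be the usual alternatives if the dropped entries or the timer churn ever became a concern.
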
@@ -413,7 +413,7 @@ clean_orphaned_system_services() {
            fi
            orphaned_files+=("$plist")
            local size_kb
-           size_kb=$(sudo du -sk "$plist" 2> /dev/null | awk '{print $1}' || echo "0")
+           size_kb=$(sudo du -skP "$plist" 2> /dev/null | awk '{print $1}' || echo "0")
            ((total_orphaned_kb += size_kb))
            ((orphaned_count++))
            break

@@ -444,7 +444,7 @@ clean_orphaned_system_services() {
            fi
            orphaned_files+=("$plist")
            local size_kb
-           size_kb=$(sudo du -sk "$plist" 2> /dev/null | awk '{print $1}' || echo "0")
+           size_kb=$(sudo du -skP "$plist" 2> /dev/null | awk '{print $1}' || echo "0")
            ((total_orphaned_kb += size_kb))
            ((orphaned_count++))
            break

@@ -474,7 +474,7 @@ clean_orphaned_system_services() {
            fi
            orphaned_files+=("$helper")
            local size_kb
-           size_kb=$(sudo du -sk "$helper" 2> /dev/null | awk '{print $1}' || echo "0")
+           size_kb=$(sudo du -skP "$helper" 2> /dev/null | awk '{print $1}' || echo "0")
            ((total_orphaned_kb += size_kb))
            ((orphaned_count++))
            break

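These shell hunks, and the ones that follow, all make the same change: every du call gains -P, which tells du not to follow symbolic links, so a symlink is charged only for the link entry itself rather than for whatever it points at. Both the BSD du bundled with macOS and GNU du accept -P (not following links is already their default), so the flag mostly makes the intent explicit and protects against a symlinked cache or plist inflating the reported size. For intuition, here is a small standalone Go sketch of the same no-follow accounting; the helper name dirSizeNoFollow is invented here and is not the repository's getActualFileSize.

package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

// dirSizeNoFollow sums the sizes of entries under root without following
// symbolic links: a symlink contributes only the size of the link entry
// itself, which is roughly what `du -P` charges for it.
func dirSizeNoFollow(root string) (int64, error) {
	var total int64
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return nil // skip unreadable entries instead of aborting
		}
		if d.IsDir() {
			return nil // a directory's size is the sum of its contents
		}
		info, err := d.Info() // lstat-style: describes the link, not its target
		if err != nil {
			return nil
		}
		total += info.Size()
		return nil
	})
	return total, err
}

func main() {
	size, err := dirSizeNoFollow(os.TempDir())
	if err != nil {
		fmt.Println("walk error:", err)
	}
	fmt.Printf("%s: %d bytes\n", os.TempDir(), size)
}

filepath.WalkDir never descends into symlinked directories and DirEntry.Info reports the link itself, so the sum approximates what du -P measures, modulo du's block-based accounting.
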
@@ -29,7 +29,7 @@ clean_homebrew() {
     local skip_cleanup=false
     local brew_cache_size=0
     if [[ -d ~/Library/Caches/Homebrew ]]; then
-        brew_cache_size=$(run_with_timeout 3 du -sk ~/Library/Caches/Homebrew 2> /dev/null | awk '{print $1}')
+        brew_cache_size=$(run_with_timeout 3 du -skP ~/Library/Caches/Homebrew 2> /dev/null | awk '{print $1}')
         local du_exit=$?
         if [[ $du_exit -eq 0 && -n "$brew_cache_size" && "$brew_cache_size" -lt 51200 ]]; then
             skip_cleanup=true

@@ -489,7 +489,7 @@ is_recently_modified() {
 get_dir_size_kb() {
     local path="$1"
     if [[ -d "$path" ]]; then
-        du -sk "$path" 2> /dev/null | awk '{print $1}' || echo "0"
+        du -skP "$path" 2> /dev/null | awk '{print $1}' || echo "0"
     else
         echo "0"
     fi

@@ -626,7 +626,7 @@ check_ios_device_backups() {
     if [[ -d "$backup_dir" ]]; then
         local backup_kb=$(get_path_size_kb "$backup_dir")
         if [[ -n "${backup_kb:-}" && "$backup_kb" -gt 102400 ]]; then
-            local backup_human=$(command du -sh "$backup_dir" 2> /dev/null | awk '{print $1}')
+            local backup_human=$(command du -shP "$backup_dir" 2> /dev/null | awk '{print $1}')
             if [[ -n "$backup_human" ]]; then
                 note_activity
                 echo -e " ${YELLOW}${ICON_WARNING}${NC} iOS backups: ${GREEN}${backup_human}${NC}${GRAY}, Path: $backup_dir${NC}"

@@ -267,7 +267,7 @@ safe_sudo_remove() {
 
     if sudo test -e "$path" 2> /dev/null; then
         local size_kb
-        size_kb=$(sudo du -sk "$path" 2> /dev/null | awk '{print $1}' || echo "0")
+        size_kb=$(sudo du -skP "$path" 2> /dev/null | awk '{print $1}' || echo "0")
         if [[ "$size_kb" -gt 0 ]]; then
             file_size=$(bytes_to_human "$((size_kb * 1024))")
         fi

@@ -297,7 +297,7 @@ safe_sudo_remove() {
     local size_human=""
     if oplog_enabled; then
         if sudo test -e "$path" 2> /dev/null; then
-            size_kb=$(sudo du -sk "$path" 2> /dev/null | awk '{print $1}' || echo "0")
+            size_kb=$(sudo du -skP "$path" 2> /dev/null | awk '{print $1}' || echo "0")
             if [[ "$size_kb" =~ ^[0-9]+$ ]] && [[ "$size_kb" -gt 0 ]]; then
                 size_human=$(bytes_to_human "$((size_kb * 1024))" 2> /dev/null || echo "${size_kb}KB")
             fi