Mirror of https://github.com/tw93/Mole.git (synced 2026-02-12 16:20:14 +00:00)

Commit: Merge branch 'dev'

bin/purge.sh (104 changed lines)
@@ -47,22 +47,120 @@ start_purge() {
         printf '\033[2J\033[H'
     fi
     printf '\n'
-    echo -e "${PURPLE_BOLD}Purge Project Artifacts${NC}"

     # Initialize stats file in user cache directory
     local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"
     ensure_user_dir "$stats_dir"
     ensure_user_file "$stats_dir/purge_stats"
     ensure_user_file "$stats_dir/purge_count"
+    ensure_user_file "$stats_dir/purge_scanning"
     echo "0" > "$stats_dir/purge_stats"
     echo "0" > "$stats_dir/purge_count"
+    echo "" > "$stats_dir/purge_scanning"
 }

 # Perform the purge
 perform_purge() {
+    local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"
+    local monitor_pid=""
+
+    # Cleanup function
+    cleanup_monitor() {
+        # Remove scanning file to stop monitor
+        rm -f "$stats_dir/purge_scanning" 2> /dev/null || true
+
+        if [[ -n "$monitor_pid" ]]; then
+            kill "$monitor_pid" 2> /dev/null || true
+            wait "$monitor_pid" 2> /dev/null || true
+        fi
+        if [[ -t 1 ]]; then
+            printf '\r\033[K\n\033[K\033[A'
+        fi
+    }
+
+    # Set up trap for cleanup
+    trap cleanup_monitor INT TERM
+
+    # Show scanning with spinner on same line as title
+    if [[ -t 1 ]]; then
+        # Print title first
+        printf '%s' "${PURPLE_BOLD}Purge Project Artifacts${NC} "
+
+        # Start background monitor with ASCII spinner
+        (
+            local spinner_chars="|/-\\"
+            local spinner_idx=0
+            local last_path=""
+
+            # Set up trap to exit cleanly
+            trap 'exit 0' INT TERM
+
+            # Function to truncate path in the middle
+            truncate_path() {
+                local path="$1"
+                local max_len=80
+
+                if [[ ${#path} -le $max_len ]]; then
+                    echo "$path"
+                    return
+                fi
+
+                # Calculate how much to show on each side
+                local side_len=$(( (max_len - 3) / 2 ))
+                local start="${path:0:$side_len}"
+                local end="${path: -$side_len}"
+                echo "${start}...${end}"
+            }
+
+            while [[ -f "$stats_dir/purge_scanning" ]]; do
+                local current_path=$(cat "$stats_dir/purge_scanning" 2> /dev/null || echo "")
+                local display_path=""
+
+                if [[ -n "$current_path" ]]; then
+                    display_path="${current_path/#$HOME/~}"
+                    display_path=$(truncate_path "$display_path")
+                    last_path="$display_path"
+                elif [[ -n "$last_path" ]]; then
+                    display_path="$last_path"
+                fi
+
+                # Get current spinner character
+                local spin_char="${spinner_chars:$spinner_idx:1}"
+                spinner_idx=$(( (spinner_idx + 1) % ${#spinner_chars} ))
+
+                # Show title on first line, spinner and scanning info on second line
+                if [[ -n "$display_path" ]]; then
+                    printf '\r%s\n%s %sScanning %s\033[K\033[A' \
+                        "${PURPLE_BOLD}Purge Project Artifacts${NC}" \
+                        "${BLUE}${spin_char}${NC}" \
+                        "${GRAY}" "$display_path"
+                else
+                    printf '\r%s\n%s %sScanning...\033[K\033[A' \
+                        "${PURPLE_BOLD}Purge Project Artifacts${NC}" \
+                        "${BLUE}${spin_char}${NC}" \
+                        "${GRAY}"
+                fi
+
+                sleep 0.05
+            done
+            exit 0
+        ) &
+        monitor_pid=$!
+    else
+        echo -e "${PURPLE_BOLD}Purge Project Artifacts${NC}"
+    fi
+
     clean_project_artifacts
     local exit_code=$?

+    # Clean up
+    trap - INT TERM
+    cleanup_monitor
+
+    if [[ -t 1 ]]; then
+        echo -e "${PURPLE_BOLD}Purge Project Artifacts${NC}"
+    fi
+
     # Exit codes:
     # 0 = success, show summary
     # 1 = user cancelled
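The new scanning UI is driven entirely through "$stats_dir/purge_scanning": the scanner writes whatever directory it is currently visiting into that file, the backgrounded monitor polls it every 50 ms and redraws the spinner line, and deleting the file is the shutdown signal for the monitor. Long paths stay on one terminal line because truncate_path cuts the middle rather than the end, so both the search root and the leaf remain visible. Below is a standalone Go sketch of the same truncation rule; the repo's TUI has its own truncateMiddle helper (it appears later in this diff), which this sketch does not claim to reproduce exactly.

    package main

    import "fmt"

    // truncateMiddle keeps (max-3)/2 characters from each end and joins
    // them with "...", like truncate_path above. []rune avoids splitting
    // multi-byte characters in path names.
    func truncateMiddle(s string, max int) string {
        r := []rune(s)
        if len(r) <= max {
            return s
        }
        side := (max - 3) / 2
        return string(r[:side]) + "..." + string(r[len(r)-side:])
    }

    func main() {
        fmt.Println(truncateMiddle("/Users/me/www/app/node_modules/.cache/webpack", 24))
    }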
@@ -79,15 +177,11 @@ perform_purge() {
     local total_size_cleaned=0
     local total_items_cleaned=0

-    # Read stats from user cache directory
-    local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"
-
     if [[ -f "$stats_dir/purge_stats" ]]; then
         total_size_cleaned=$(cat "$stats_dir/purge_stats" 2> /dev/null || echo "0")
         rm -f "$stats_dir/purge_stats"
     fi

-    # Read count
     if [[ -f "$stats_dir/purge_count" ]]; then
         total_items_cleaned=$(cat "$stats_dir/purge_count" 2> /dev/null || echo "0")
         rm -f "$stats_dir/purge_count"
@@ -13,8 +13,13 @@ LIB_DIR="$(cd "$SCRIPT_DIR/../lib" && pwd)"
 # shellcheck source=../lib/core/common.sh
 source "$LIB_DIR/core/common.sh"

-readonly PAM_SUDO_FILE="${MOLE_PAM_SUDO_FILE:-/etc/pam.d/sudo}"
-readonly PAM_SUDO_LOCAL_FILE="${MOLE_PAM_SUDO_LOCAL_FILE:-/etc/pam.d/sudo_local}"
+# Set up global cleanup trap
+trap cleanup_temp_files EXIT INT TERM
+
+PAM_SUDO_FILE="${MOLE_PAM_SUDO_FILE:-/etc/pam.d/sudo}"
+PAM_SUDO_LOCAL_FILE="${MOLE_PAM_SUDO_LOCAL_FILE:-$(dirname "$PAM_SUDO_FILE")/sudo_local}"
+readonly PAM_SUDO_FILE
+readonly PAM_SUDO_LOCAL_FILE
 readonly PAM_TID_LINE="auth sufficient pam_tid.so"

 # Check if Touch ID is already configured

@@ -66,9 +71,8 @@ show_status() {

 # Enable Touch ID for sudo
 enable_touchid() {
-    # Cleanup trap
+    # Cleanup trap handled by global EXIT trap
     local temp_file=""
-    trap '[[ -n "${temp_file:-}" ]] && rm -f "${temp_file:-}"' EXIT

     # First check if system supports Touch ID
     if ! supports_touchid; then

@@ -88,7 +92,7 @@ enable_touchid() {
     # It is in sudo_local, but let's check if it's ALSO in sudo (incomplete migration)
     if grep -q "pam_tid.so" "$PAM_SUDO_FILE"; then
         # Clean up legacy config
-        temp_file=$(mktemp)
+        temp_file=$(create_temp_file)
         grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file"
         if sudo mv "$temp_file" "$PAM_SUDO_FILE" 2> /dev/null; then
             echo -e "${GREEN}${ICON_SUCCESS} Cleanup legacy configuration${NC}"

@@ -117,7 +121,7 @@ enable_touchid() {
     else
         # Append if not present
         if ! grep -q "pam_tid.so" "$PAM_SUDO_LOCAL_FILE"; then
-            temp_file=$(mktemp)
+            temp_file=$(create_temp_file)
             cp "$PAM_SUDO_LOCAL_FILE" "$temp_file"
             echo "$PAM_TID_LINE" >> "$temp_file"
             sudo mv "$temp_file" "$PAM_SUDO_LOCAL_FILE"

@@ -132,7 +136,7 @@ enable_touchid() {
     if $write_success; then
         # If we migrated from legacy, clean it up now
         if $is_legacy_configured; then
-            temp_file=$(mktemp)
+            temp_file=$(create_temp_file)
             grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file"
             sudo mv "$temp_file" "$PAM_SUDO_FILE"
             log_success "Touch ID migrated to sudo_local"

@@ -163,7 +167,7 @@ enable_touchid() {
     fi

     # Create temp file
-    temp_file=$(mktemp)
+    temp_file=$(create_temp_file)

     # Insert pam_tid.so after the first comment block
     awk '

@@ -194,9 +198,8 @@ enable_touchid() {

 # Disable Touch ID for sudo
 disable_touchid() {
-    # Cleanup trap
+    # Cleanup trap handled by global EXIT trap
     local temp_file=""
-    trap '[[ -n "${temp_file:-}" ]] && rm -f "${temp_file:-}"' EXIT

     if ! is_touchid_configured; then
         echo -e "${YELLOW}Touch ID is not currently enabled${NC}"

@@ -206,13 +209,13 @@ disable_touchid() {
     # Check sudo_local first
     if [[ -f "$PAM_SUDO_LOCAL_FILE" ]] && grep -q "pam_tid.so" "$PAM_SUDO_LOCAL_FILE"; then
         # Remove from sudo_local
-        temp_file=$(mktemp)
+        temp_file=$(create_temp_file)
         grep -v "pam_tid.so" "$PAM_SUDO_LOCAL_FILE" > "$temp_file"

         if sudo mv "$temp_file" "$PAM_SUDO_LOCAL_FILE" 2> /dev/null; then
             # Since we modified sudo_local, we should also check if it's in sudo file (legacy cleanup)
             if grep -q "pam_tid.so" "$PAM_SUDO_FILE"; then
-                temp_file=$(mktemp)
+                temp_file=$(create_temp_file)
                 grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file"
                 sudo mv "$temp_file" "$PAM_SUDO_FILE"
             fi

@@ -236,7 +239,7 @@ disable_touchid() {
     fi

     # Remove pam_tid.so line
-    temp_file=$(mktemp)
+    temp_file=$(create_temp_file)
     grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file"

     if sudo mv "$temp_file" "$PAM_SUDO_FILE" 2> /dev/null; then
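In the Touch ID script, every ad-hoc mktemp becomes create_temp_file, which registers the path so the single trap cleanup_temp_files EXIT INT TERM at the top of the file can reclaim everything. The per-function trap ... EXIT lines are dropped; a shell keeps only one EXIT trap at a time, so each of those local traps would have replaced whichever trap was installed before it, and centralizing the registration avoids that. A rough Go rendering of the same registry idea (names here are illustrative, not the repo's API):

    package tmpreg

    import (
        "os"
        "sync"
    )

    // Register temp files centrally; remove them once at process exit
    // instead of scattering per-function cleanup handlers that can
    // overwrite each other.
    var (
        tempMu    sync.Mutex
        tempFiles []string
    )

    func registerTempFile(path string) {
        tempMu.Lock()
        defer tempMu.Unlock()
        tempFiles = append(tempFiles, path)
    }

    func cleanupTempFiles() {
        tempMu.Lock()
        defer tempMu.Unlock()
        for _, p := range tempFiles {
            os.Remove(p) // best effort, like rm -f
        }
        tempFiles = nil
    }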
@@ -45,9 +45,10 @@ func TestScanPathConcurrentBasic(t *testing.T) {
 	}

 	var filesScanned, dirsScanned, bytesScanned int64
-	current := ""
+	current := &atomic.Value{}
+	current.Store("")

-	result, err := scanPathConcurrent(root, &filesScanned, &dirsScanned, &bytesScanned, &current)
+	result, err := scanPathConcurrent(root, &filesScanned, &dirsScanned, &bytesScanned, current)
 	if err != nil {
 		t.Fatalf("scanPathConcurrent returned error: %v", err)
 	}

@@ -204,7 +205,7 @@ func TestMeasureOverviewSize(t *testing.T) {
 	if err := os.MkdirAll(target, 0o755); err != nil {
 		t.Fatalf("create target: %v", err)
 	}
-	content := []byte(strings.Repeat("x", 2048))
+	content := []byte(strings.Repeat("x", 4096))
 	if err := os.WriteFile(filepath.Join(target, "data.bin"), content, 0o644); err != nil {
 		t.Fatalf("write file: %v", err)
 	}

@@ -225,6 +226,20 @@ func TestMeasureOverviewSize(t *testing.T) {
 	if cached != size {
 		t.Fatalf("snapshot mismatch: want %d, got %d", size, cached)
 	}
+
+	// Ensure measureOverviewSize does not use cache
+	// APFS block size is 4KB, 4097 bytes should use more blocks
+	content = []byte(strings.Repeat("x", 4097))
+	if err := os.WriteFile(filepath.Join(target, "data2.bin"), content, 0o644); err != nil {
+		t.Fatalf("write file: %v", err)
+	}
+	size2, err := measureOverviewSize(target)
+	if err != nil {
+		t.Fatalf("measureOverviewSize: %v", err)
+	}
+	if size2 == size {
+		t.Fatalf("measureOverviewSize used cache")
+	}
 }

 func TestIsCleanableDir(t *testing.T) {

@@ -347,10 +362,11 @@ func TestScanPathPermissionError(t *testing.T) {
 	}()

 	var files, dirs, bytes int64
-	current := ""
+	current := &atomic.Value{}
+	current.Store("")

 	// Scanning the locked dir itself should fail.
-	_, err := scanPathConcurrent(lockedDir, &files, &dirs, &bytes, &current)
+	_, err := scanPathConcurrent(lockedDir, &files, &dirs, &bytes, current)
 	if err == nil {
 		t.Fatalf("expected error scanning locked directory, got nil")
 	}
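These test changes track an API change in the scanner: the shared "current path" indicator moves from a *string to a *atomic.Value. The old form was a data race — scanner goroutines assigned through the pointer while the UI goroutine read it, which go test -race flags. atomic.Value gives a safe publish/load pair. A minimal runnable sketch of the access pattern:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    func main() {
        var current atomic.Value
        current.Store("")

        done := make(chan struct{})
        go func() { // scanner side: publish the directory being visited
            current.Store("/Users/me/Library/Caches")
            close(done)
        }()
        <-done

        if p, ok := current.Load().(string); ok && p != "" {
            fmt.Println("scanning", p) // UI side: race-free read
        }
    }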
@@ -97,7 +97,7 @@ type model struct {
 	filesScanned   *int64
 	dirsScanned    *int64
 	bytesScanned   *int64
-	currentPath    *string
+	currentPath    *atomic.Value
 	showLargeFiles bool
 	isOverview     bool
 	deleteConfirm  bool

@@ -162,7 +162,8 @@ func main() {

 func newModel(path string, isOverview bool) model {
 	var filesScanned, dirsScanned, bytesScanned int64
-	currentPath := ""
+	currentPath := &atomic.Value{}
+	currentPath.Store("")
 	var overviewFilesScanned, overviewDirsScanned, overviewBytesScanned int64
 	overviewCurrentPath := ""

@@ -174,7 +175,7 @@ func newModel(path string, isOverview bool) model {
 		filesScanned:   &filesScanned,
 		dirsScanned:    &dirsScanned,
 		bytesScanned:   &bytesScanned,
-		currentPath:    &currentPath,
+		currentPath:    currentPath,
 		showLargeFiles: false,
 		isOverview:     isOverview,
 		cache:          make(map[string]historyEntry),

@@ -394,7 +395,7 @@ func (m model) scanCmd(path string) tea.Cmd {
 }

 func tickCmd() tea.Cmd {
-	return tea.Tick(time.Millisecond*80, func(t time.Time) tea.Msg {
+	return tea.Tick(time.Millisecond*100, func(t time.Time) tea.Msg {
 		return tickMsg(t)
 	})
 }

@@ -434,7 +435,7 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
 	atomic.StoreInt64(m.dirsScanned, 0)
 	atomic.StoreInt64(m.bytesScanned, 0)
 	if m.currentPath != nil {
-		*m.currentPath = ""
+		m.currentPath.Store("")
 	}
 	return m, tea.Batch(m.scanCmd(m.path), tickCmd())
 }

@@ -683,6 +684,11 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
 	m.largeMultiSelected = make(map[string]bool)

 	if m.inOverviewMode() {
+		// Explicitly invalidate cache for all overview entries to force re-scan
+		for _, entry := range m.entries {
+			invalidateCache(entry.Path)
+		}
+
 		m.overviewSizeCache = make(map[string]int64)
 		m.overviewScanningSet = make(map[string]bool)
 		m.hydrateOverviewEntries() // Reset sizes to pending

@@ -707,7 +713,7 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
 	atomic.StoreInt64(m.dirsScanned, 0)
 	atomic.StoreInt64(m.bytesScanned, 0)
 	if m.currentPath != nil {
-		*m.currentPath = ""
+		m.currentPath.Store("")
 	}
 	return m, tea.Batch(m.scanCmd(m.path), tickCmd())
 case "t", "T":

@@ -979,7 +985,7 @@ func (m model) enterSelectedDir() (tea.Model, tea.Cmd) {
 	atomic.StoreInt64(m.dirsScanned, 0)
 	atomic.StoreInt64(m.bytesScanned, 0)
 	if m.currentPath != nil {
-		*m.currentPath = ""
+		m.currentPath.Store("")
 	}

 	if cached, ok := m.cache[m.path]; ok && !cached.Dirty {
@@ -23,7 +23,7 @@ import (

 var scanGroup singleflight.Group

-func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *int64, currentPath *string) (scanResult, error) {
+func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *int64, currentPath *atomic.Value) (scanResult, error) {
 	children, err := os.ReadDir(root)
 	if err != nil {
 		return scanResult{}, err

@@ -50,10 +50,20 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 		numWorkers = 1
 	}
 	sem := make(chan struct{}, numWorkers)
+	duSem := make(chan struct{}, min(4, runtime.NumCPU()))        // limits concurrent du processes
+	duQueueSem := make(chan struct{}, min(4, runtime.NumCPU())*2) // limits how many goroutines may be waiting to run du
 	var wg sync.WaitGroup

 	// Collect results via channels.
-	entryChan := make(chan dirEntry, len(children))
+	// Cap buffer size to prevent memory spikes with huge directories.
+	entryBufSize := len(children)
+	if entryBufSize > 4096 {
+		entryBufSize = 4096
+	}
+	if entryBufSize < 1 {
+		entryBufSize = 1
+	}
+	entryChan := make(chan dirEntry, entryBufSize)
 	largeFileChan := make(chan fileEntry, maxLargeFiles*2)

 	var collectorWg sync.WaitGroup
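Two throttles are introduced here. duSem caps how many external du processes run at once (at most four, fewer on small machines), while duQueueSem additionally caps how many goroutines are even allowed to be waiting for a du slot, so a tree with thousands of folded directories cannot pile up parked goroutines. The entryChan buffer is capped at 4096 for the same reason: its old capacity was len(children), which is unbounded. A runnable sketch of the two-level pattern (the printed line stands in for sizing a directory):

    package main

    import (
        "fmt"
        "runtime"
        "sync"
    )

    func main() {
        limit := runtime.NumCPU()
        if limit > 4 {
            limit = 4 // same cap as min(4, runtime.NumCPU()) above
        }
        duSem := make(chan struct{}, limit)        // running-du slots
        duQueueSem := make(chan struct{}, limit*2) // waiters allowed to exist

        var wg sync.WaitGroup
        for i := 0; i < 100; i++ {
            duQueueSem <- struct{}{} // backpressure before spawning
            wg.Add(1)
            go func(i int) {
                defer wg.Done()
                defer func() { <-duQueueSem }()
                duSem <- struct{}{} // at most `limit` bodies run at once
                defer func() { <-duSem }()
                fmt.Println("sized dir", i) // stand-in for getDirectorySizeFromDu
            }(i)
        }
        wg.Wait()
    }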
@@ -126,10 +136,10 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in

 	// ~/Library is scanned separately; reuse cache when possible.
 	if isHomeDir && child.Name() == "Library" {
+		sem <- struct{}{}
 		wg.Add(1)
 		go func(name, path string) {
 			defer wg.Done()
-			sem <- struct{}{}
 			defer func() { <-sem }()

 			var size int64

@@ -138,7 +148,7 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 			} else if cached, err := loadCacheFromDisk(path); err == nil {
 				size = cached.TotalSize
 			} else {
-				size = calculateDirSizeConcurrent(path, largeFileChan, filesScanned, dirsScanned, bytesScanned, currentPath)
+				size = calculateDirSizeConcurrent(path, largeFileChan, duSem, duQueueSem, filesScanned, dirsScanned, bytesScanned, currentPath)
 			}
 			atomic.AddInt64(&total, size)
 			atomic.AddInt64(dirsScanned, 1)

@@ -156,13 +166,17 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in

 	// Folded dirs: fast size without expanding.
 	if shouldFoldDirWithPath(child.Name(), fullPath) {
+		duQueueSem <- struct{}{}
 		wg.Add(1)
 		go func(name, path string) {
 			defer wg.Done()
-			sem <- struct{}{}
-			defer func() { <-sem }()
+			defer func() { <-duQueueSem }()

-			size, err := getDirectorySizeFromDu(path)
+			size, err := func() (int64, error) {
+				duSem <- struct{}{}
+				defer func() { <-duSem }()
+				return getDirectorySizeFromDu(path)
+			}()
 			if err != nil || size <= 0 {
 				size = calculateDirSizeFast(path, filesScanned, dirsScanned, bytesScanned, currentPath)
 			}

@@ -180,13 +194,13 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
 		continue
 	}

+	sem <- struct{}{}
 	wg.Add(1)
 	go func(name, path string) {
 		defer wg.Done()
-		sem <- struct{}{}
 		defer func() { <-sem }()

-		size := calculateDirSizeConcurrent(path, largeFileChan, filesScanned, dirsScanned, bytesScanned, currentPath)
+		size := calculateDirSizeConcurrent(path, largeFileChan, duSem, duQueueSem, filesScanned, dirsScanned, bytesScanned, currentPath)
 		atomic.AddInt64(&total, size)
 		atomic.AddInt64(dirsScanned, 1)

@@ -280,7 +294,7 @@ func shouldSkipFileForLargeTracking(path string) bool {
 }

 // calculateDirSizeFast performs concurrent dir sizing using os.ReadDir.
-func calculateDirSizeFast(root string, filesScanned, dirsScanned, bytesScanned *int64, currentPath *string) int64 {
+func calculateDirSizeFast(root string, filesScanned, dirsScanned, bytesScanned *int64, currentPath *atomic.Value) int64 {
 	var total int64
 	var wg sync.WaitGroup

@@ -299,7 +313,7 @@ func calculateDirSizeFast(root string, filesScanned, dirsScanned, bytesScanned *
 	}

 	if currentPath != nil && atomic.LoadInt64(filesScanned)%int64(batchUpdateSize) == 0 {
-		*currentPath = dirPath
+		currentPath.Store(dirPath)
 	}

 	entries, err := os.ReadDir(dirPath)

@@ -311,11 +325,11 @@ func calculateDirSizeFast(root string, filesScanned, dirsScanned, bytesScanned *

 	for _, entry := range entries {
 		if entry.IsDir() {
-			wg.Add(1)
 			subDir := filepath.Join(dirPath, entry.Name())
+			sem <- struct{}{}
+			wg.Add(1)
 			go func(p string) {
 				defer wg.Done()
-				sem <- struct{}{}
 				defer func() { <-sem }()
 				walk(p)
 			}(subDir)
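The repeated sem <- struct{}{} moves in these hunks all make the same fix: the semaphore is now acquired before go, not inside the goroutine. With the old order, a directory with 100k children would spawn 100k goroutines that all park on the semaphore; with the new order the spawning loop itself blocks once the pool is full, so live goroutines are bounded by cap(sem). A compact sketch of the corrected shape:

    package walkpool

    import "sync"

    // walkAll acquires the semaphore before go, so at most cap(sem)
    // walkers are ever alive; the loop provides backpressure at spawn time.
    func walkAll(dirs []string, sem chan struct{}, walk func(string)) {
        var wg sync.WaitGroup
        for _, d := range dirs {
            sem <- struct{}{}
            wg.Add(1)
            go func(p string) {
                defer wg.Done()
                defer func() { <-sem }()
                walk(p)
            }(d)
        }
        wg.Wait()
    }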
@@ -416,7 +430,7 @@ func isInFoldedDir(path string) bool {
 	return false
 }

-func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, filesScanned, dirsScanned, bytesScanned *int64, currentPath *string) int64 {
+func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, duSem, duQueueSem chan struct{}, filesScanned, dirsScanned, bytesScanned *int64, currentPath *atomic.Value) int64 {
 	children, err := os.ReadDir(root)
 	if err != nil {
 		return 0

@@ -446,26 +460,35 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, fil

 	if child.IsDir() {
 		if shouldFoldDirWithPath(child.Name(), fullPath) {
+			duQueueSem <- struct{}{}
 			wg.Add(1)
 			go func(path string) {
 				defer wg.Done()
-				size, err := getDirectorySizeFromDu(path)
-				if err == nil && size > 0 {
-					atomic.AddInt64(&total, size)
+				defer func() { <-duQueueSem }()
+
+				size, err := func() (int64, error) {
+					duSem <- struct{}{}
+					defer func() { <-duSem }()
+					return getDirectorySizeFromDu(path)
+				}()
+				if err != nil || size <= 0 {
+					size = calculateDirSizeFast(path, filesScanned, dirsScanned, bytesScanned, currentPath)
+				} else {
 					atomic.AddInt64(bytesScanned, size)
-					atomic.AddInt64(dirsScanned, 1)
 				}
+				atomic.AddInt64(&total, size)
+				atomic.AddInt64(dirsScanned, 1)
 			}(fullPath)
 			continue
 		}

+		sem <- struct{}{}
 		wg.Add(1)
 		go func(path string) {
 			defer wg.Done()
-			sem <- struct{}{}
 			defer func() { <-sem }()

-			size := calculateDirSizeConcurrent(path, largeFileChan, filesScanned, dirsScanned, bytesScanned, currentPath)
+			size := calculateDirSizeConcurrent(path, largeFileChan, duSem, duQueueSem, filesScanned, dirsScanned, bytesScanned, currentPath)
 			atomic.AddInt64(&total, size)
 			atomic.AddInt64(dirsScanned, 1)
 		}(fullPath)

@@ -488,7 +511,7 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, fil

 	// Update current path occasionally to prevent UI jitter.
 	if currentPath != nil && atomic.LoadInt64(filesScanned)%int64(batchUpdateSize) == 0 {
-		*currentPath = fullPath
+		currentPath.Store(fullPath)
 	}
 }

@@ -519,10 +542,6 @@ func measureOverviewSize(path string) (int64, error) {
 		excludePath = filepath.Join(home, "Library")
 	}

-	if cached, err := loadStoredOverviewSize(path); err == nil && cached > 0 {
-		return cached, nil
-	}
-
 	if duSize, err := getDirectorySizeFromDuWithExclude(path, excludePath); err == nil && duSize > 0 {
 		_ = storeOverviewSize(path, duSize)
 		return duSize, nil
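measureOverviewSize no longer returns the stored snapshot early: it always re-measures and only writes the snapshot afterwards. That is what makes the refresh key meaningful — the UI now explicitly invalidates overview caches, and a measurer that read its own cache back would turn refresh into a no-op — and it is exactly what the new assertion in TestMeasureOverviewSize earlier in this diff pins down. The test sizes are chosen around APFS's 4 KiB block granularity, which the test comment relies on; a tiny illustration of that arithmetic:

    package main

    import "fmt"

    // blocks rounds a byte count up to whole filesystem blocks, which is
    // the granularity du-style measurement reports. With 4 KiB blocks,
    // 4096 bytes fit in one block while 4097 need two, so adding data2.bin
    // must change the measured total unless a stale cache short-circuits.
    func blocks(n, block int64) int64 { return (n + block - 1) / block }

    func main() {
        fmt.Println(blocks(4096, 4096)) // 1
        fmt.Println(blocks(4097, 4096)) // 2
    }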
@@ -32,7 +32,7 @@ func (m model) View() string {
 			return b.String()
 		} else {
 			fmt.Fprintf(&b, "%sSelect a location to explore:%s ", colorGray, colorReset)
-			fmt.Fprintf(&b, "%s%s%s%s Scanning...\n\n", colorCyan, colorBold, spinnerFrames[m.spinner], colorReset)
+			fmt.Fprintf(&b, "%s%s%s%s %s\n\n", colorCyan, colorBold, spinnerFrames[m.spinner], colorReset, m.status)
 		}
 	} else {
 		hasPending := false

@@ -44,7 +44,7 @@ func (m model) View() string {
 		}
 		if hasPending {
 			fmt.Fprintf(&b, "%sSelect a location to explore:%s ", colorGray, colorReset)
-			fmt.Fprintf(&b, "%s%s%s%s Scanning...\n\n", colorCyan, colorBold, spinnerFrames[m.spinner], colorReset)
+			fmt.Fprintf(&b, "%s%s%s%s %s\n\n", colorCyan, colorBold, spinnerFrames[m.spinner], colorReset, m.status)
 		} else {
 			fmt.Fprintf(&b, "%sSelect a location to explore:%s\n\n", colorGray, colorReset)
 		}

@@ -99,7 +99,7 @@ func (m model) View() string {
 		colorGreen, humanizeBytes(bytesScanned), colorReset)

 	if m.currentPath != nil {
-		currentPath := *m.currentPath
+		currentPath := m.currentPath.Load().(string)
 		if currentPath != "" {
 			shortPath := displayPath(currentPath)
 			shortPath = truncateMiddle(shortPath, 50)
install.sh (46 changed lines)

@@ -52,6 +52,39 @@ log_error() { echo -e "${YELLOW}${ICON_ERROR}${NC} $1"; }
 log_admin() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}${ICON_ADMIN}${NC} $1"; }
 log_confirm() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}${ICON_CONFIRM}${NC} $1"; }

+safe_rm() {
+    local target="${1:-}"
+    local tmp_root
+
+    if [[ -z "$target" ]]; then
+        log_error "safe_rm: empty path"
+        return 1
+    fi
+    if [[ ! -e "$target" ]]; then
+        return 0
+    fi
+
+    tmp_root="${TMPDIR:-/tmp}"
+    case "$target" in
+        "$tmp_root" | /tmp)
+            log_error "safe_rm: refusing to remove temp root: $target"
+            return 1
+            ;;
+        "$tmp_root"/* | /tmp/*) ;;
+        *)
+            log_error "safe_rm: refusing to remove non-temp path: $target"
+            return 1
+            ;;
+    esac
+
+    if [[ -d "$target" ]]; then
+        find "$target" -depth \( -type f -o -type l \) -exec rm -f {} + 2> /dev/null || true
+        find "$target" -depth -type d -exec rmdir {} + 2> /dev/null || true
+    else
+        rm -f "$target" 2> /dev/null || true
+    fi
+}
+
 # Install defaults
 INSTALL_DIR="/usr/local/bin"
 CONFIG_DIR="$HOME/.config/mole"

@@ -100,7 +133,16 @@ resolve_source_dir() {

     local tmp
     tmp="$(mktemp -d)"
-    trap 'stop_line_spinner 2>/dev/null; rm -rf "$tmp"' EXIT
+
+    # Safe cleanup function for temporary directory
+    cleanup_tmp() {
+        stop_line_spinner 2> /dev/null || true
+        if [[ -z "${tmp:-}" ]]; then
+            return 0
+        fi
+        safe_rm "$tmp"
+    }
+    trap cleanup_tmp EXIT

     local branch="${MOLE_VERSION:-}"
     if [[ -z "$branch" ]]; then

@@ -125,7 +167,7 @@ resolve_source_dir() {

     start_line_spinner "Fetching Mole source (${branch})..."
     if command -v curl > /dev/null 2>&1; then
-        if curl -fsSL -o "$tmp/mole.tar.gz" "$url" 2> /dev/null; then
+        if curl -fsSL --connect-timeout 10 --max-time 60 -o "$tmp/mole.tar.gz" "$url" 2> /dev/null; then
             if tar -xzf "$tmp/mole.tar.gz" -C "$tmp" 2> /dev/null; then
                 stop_line_spinner
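safe_rm refuses to operate on anything that is not strictly inside ${TMPDIR:-/tmp}, and refuses the temp roots themselves, so a corrupted or empty $tmp can no longer turn rm -rf "$tmp" into a disaster; directories are then emptied bottom-up with find instead of a recursive rm. A Go sketch of the same containment check (names illustrative):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "strings"
    )

    // insideTempRoot mirrors safe_rm's case-pattern guard: accept only
    // paths strictly below the temp root (or /tmp), never the roots.
    func insideTempRoot(target string) bool {
        root := filepath.Clean(os.TempDir()) // analogous to ${TMPDIR:-/tmp}
        t := filepath.Clean(target)
        if t == root || t == "/tmp" {
            return false
        }
        return strings.HasPrefix(t, root+string(os.PathSeparator)) ||
            strings.HasPrefix(t, "/tmp/")
    }

    func main() {
        fmt.Println(insideTempRoot("/tmp/mole.abc123")) // true
        fmt.Println(insideTempRoot("/tmp"))             // false
        fmt.Println(insideTempRoot("/Users/me"))        // false
    }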
@@ -45,7 +45,7 @@ readonly PURGE_TARGETS=(
 readonly MIN_AGE_DAYS=7
 # Scan depth defaults (relative to search root).
 readonly PURGE_MIN_DEPTH_DEFAULT=2
-readonly PURGE_MAX_DEPTH_DEFAULT=8
+readonly PURGE_MAX_DEPTH_DEFAULT=4
 # Search paths (default, can be overridden via config file).
 readonly DEFAULT_PURGE_SEARCH_PATHS=(
     "$HOME/www"

@@ -339,6 +339,11 @@ scan_purge_targets() {
     if [[ ! -d "$search_path" ]]; then
         return
     fi

+    # Update current scanning path
+    local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"
+    echo "$search_path" > "$stats_dir/purge_scanning" 2> /dev/null || true
+
     if command -v fd > /dev/null 2>&1; then
         # Escape regex special characters in target names for fd patterns
         local escaped_targets=()

@@ -356,28 +361,39 @@ scan_purge_targets() {
             "--type" "d"
             "--min-depth" "$min_depth"
             "--max-depth" "$max_depth"
-            "--threads" "4"
+            "--threads" "8"
             "--exclude" ".git"
             "--exclude" "Library"
             "--exclude" ".Trash"
             "--exclude" "Applications"
         )
-        fd "${fd_args[@]}" "$pattern" "$search_path" 2> /dev/null | while IFS= read -r item; do
-            if is_safe_project_artifact "$item" "$search_path"; then
-                echo "$item"
-            fi
-        done | filter_nested_artifacts | filter_protected_artifacts > "$output_file"
+        # Write to temp file first, then filter - more efficient than piping
+        fd "${fd_args[@]}" "$pattern" "$search_path" 2> /dev/null > "$output_file.raw" || true
+
+        # Single pass: safe + nested + protected
+        if [[ -f "$output_file.raw" ]]; then
+            while IFS= read -r item; do
+                # Check if we should abort (scanning file removed by Ctrl+C)
+                if [[ ! -f "$stats_dir/purge_scanning" ]]; then
+                    rm -f "$output_file.raw"
+                    return
+                fi
+
+                if [[ -n "$item" ]] && is_safe_project_artifact "$item" "$search_path"; then
+                    echo "$item"
+                    # Update scanning path to show current project directory
+                    local project_dir=$(dirname "$item")
+                    echo "$project_dir" > "$stats_dir/purge_scanning" 2> /dev/null || true
+                fi
+            done < "$output_file.raw" | filter_nested_artifacts | filter_protected_artifacts > "$output_file"
+            rm -f "$output_file.raw"
+        else
+            touch "$output_file"
+        fi
     else
         # Pruned find avoids descending into heavy directories.
-        local prune_args=()
-        local prune_dirs=(".git" "Library" ".Trash" "Applications")
-        for dir in "${prune_dirs[@]}"; do
-            prune_args+=("-name" "$dir" "-prune" "-o")
-        done
-        for target in "${PURGE_TARGETS[@]}"; do
-            prune_args+=("-name" "$target" "-print" "-prune" "-o")
-        done
         local find_expr=()
+        local prune_dirs=(".git" "Library" ".Trash" "Applications")
         for dir in "${prune_dirs[@]}"; do
             find_expr+=("-name" "$dir" "-prune" "-o")
         done

@@ -390,28 +406,49 @@ scan_purge_targets() {
             ((i++))
         done
         command find "$search_path" -mindepth "$min_depth" -maxdepth "$max_depth" -type d \
-            \( "${find_expr[@]}" \) 2> /dev/null | while IFS= read -r item; do
-            if is_safe_project_artifact "$item" "$search_path"; then
-                echo "$item"
-            fi
-        done | filter_nested_artifacts | filter_protected_artifacts > "$output_file"
+            \( "${find_expr[@]}" \) 2> /dev/null > "$output_file.raw" || true
+
+        # Single pass: safe + nested + protected
+        if [[ -f "$output_file.raw" ]]; then
+            while IFS= read -r item; do
+                # Check if we should abort (scanning file removed by Ctrl+C)
+                if [[ ! -f "$stats_dir/purge_scanning" ]]; then
+                    rm -f "$output_file.raw"
+                    return
+                fi
+
+                if [[ -n "$item" ]] && is_safe_project_artifact "$item" "$search_path"; then
+                    echo "$item"
+                    # Update scanning path to show current project directory
+                    local project_dir=$(dirname "$item")
+                    echo "$project_dir" > "$stats_dir/purge_scanning" 2> /dev/null || true
+                fi
+            done < "$output_file.raw" | filter_nested_artifacts | filter_protected_artifacts > "$output_file"
+            rm -f "$output_file.raw"
+        else
+            touch "$output_file"
+        fi
     fi
 }

-# Filter out nested artifacts (e.g. node_modules inside node_modules).
+# Filter out nested artifacts (e.g. node_modules inside node_modules, .build inside build).
+# Optimized: Sort paths to put parents before children, then filter in single pass.
 filter_nested_artifacts() {
-    while IFS= read -r item; do
-        local parent_dir=$(dirname "$item")
-        local is_nested=false
-        for target in "${PURGE_TARGETS[@]}"; do
-            if [[ "$parent_dir" == *"/$target/"* || "$parent_dir" == *"/$target" ]]; then
-                is_nested=true
-                break
-            fi
-        done
-        if [[ "$is_nested" == "false" ]]; then
-            echo "$item"
-        fi
-    done
+    # 1. Append trailing slash to each path (to ensure /foo/bar starts with /foo/)
+    # 2. Sort to group parents and children (LC_COLLATE=C ensures standard sorting)
+    # 3. Use awk to filter out paths that start with the previous kept path
+    # 4. Remove trailing slash
+    sed 's|[^/]$|&/|' | LC_COLLATE=C sort | awk '
+        BEGIN { last_kept = "" }
+        {
+            current = $0
+            # If current path starts with last_kept, it is nested
+            # Only check if last_kept is not empty
+            if (last_kept == "" || index(current, last_kept) != 1) {
+                print current
+                last_kept = current
+            }
+        }
+    ' | sed 's|/$||'
 }

 filter_protected_artifacts() {
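The old nested filter matched each parent directory against every PURGE_TARGETS name, which was O(n × targets) and missed cases like Framework.build inside build — exactly what the new bats test at the end of this diff pins down. The rewrite appends a trailing slash so prefix tests only match at path-component boundaries, sorts with LC_COLLATE=C so any parent sorts immediately before its children, and then keeps a path only if it does not start with the last kept path. A runnable Go sketch of the same three steps:

    package main

    import (
        "fmt"
        "sort"
        "strings"
    )

    // filterNested keeps only outermost paths: add a trailing slash, sort,
    // and drop anything prefixed by the previously kept path — the same
    // steps as the sed | sort | awk pipeline above.
    func filterNested(paths []string) []string {
        s := make([]string, len(paths))
        for i, p := range paths {
            s[i] = strings.TrimSuffix(p, "/") + "/"
        }
        sort.Strings(s) // byte order, like LC_COLLATE=C
        var kept []string
        last := ""
        for _, p := range s {
            if last == "" || !strings.HasPrefix(p, last) {
                kept = append(kept, strings.TrimSuffix(p, "/"))
                last = p
            }
        }
        return kept
    }

    func main() {
        fmt.Println(filterNested([]string{
            "/www/app/build",
            "/www/app/build/Framework.build",
            "/www/app/node_modules",
        })) // [/www/app/build /www/app/node_modules]
    }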
@@ -703,17 +740,14 @@ clean_project_artifacts() {
     for temp in "${scan_temps[@]+"${scan_temps[@]}"}"; do
         rm -f "$temp" 2> /dev/null || true
     done
-    if [[ -t 1 ]]; then
-        stop_inline_spinner
-    fi
+    # Clean up purge scanning file
+    local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"
+    rm -f "$stats_dir/purge_scanning" 2> /dev/null || true
     echo ""
     exit 130
 }
 trap cleanup_scan INT TERM
-# Start parallel scanning of all paths at once
-if [[ -t 1 ]]; then
-    start_inline_spinner "Scanning projects..."
-fi
+# Scanning is started from purge.sh with start_inline_spinner
 # Launch all scans in parallel
 for path in "${PURGE_SEARCH_PATHS[@]}"; do
     if [[ -d "$path" ]]; then

@@ -730,9 +764,6 @@ clean_project_artifacts() {
     for pid in "${scan_pids[@]+"${scan_pids[@]}"}"; do
         wait "$pid" 2> /dev/null || true
     done
-    if [[ -t 1 ]]; then
-        stop_inline_spinner
-    fi
     # Collect all results
     for scan_output in "${scan_temps[@]+"${scan_temps[@]}"}"; do
         if [[ -f "$scan_output" ]]; then
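With the scanners running as parallel background jobs, Ctrl+C is now propagated cooperatively rather than by killing workers mid-write: cleanup_scan deletes purge_scanning, and each per-item loop in scan_purge_targets checks for the file before continuing, so workers drain quickly and remove their own temp output. A minimal Go sketch of that sentinel-file protocol (path and items are illustrative):

    package main

    import (
        "fmt"
        "os"
    )

    // Workers stop as soon as the sentinel file is gone; the interrupt
    // handler's only job is to delete it.
    func scanUntilCancelled(sentinel string, items []string) {
        for _, it := range items {
            if _, err := os.Stat(sentinel); err != nil {
                return // cancelled: sentinel removed by the INT handler
            }
            fmt.Println("checking", it)
        }
    }

    func main() {
        scanUntilCancelled("/tmp/mole-demo-scanning", []string{"a", "b"})
    }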
@@ -267,40 +267,21 @@ tm_is_running() {
     grep -qE '(^|[[:space:]])("Running"|Running)[[:space:]]*=[[:space:]]*1([[:space:]]*;|$)' <<< "$st"
 }

-# Returns 0 if snapshot mounts exist under local snapshot paths
-# Returns 1 if none found
-# Returns 2 if mount state cannot be determined
-tm_snapshots_mounted() {
-    local m
-    if ! m="$(run_with_timeout 3 mount 2> /dev/null)"; then
-        return 2
-    fi
-    # Match modern and legacy local-snapshot browse mounts:
-    # - /Volumes/com.apple.TimeMachine.localsnapshots/... (APFS)
-    # - /.TimeMachine (APFS)
-    # - /Volumes/MobileBackups (HFS+, legacy)
-    grep -qE '[[:space:]]on[[:space:]](/\.TimeMachine(/|[[:space:]])|/Volumes/com\.apple\.TimeMachine\.localsnapshots(/|[[:space:]])|/Volumes/MobileBackups(/|[[:space:]]))' <<< "$m"
-}
-
 # Local APFS snapshots (keep the most recent).
 clean_local_snapshots() {
     if ! command -v tmutil > /dev/null 2>&1; then
         return 0
     fi

-    local rc_running rc_mounted
-    rc_running=0
+    local rc_running=0
     tm_is_running || rc_running=$?

-    rc_mounted=0
-    tm_snapshots_mounted || rc_mounted=$?
-
-    if [[ $rc_running -eq 2 || $rc_mounted -eq 2 ]]; then
+    if [[ $rc_running -eq 2 ]]; then
         echo -e " ${YELLOW}!${NC} Could not determine Time Machine status; skipping snapshot cleanup"
         return 0
     fi

-    if [[ $rc_running -eq 0 || $rc_mounted -eq 0 ]]; then
+    if [[ $rc_running -eq 0 ]]; then
         echo -e " ${YELLOW}!${NC} Time Machine is active; skipping snapshot cleanup"
         return 0
     fi
@@ -512,7 +512,7 @@ declare -a MOLE_TEMP_DIRS=()
 create_temp_file() {
     local temp
     temp=$(mktemp) || return 1
-    MOLE_TEMP_FILES+=("$temp")
+    register_temp_file "$temp"
     echo "$temp"
 }

@@ -520,7 +520,7 @@ create_temp_file() {
 create_temp_dir() {
     local temp
     temp=$(mktemp -d) || return 1
-    MOLE_TEMP_DIRS+=("$temp")
+    register_temp_dir "$temp"
     echo "$temp"
 }

@@ -538,9 +538,17 @@ register_temp_dir() {
 # Compatible with both BSD mktemp (macOS default) and GNU mktemp (coreutils)
 mktemp_file() {
     local prefix="${1:-mole}"
+    local temp
+    local error_msg
     # Use TMPDIR if set, otherwise /tmp
     # Add .XXXXXX suffix to work with both BSD and GNU mktemp
-    mktemp "${TMPDIR:-/tmp}/${prefix}.XXXXXX"
+    if ! error_msg=$(mktemp "${TMPDIR:-/tmp}/${prefix}.XXXXXX" 2>&1); then
+        echo "Error: Failed to create temporary file: $error_msg" >&2
+        return 1
+    fi
+    temp="$error_msg"
+    register_temp_file "$temp"
+    echo "$temp"
 }

 # Cleanup all tracked temp files and directories
@@ -34,7 +34,7 @@ update_via_homebrew() {
     temp_upgrade=$(mktemp_file "brew_upgrade")

     # Set up trap for interruption (Ctrl+C) with inline cleanup
-    trap 'stop_inline_spinner 2>/dev/null; rm -f "$temp_update" "$temp_upgrade" 2>/dev/null; echo ""; exit 130' INT TERM
+    trap 'stop_inline_spinner 2>/dev/null; safe_remove "$temp_update" true; safe_remove "$temp_upgrade" true; echo ""; exit 130' INT TERM

     # Update Homebrew
     if [[ -t 1 ]]; then

@@ -73,7 +73,8 @@ update_via_homebrew() {
     trap - INT TERM

     # Cleanup temp files
-    rm -f "$temp_update" "$temp_upgrade"
+    safe_remove "$temp_update" true
+    safe_remove "$temp_upgrade" true

     if echo "$upgrade_output" | grep -q "already installed"; then
         local installed_version
@@ -126,7 +126,6 @@ tmutil() {
 start_section_spinner(){ :; }
 stop_section_spinner(){ :; }
 tm_is_running(){ return 1; }
-tm_snapshots_mounted(){ return 1; }

 DRY_RUN="false"
 clean_local_snapshots

@@ -157,7 +156,6 @@ start_section_spinner(){ :; }
 stop_section_spinner(){ :; }
 note_activity(){ :; }
 tm_is_running(){ return 1; }
-tm_snapshots_mounted(){ return 1; }

 DRY_RUN="true"
 clean_local_snapshots

@@ -193,7 +191,6 @@ start_section_spinner(){ :; }
 stop_section_spinner(){ :; }
 note_activity(){ :; }
 tm_is_running(){ return 1; }
-tm_snapshots_mounted(){ return 1; }

 unset -f read_key
@@ -101,6 +101,27 @@ setup() {
     [[ "$result" == "2" ]]
 }

+@test "filter_nested_artifacts: removes Xcode build subdirectories (Mac projects)" {
+    # Simulate Mac Xcode project with nested .build directories:
+    #   ~/www/testapp/build
+    #   ~/www/testapp/build/Framework.build
+    #   ~/www/testapp/build/Package.build
+    mkdir -p "$HOME/www/testapp/build/Framework.build"
+    mkdir -p "$HOME/www/testapp/build/Package.build"
+
+    result=$(bash -c "
+        source '$PROJECT_ROOT/lib/clean/project.sh'
+        printf '%s\n' \
+            '$HOME/www/testapp/build' \
+            '$HOME/www/testapp/build/Framework.build' \
+            '$HOME/www/testapp/build/Package.build' | \
+            filter_nested_artifacts | wc -l | tr -d ' '
+    ")
+
+    # Should only keep the top-level 'build' directory, filtering out nested .build dirs
+    [[ "$result" == "1" ]]
+}
+
 # Vendor protection unit tests
 @test "is_rails_project_root: detects valid Rails project" {
     mkdir -p "$HOME/www/test-rails/config"