Mirror of https://github.com/tw93/Mole.git (synced 2026-02-04 11:31:46 +00:00)

Merge branch 'dev'

bin/purge.sh

@@ -47,21 +47,119 @@ start_purge() {
        printf '\033[2J\033[H'
    fi
    printf '\n'
    echo -e "${PURPLE_BOLD}Purge Project Artifacts${NC}"

    # Initialize stats file in user cache directory
    local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"
    ensure_user_dir "$stats_dir"
    ensure_user_file "$stats_dir/purge_stats"
    ensure_user_file "$stats_dir/purge_count"
    ensure_user_file "$stats_dir/purge_scanning"
    echo "0" > "$stats_dir/purge_stats"
    echo "0" > "$stats_dir/purge_count"
    echo "" > "$stats_dir/purge_scanning"
}

# Perform the purge
perform_purge() {
    local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"
    local monitor_pid=""

    # Cleanup function
    cleanup_monitor() {
        # Remove scanning file to stop monitor
        rm -f "$stats_dir/purge_scanning" 2> /dev/null || true

        if [[ -n "$monitor_pid" ]]; then
            kill "$monitor_pid" 2> /dev/null || true
            wait "$monitor_pid" 2> /dev/null || true
        fi
        if [[ -t 1 ]]; then
            printf '\r\033[K\n\033[K\033[A'
        fi
    }

    # Set up trap for cleanup
    trap cleanup_monitor INT TERM

    # Show scanning with spinner on same line as title
    if [[ -t 1 ]]; then
        # Print title first
        printf '%s' "${PURPLE_BOLD}Purge Project Artifacts${NC} "

        # Start background monitor with ASCII spinner
        (
            local spinner_chars="|/-\\"
            local spinner_idx=0
            local last_path=""

            # Set up trap to exit cleanly
            trap 'exit 0' INT TERM

            # Function to truncate path in the middle
            truncate_path() {
                local path="$1"
                local max_len=80

                if [[ ${#path} -le $max_len ]]; then
                    echo "$path"
                    return
                fi

                # Calculate how much to show on each side
                local side_len=$(( (max_len - 3) / 2 ))
                local start="${path:0:$side_len}"
                local end="${path: -$side_len}"
                echo "${start}...${end}"
            }

            while [[ -f "$stats_dir/purge_scanning" ]]; do
                local current_path=$(cat "$stats_dir/purge_scanning" 2> /dev/null || echo "")
                local display_path=""

                if [[ -n "$current_path" ]]; then
                    display_path="${current_path/#$HOME/~}"
                    display_path=$(truncate_path "$display_path")
                    last_path="$display_path"
                elif [[ -n "$last_path" ]]; then
                    display_path="$last_path"
                fi

                # Get current spinner character
                local spin_char="${spinner_chars:$spinner_idx:1}"
                spinner_idx=$(( (spinner_idx + 1) % ${#spinner_chars} ))

                # Show title on first line, spinner and scanning info on second line
                if [[ -n "$display_path" ]]; then
                    printf '\r%s\n%s %sScanning %s\033[K\033[A' \
                        "${PURPLE_BOLD}Purge Project Artifacts${NC}" \
                        "${BLUE}${spin_char}${NC}" \
                        "${GRAY}" "$display_path"
                else
                    printf '\r%s\n%s %sScanning...\033[K\033[A' \
                        "${PURPLE_BOLD}Purge Project Artifacts${NC}" \
                        "${BLUE}${spin_char}${NC}" \
                        "${GRAY}"
                fi

                sleep 0.05
            done
            exit 0
        ) &
        monitor_pid=$!
    else
        echo -e "${PURPLE_BOLD}Purge Project Artifacts${NC}"
    fi

    clean_project_artifacts
    local exit_code=$?

    # Clean up
    trap - INT TERM
    cleanup_monitor

    if [[ -t 1 ]]; then
        echo -e "${PURPLE_BOLD}Purge Project Artifacts${NC}"
    fi

    # Exit codes:
    # 0 = success, show summary
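
The truncate_path helper above keeps the head and tail of a long path and elides the middle, mirroring the truncateMiddle call in the Go TUI further down. A minimal Go sketch of the same idea (not part of the commit; the function name and the 80-column budget are illustrative):

package main

import "fmt"

// truncateMiddle keeps both ends of a long path and drops the middle,
// reserving room for the "..." separator. Byte-based slicing, which is
// fine for ASCII paths; a rune-aware version would slice on runes.
func truncateMiddle(path string, maxLen int) string {
    if len(path) <= maxLen {
        return path
    }
    side := (maxLen - 3) / 2
    return path[:side] + "..." + path[len(path)-side:]
}

func main() {
    fmt.Println(truncateMiddle("/Users/demo/www/very/deep/project/node_modules", 24))
}
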
@@ -79,15 +177,11 @@ perform_purge() {
    local total_size_cleaned=0
    local total_items_cleaned=0

    # Read stats from user cache directory
    local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"

    if [[ -f "$stats_dir/purge_stats" ]]; then
        total_size_cleaned=$(cat "$stats_dir/purge_stats" 2> /dev/null || echo "0")
        rm -f "$stats_dir/purge_stats"
    fi

    # Read count
    if [[ -f "$stats_dir/purge_count" ]]; then
        total_items_cleaned=$(cat "$stats_dir/purge_count" 2> /dev/null || echo "0")
        rm -f "$stats_dir/purge_count"
@@ -13,8 +13,13 @@ LIB_DIR="$(cd "$SCRIPT_DIR/../lib" && pwd)"
# shellcheck source=../lib/core/common.sh
source "$LIB_DIR/core/common.sh"

readonly PAM_SUDO_FILE="${MOLE_PAM_SUDO_FILE:-/etc/pam.d/sudo}"
readonly PAM_SUDO_LOCAL_FILE="${MOLE_PAM_SUDO_LOCAL_FILE:-/etc/pam.d/sudo_local}"
# Set up global cleanup trap
trap cleanup_temp_files EXIT INT TERM

PAM_SUDO_FILE="${MOLE_PAM_SUDO_FILE:-/etc/pam.d/sudo}"
PAM_SUDO_LOCAL_FILE="${MOLE_PAM_SUDO_LOCAL_FILE:-$(dirname "$PAM_SUDO_FILE")/sudo_local}"
readonly PAM_SUDO_FILE
readonly PAM_SUDO_LOCAL_FILE
readonly PAM_TID_LINE="auth sufficient pam_tid.so"

# Check if Touch ID is already configured
@@ -66,9 +71,8 @@ show_status() {

# Enable Touch ID for sudo
enable_touchid() {
    # Cleanup trap
    # Cleanup trap handled by global EXIT trap
    local temp_file=""
    trap '[[ -n "${temp_file:-}" ]] && rm -f "${temp_file:-}"' EXIT

    # First check if system supports Touch ID
    if ! supports_touchid; then
@@ -88,7 +92,7 @@ enable_touchid() {
    # It is in sudo_local, but let's check if it's ALSO in sudo (incomplete migration)
    if grep -q "pam_tid.so" "$PAM_SUDO_FILE"; then
        # Clean up legacy config
        temp_file=$(mktemp)
        temp_file=$(create_temp_file)
        grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file"
        if sudo mv "$temp_file" "$PAM_SUDO_FILE" 2> /dev/null; then
            echo -e "${GREEN}${ICON_SUCCESS} Cleanup legacy configuration${NC}"
@@ -117,7 +121,7 @@ enable_touchid() {
    else
        # Append if not present
        if ! grep -q "pam_tid.so" "$PAM_SUDO_LOCAL_FILE"; then
            temp_file=$(mktemp)
            temp_file=$(create_temp_file)
            cp "$PAM_SUDO_LOCAL_FILE" "$temp_file"
            echo "$PAM_TID_LINE" >> "$temp_file"
            sudo mv "$temp_file" "$PAM_SUDO_LOCAL_FILE"
@@ -132,7 +136,7 @@ enable_touchid() {
    if $write_success; then
        # If we migrated from legacy, clean it up now
        if $is_legacy_configured; then
            temp_file=$(mktemp)
            temp_file=$(create_temp_file)
            grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file"
            sudo mv "$temp_file" "$PAM_SUDO_FILE"
            log_success "Touch ID migrated to sudo_local"
@@ -163,7 +167,7 @@ enable_touchid() {
    fi

    # Create temp file
    temp_file=$(mktemp)
    temp_file=$(create_temp_file)

    # Insert pam_tid.so after the first comment block
    awk '
@@ -194,9 +198,8 @@ enable_touchid() {

# Disable Touch ID for sudo
disable_touchid() {
    # Cleanup trap
    # Cleanup trap handled by global EXIT trap
    local temp_file=""
    trap '[[ -n "${temp_file:-}" ]] && rm -f "${temp_file:-}"' EXIT

    if ! is_touchid_configured; then
        echo -e "${YELLOW}Touch ID is not currently enabled${NC}"
@@ -206,13 +209,13 @@ disable_touchid() {
    # Check sudo_local first
    if [[ -f "$PAM_SUDO_LOCAL_FILE" ]] && grep -q "pam_tid.so" "$PAM_SUDO_LOCAL_FILE"; then
        # Remove from sudo_local
        temp_file=$(mktemp)
        temp_file=$(create_temp_file)
        grep -v "pam_tid.so" "$PAM_SUDO_LOCAL_FILE" > "$temp_file"

        if sudo mv "$temp_file" "$PAM_SUDO_LOCAL_FILE" 2> /dev/null; then
            # Since we modified sudo_local, we should also check if it's in sudo file (legacy cleanup)
            if grep -q "pam_tid.so" "$PAM_SUDO_FILE"; then
                temp_file=$(mktemp)
                temp_file=$(create_temp_file)
                grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file"
                sudo mv "$temp_file" "$PAM_SUDO_FILE"
            fi
@@ -236,7 +239,7 @@ disable_touchid() {
    fi

    # Remove pam_tid.so line
    temp_file=$(mktemp)
    temp_file=$(create_temp_file)
    grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file"

    if sudo mv "$temp_file" "$PAM_SUDO_FILE" 2> /dev/null; then
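
Throughout this file the commit swaps raw mktemp calls for create_temp_file, which registers every temp file so one global EXIT trap can remove them all. A rough Go analogue of that register-once, clean-up-once pattern (a sketch, not Mole code; all names here are invented):

package main

import (
    "os"
    "sync"
)

type tempRegistry struct {
    mu    sync.Mutex
    paths []string
}

// create makes a temp file and records it for later cleanup,
// like create_temp_file + register_temp_file above.
func (r *tempRegistry) create(prefix string) (string, error) {
    f, err := os.CreateTemp("", prefix+".*")
    if err != nil {
        return "", err
    }
    f.Close()
    r.mu.Lock()
    r.paths = append(r.paths, f.Name())
    r.mu.Unlock()
    return f.Name(), nil
}

// cleanup removes every registered file, best-effort like `rm -f`.
func (r *tempRegistry) cleanup() {
    r.mu.Lock()
    defer r.mu.Unlock()
    for _, p := range r.paths {
        os.Remove(p)
    }
    r.paths = nil
}

func main() {
    reg := &tempRegistry{}
    defer reg.cleanup() // one global cleanup, like the EXIT trap
    if p, err := reg.create("mole"); err == nil {
        _ = os.WriteFile(p, []byte("scratch"), 0o644)
    }
}
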
@@ -45,9 +45,10 @@ func TestScanPathConcurrentBasic(t *testing.T) {
    }

    var filesScanned, dirsScanned, bytesScanned int64
    current := ""
    current := &atomic.Value{}
    current.Store("")

    result, err := scanPathConcurrent(root, &filesScanned, &dirsScanned, &bytesScanned, &current)
    result, err := scanPathConcurrent(root, &filesScanned, &dirsScanned, &bytesScanned, current)
    if err != nil {
        t.Fatalf("scanPathConcurrent returned error: %v", err)
    }
@@ -204,7 +205,7 @@ func TestMeasureOverviewSize(t *testing.T) {
    if err := os.MkdirAll(target, 0o755); err != nil {
        t.Fatalf("create target: %v", err)
    }
    content := []byte(strings.Repeat("x", 2048))
    content := []byte(strings.Repeat("x", 4096))
    if err := os.WriteFile(filepath.Join(target, "data.bin"), content, 0o644); err != nil {
        t.Fatalf("write file: %v", err)
    }
@@ -225,6 +226,20 @@ func TestMeasureOverviewSize(t *testing.T) {
    if cached != size {
        t.Fatalf("snapshot mismatch: want %d, got %d", size, cached)
    }

    // Ensure measureOverviewSize does not use cache
    // APFS block size is 4KB, 4097 bytes should use more blocks
    content = []byte(strings.Repeat("x", 4097))
    if err := os.WriteFile(filepath.Join(target, "data2.bin"), content, 0o644); err != nil {
        t.Fatalf("write file: %v", err)
    }
    size2, err := measureOverviewSize(target)
    if err != nil {
        t.Fatalf("measureOverviewSize: %v", err)
    }
    if size2 == size {
        t.Fatalf("measureOverviewSize used cache")
    }
}

func TestIsCleanableDir(t *testing.T) {
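
The new test leans on allocated (du-style) size rather than logical size: 4096 bytes fits one 4 KB APFS block, while 4097 bytes forces another, so the measured total must change if the cache is really bypassed. A hedged sketch of reading allocated size on a Unix-like system (not the repo's measureOverviewSize, which shells out to du):

package main

import (
    "fmt"
    "os"
    "syscall"
)

// allocatedSize returns on-disk size; assumes a Unix-like OS where
// FileInfo.Sys() is *syscall.Stat_t and Blocks counts 512-byte units.
func allocatedSize(path string) (int64, error) {
    info, err := os.Stat(path)
    if err != nil {
        return 0, err
    }
    if st, ok := info.Sys().(*syscall.Stat_t); ok {
        return st.Blocks * 512, nil // stat(2) blocks are always 512 bytes
    }
    return info.Size(), nil // fallback: logical size
}

func main() {
    sz, err := allocatedSize("/etc/hosts")
    fmt.Println(sz, err)
}
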
@@ -347,10 +362,11 @@ func TestScanPathPermissionError(t *testing.T) {
    }()

    var files, dirs, bytes int64
    current := ""
    current := &atomic.Value{}
    current.Store("")

    // Scanning the locked dir itself should fail.
    _, err := scanPathConcurrent(lockedDir, &files, &dirs, &bytes, &current)
    _, err := scanPathConcurrent(lockedDir, &files, &dirs, &bytes, current)
    if err == nil {
        t.Fatalf("expected error scanning locked directory, got nil")
    }
@@ -97,7 +97,7 @@ type model struct {
    filesScanned *int64
    dirsScanned  *int64
    bytesScanned *int64
    currentPath  *string
    currentPath  *atomic.Value
    showLargeFiles bool
    isOverview     bool
    deleteConfirm  bool
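
The switch from `currentPath *string` to `*atomic.Value` is the heart of this change: scanner goroutines write the field while the render loop reads it, and doing that through a bare *string is a data race (`go test -race` flags it). A minimal sketch of the Store/Load discipline, assuming nothing beyond the standard library:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

func main() {
    current := &atomic.Value{}
    current.Store("") // initialize before any concurrent Load

    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func(n int) {
            defer wg.Done()
            current.Store(fmt.Sprintf("/scan/dir-%d", n)) // race-free write
        }(i)
    }
    wg.Wait()
    fmt.Println(current.Load().(string)) // race-free read, as in the View code
}
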
@@ -162,7 +162,8 @@ func main() {

func newModel(path string, isOverview bool) model {
    var filesScanned, dirsScanned, bytesScanned int64
    currentPath := ""
    currentPath := &atomic.Value{}
    currentPath.Store("")
    var overviewFilesScanned, overviewDirsScanned, overviewBytesScanned int64
    overviewCurrentPath := ""
@@ -174,7 +175,7 @@ func newModel(path string, isOverview bool) model {
        filesScanned: &filesScanned,
        dirsScanned:  &dirsScanned,
        bytesScanned: &bytesScanned,
        currentPath:  &currentPath,
        currentPath:  currentPath,
        showLargeFiles: false,
        isOverview:     isOverview,
        cache:          make(map[string]historyEntry),
@@ -394,7 +395,7 @@ func (m model) scanCmd(path string) tea.Cmd {
}

func tickCmd() tea.Cmd {
    return tea.Tick(time.Millisecond*80, func(t time.Time) tea.Msg {
    return tea.Tick(time.Millisecond*100, func(t time.Time) tea.Msg {
        return tickMsg(t)
    })
}
@@ -434,7 +435,7 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
    atomic.StoreInt64(m.dirsScanned, 0)
    atomic.StoreInt64(m.bytesScanned, 0)
    if m.currentPath != nil {
        *m.currentPath = ""
        m.currentPath.Store("")
    }
    return m, tea.Batch(m.scanCmd(m.path), tickCmd())
}
@@ -683,6 +684,11 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
    m.largeMultiSelected = make(map[string]bool)

    if m.inOverviewMode() {
        // Explicitly invalidate cache for all overview entries to force re-scan
        for _, entry := range m.entries {
            invalidateCache(entry.Path)
        }

        m.overviewSizeCache = make(map[string]int64)
        m.overviewScanningSet = make(map[string]bool)
        m.hydrateOverviewEntries() // Reset sizes to pending
@@ -707,7 +713,7 @@ func (m model) updateKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) {
    atomic.StoreInt64(m.dirsScanned, 0)
    atomic.StoreInt64(m.bytesScanned, 0)
    if m.currentPath != nil {
        *m.currentPath = ""
        m.currentPath.Store("")
    }
    return m, tea.Batch(m.scanCmd(m.path), tickCmd())
case "t", "T":
@@ -979,7 +985,7 @@ func (m model) enterSelectedDir() (tea.Model, tea.Cmd) {
    atomic.StoreInt64(m.dirsScanned, 0)
    atomic.StoreInt64(m.bytesScanned, 0)
    if m.currentPath != nil {
        *m.currentPath = ""
        m.currentPath.Store("")
    }

    if cached, ok := m.cache[m.path]; ok && !cached.Dirty {
@@ -23,7 +23,7 @@ import (

var scanGroup singleflight.Group

func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *int64, currentPath *string) (scanResult, error) {
func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *int64, currentPath *atomic.Value) (scanResult, error) {
    children, err := os.ReadDir(root)
    if err != nil {
        return scanResult{}, err
@@ -50,10 +50,20 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
        numWorkers = 1
    }
    sem := make(chan struct{}, numWorkers)
    duSem := make(chan struct{}, min(4, runtime.NumCPU()))        // limits concurrent du processes
    duQueueSem := make(chan struct{}, min(4, runtime.NumCPU())*2) // limits how many goroutines may be waiting to run du
    var wg sync.WaitGroup

    // Collect results via channels.
    entryChan := make(chan dirEntry, len(children))
    // Cap buffer size to prevent memory spikes with huge directories.
    entryBufSize := len(children)
    if entryBufSize > 4096 {
        entryBufSize = 4096
    }
    if entryBufSize < 1 {
        entryBufSize = 1
    }
    entryChan := make(chan dirEntry, entryBufSize)
    largeFileChan := make(chan fileEntry, maxLargeFiles*2)

    var collectorWg sync.WaitGroup
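
duSem and duQueueSem form a two-tier channel semaphore: the first bounds how many du processes run at once, the second bounds how many goroutines may even be queued for one, so a huge tree cannot pile up thousands of blocked goroutines. A stripped-down sketch with invented limits and a stand-in for du:

package main

import (
    "fmt"
    "sync"
    "time"
)

func main() {
    runSem := make(chan struct{}, 2)   // at most 2 running
    queueSem := make(chan struct{}, 4) // at most 4 queued or running

    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        queueSem <- struct{}{} // back-pressure before spawning, like duQueueSem
        wg.Add(1)
        go func(n int) {
            defer wg.Done()
            defer func() { <-queueSem }()

            runSem <- struct{}{} // acquire a run slot, like duSem
            defer func() { <-runSem }()
            time.Sleep(10 * time.Millisecond) // pretend to run du
            fmt.Println("done", n)
        }(i)
    }
    wg.Wait()
}
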
@@ -126,10 +136,10 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in

    // ~/Library is scanned separately; reuse cache when possible.
    if isHomeDir && child.Name() == "Library" {
        sem <- struct{}{}
        wg.Add(1)
        go func(name, path string) {
            defer wg.Done()
            sem <- struct{}{}
            defer func() { <-sem }()

            var size int64
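
Note the pattern change in this hunk: `sem <- struct{}{}` moves from before `go func` to inside it, so the walker loop never blocks waiting for a worker slot; it spawns the goroutine and lets the goroutine queue for the slot itself. In miniature:

package main

import "sync"

func main() {
    sem := make(chan struct{}, 2)
    var wg sync.WaitGroup
    for i := 0; i < 8; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            sem <- struct{}{}        // acquire inside the goroutine...
            defer func() { <-sem }() // ...so the spawning loop never stalls
            // bounded work goes here
        }()
    }
    wg.Wait()
}
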
@@ -138,7 +148,7 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
            } else if cached, err := loadCacheFromDisk(path); err == nil {
                size = cached.TotalSize
            } else {
                size = calculateDirSizeConcurrent(path, largeFileChan, filesScanned, dirsScanned, bytesScanned, currentPath)
                size = calculateDirSizeConcurrent(path, largeFileChan, duSem, duQueueSem, filesScanned, dirsScanned, bytesScanned, currentPath)
            }
            atomic.AddInt64(&total, size)
            atomic.AddInt64(dirsScanned, 1)
@@ -156,13 +166,17 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in

    // Folded dirs: fast size without expanding.
    if shouldFoldDirWithPath(child.Name(), fullPath) {
        duQueueSem <- struct{}{}
        wg.Add(1)
        go func(name, path string) {
            defer wg.Done()
            sem <- struct{}{}
            defer func() { <-sem }()
            defer func() { <-duQueueSem }()

            size, err := getDirectorySizeFromDu(path)
            size, err := func() (int64, error) {
                duSem <- struct{}{}
                defer func() { <-duSem }()
                return getDirectorySizeFromDu(path)
            }()
            if err != nil || size <= 0 {
                size = calculateDirSizeFast(path, filesScanned, dirsScanned, bytesScanned, currentPath)
            }
@@ -180,13 +194,13 @@ func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *in
        continue
    }

    sem <- struct{}{}
    wg.Add(1)
    go func(name, path string) {
        defer wg.Done()
        sem <- struct{}{}
        defer func() { <-sem }()

        size := calculateDirSizeConcurrent(path, largeFileChan, filesScanned, dirsScanned, bytesScanned, currentPath)
        size := calculateDirSizeConcurrent(path, largeFileChan, duSem, duQueueSem, filesScanned, dirsScanned, bytesScanned, currentPath)
        atomic.AddInt64(&total, size)
        atomic.AddInt64(dirsScanned, 1)
@@ -280,7 +294,7 @@ func shouldSkipFileForLargeTracking(path string) bool {
}

// calculateDirSizeFast performs concurrent dir sizing using os.ReadDir.
func calculateDirSizeFast(root string, filesScanned, dirsScanned, bytesScanned *int64, currentPath *string) int64 {
func calculateDirSizeFast(root string, filesScanned, dirsScanned, bytesScanned *int64, currentPath *atomic.Value) int64 {
    var total int64
    var wg sync.WaitGroup
@@ -299,7 +313,7 @@ func calculateDirSizeFast(root string, filesScanned, dirsScanned, bytesScanned *
        }

        if currentPath != nil && atomic.LoadInt64(filesScanned)%int64(batchUpdateSize) == 0 {
            *currentPath = dirPath
            currentPath.Store(dirPath)
        }

        entries, err := os.ReadDir(dirPath)
@@ -311,11 +325,11 @@ func calculateDirSizeFast(root string, filesScanned, dirsScanned, bytesScanned *

        for _, entry := range entries {
            if entry.IsDir() {
                wg.Add(1)
                subDir := filepath.Join(dirPath, entry.Name())
                sem <- struct{}{}
                wg.Add(1)
                go func(p string) {
                    defer wg.Done()
                    sem <- struct{}{}
                    defer func() { <-sem }()
                    walk(p)
                }(subDir)
@@ -416,7 +430,7 @@ func isInFoldedDir(path string) bool {
    return false
}

func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, filesScanned, dirsScanned, bytesScanned *int64, currentPath *string) int64 {
func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, duSem, duQueueSem chan struct{}, filesScanned, dirsScanned, bytesScanned *int64, currentPath *atomic.Value) int64 {
    children, err := os.ReadDir(root)
    if err != nil {
        return 0
@@ -446,26 +460,35 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, fil

        if child.IsDir() {
            if shouldFoldDirWithPath(child.Name(), fullPath) {
                duQueueSem <- struct{}{}
                wg.Add(1)
                go func(path string) {
                    defer wg.Done()
                    size, err := getDirectorySizeFromDu(path)
                    if err == nil && size > 0 {
                        atomic.AddInt64(&total, size)
                    defer func() { <-duQueueSem }()

                    size, err := func() (int64, error) {
                        duSem <- struct{}{}
                        defer func() { <-duSem }()
                        return getDirectorySizeFromDu(path)
                    }()
                    if err != nil || size <= 0 {
                        size = calculateDirSizeFast(path, filesScanned, dirsScanned, bytesScanned, currentPath)
                    } else {
                        atomic.AddInt64(bytesScanned, size)
                        atomic.AddInt64(dirsScanned, 1)
                    }
                    atomic.AddInt64(&total, size)
                    atomic.AddInt64(dirsScanned, 1)
                }(fullPath)
                continue
            }

            sem <- struct{}{}
            wg.Add(1)
            go func(path string) {
                defer wg.Done()
                sem <- struct{}{}
                defer func() { <-sem }()

                size := calculateDirSizeConcurrent(path, largeFileChan, filesScanned, dirsScanned, bytesScanned, currentPath)
                size := calculateDirSizeConcurrent(path, largeFileChan, duSem, duQueueSem, filesScanned, dirsScanned, bytesScanned, currentPath)
                atomic.AddInt64(&total, size)
                atomic.AddInt64(dirsScanned, 1)
            }(fullPath)
@@ -488,7 +511,7 @@ func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, fil

        // Update current path occasionally to prevent UI jitter.
        if currentPath != nil && atomic.LoadInt64(filesScanned)%int64(batchUpdateSize) == 0 {
            *currentPath = fullPath
            currentPath.Store(fullPath)
        }
    }
@@ -519,10 +542,6 @@ func measureOverviewSize(path string) (int64, error) {
        excludePath = filepath.Join(home, "Library")
    }

    if cached, err := loadStoredOverviewSize(path); err == nil && cached > 0 {
        return cached, nil
    }

    if duSize, err := getDirectorySizeFromDuWithExclude(path, excludePath); err == nil && duSize > 0 {
        _ = storeOverviewSize(path, duSize)
        return duSize, nil
@@ -32,7 +32,7 @@ func (m model) View() string {
        return b.String()
    } else {
        fmt.Fprintf(&b, "%sSelect a location to explore:%s ", colorGray, colorReset)
        fmt.Fprintf(&b, "%s%s%s%s Scanning...\n\n", colorCyan, colorBold, spinnerFrames[m.spinner], colorReset)
        fmt.Fprintf(&b, "%s%s%s%s %s\n\n", colorCyan, colorBold, spinnerFrames[m.spinner], colorReset, m.status)
    }
} else {
    hasPending := false
@@ -44,7 +44,7 @@ func (m model) View() string {
    }
    if hasPending {
        fmt.Fprintf(&b, "%sSelect a location to explore:%s ", colorGray, colorReset)
        fmt.Fprintf(&b, "%s%s%s%s Scanning...\n\n", colorCyan, colorBold, spinnerFrames[m.spinner], colorReset)
        fmt.Fprintf(&b, "%s%s%s%s %s\n\n", colorCyan, colorBold, spinnerFrames[m.spinner], colorReset, m.status)
    } else {
        fmt.Fprintf(&b, "%sSelect a location to explore:%s\n\n", colorGray, colorReset)
    }
@@ -99,7 +99,7 @@ func (m model) View() string {
        colorGreen, humanizeBytes(bytesScanned), colorReset)

    if m.currentPath != nil {
        currentPath := *m.currentPath
        currentPath := m.currentPath.Load().(string)
        if currentPath != "" {
            shortPath := displayPath(currentPath)
            shortPath = truncateMiddle(shortPath, 50)

install.sh

@@ -52,6 +52,39 @@ log_error() { echo -e "${YELLOW}${ICON_ERROR}${NC} $1"; }
log_admin() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}${ICON_ADMIN}${NC} $1"; }
log_confirm() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}${ICON_CONFIRM}${NC} $1"; }

safe_rm() {
    local target="${1:-}"
    local tmp_root

    if [[ -z "$target" ]]; then
        log_error "safe_rm: empty path"
        return 1
    fi
    if [[ ! -e "$target" ]]; then
        return 0
    fi

    tmp_root="${TMPDIR:-/tmp}"
    case "$target" in
        "$tmp_root" | /tmp)
            log_error "safe_rm: refusing to remove temp root: $target"
            return 1
            ;;
        "$tmp_root"/* | /tmp/*) ;;
        *)
            log_error "safe_rm: refusing to remove non-temp path: $target"
            return 1
            ;;
    esac

    if [[ -d "$target" ]]; then
        find "$target" -depth \( -type f -o -type l \) -exec rm -f {} + 2> /dev/null || true
        find "$target" -depth -type d -exec rmdir {} + 2> /dev/null || true
    else
        rm -f "$target" 2> /dev/null || true
    fi
}

# Install defaults
INSTALL_DIR="/usr/local/bin"
CONFIG_DIR="$HOME/.config/mole"
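
safe_rm refuses to touch anything that is not strictly under the temp root, including the temp root itself. The same guard in Go, as a sketch (safeRemove is an invented name; a production version would also worry about symlinks escaping the root):

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"
)

// safeRemove deletes target only if it lies strictly inside the temp root.
func safeRemove(target string) error {
    tmpRoot := filepath.Clean(os.TempDir())
    abs, err := filepath.Abs(target)
    if err != nil {
        return err
    }
    // Reject the temp root itself and anything outside it.
    if abs == tmpRoot || !strings.HasPrefix(abs, tmpRoot+string(os.PathSeparator)) {
        return fmt.Errorf("refusing to remove non-temp path: %s", abs)
    }
    return os.RemoveAll(abs)
}

func main() {
    fmt.Println(safeRemove("/"))          // refused: outside temp root
    fmt.Println(safeRemove(os.TempDir())) // refused: temp root itself
}
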
@@ -100,7 +133,16 @@ resolve_source_dir() {

    local tmp
    tmp="$(mktemp -d)"
    trap 'stop_line_spinner 2>/dev/null; rm -rf "$tmp"' EXIT

    # Safe cleanup function for temporary directory
    cleanup_tmp() {
        stop_line_spinner 2> /dev/null || true
        if [[ -z "${tmp:-}" ]]; then
            return 0
        fi
        safe_rm "$tmp"
    }
    trap cleanup_tmp EXIT

    local branch="${MOLE_VERSION:-}"
    if [[ -z "$branch" ]]; then
@@ -125,7 +167,7 @@ resolve_source_dir() {

    start_line_spinner "Fetching Mole source (${branch})..."
    if command -v curl > /dev/null 2>&1; then
        if curl -fsSL -o "$tmp/mole.tar.gz" "$url" 2> /dev/null; then
        if curl -fsSL --connect-timeout 10 --max-time 60 -o "$tmp/mole.tar.gz" "$url" 2> /dev/null; then
            if tar -xzf "$tmp/mole.tar.gz" -C "$tmp" 2> /dev/null; then
                stop_line_spinner
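
The curl change adds --connect-timeout 10 and --max-time 60 so a stalled download cannot hang the installer indefinitely. For comparison, the equivalent bounds in Go's http.Client look roughly like this (a sketch; the URL is a placeholder):

package main

import (
    "fmt"
    "io"
    "net"
    "net/http"
    "time"
)

func main() {
    client := &http.Client{
        Timeout: 60 * time.Second, // overall budget, like --max-time 60
        Transport: &http.Transport{
            DialContext: (&net.Dialer{
                Timeout: 10 * time.Second, // connect budget, like --connect-timeout 10
            }).DialContext,
        },
    }
    resp, err := client.Get("https://example.com/")
    if err != nil {
        fmt.Println("fetch failed:", err)
        return
    }
    defer resp.Body.Close()
    n, _ := io.Copy(io.Discard, resp.Body)
    fmt.Println("fetched", n, "bytes")
}
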
@@ -45,7 +45,7 @@ readonly PURGE_TARGETS=(
readonly MIN_AGE_DAYS=7
# Scan depth defaults (relative to search root).
readonly PURGE_MIN_DEPTH_DEFAULT=2
readonly PURGE_MAX_DEPTH_DEFAULT=8
readonly PURGE_MAX_DEPTH_DEFAULT=4
# Search paths (default, can be overridden via config file).
readonly DEFAULT_PURGE_SEARCH_PATHS=(
    "$HOME/www"
@@ -339,6 +339,11 @@ scan_purge_targets() {
    if [[ ! -d "$search_path" ]]; then
        return
    fi

    # Update current scanning path
    local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"
    echo "$search_path" > "$stats_dir/purge_scanning" 2> /dev/null || true

    if command -v fd > /dev/null 2>&1; then
        # Escape regex special characters in target names for fd patterns
        local escaped_targets=()
@@ -356,28 +361,39 @@ scan_purge_targets() {
            "--type" "d"
            "--min-depth" "$min_depth"
            "--max-depth" "$max_depth"
            "--threads" "4"
            "--threads" "8"
            "--exclude" ".git"
            "--exclude" "Library"
            "--exclude" ".Trash"
            "--exclude" "Applications"
        )
        fd "${fd_args[@]}" "$pattern" "$search_path" 2> /dev/null | while IFS= read -r item; do
            if is_safe_project_artifact "$item" "$search_path"; then
                echo "$item"
            fi
        done | filter_nested_artifacts | filter_protected_artifacts > "$output_file"
        # Write to temp file first, then filter - more efficient than piping
        fd "${fd_args[@]}" "$pattern" "$search_path" 2> /dev/null > "$output_file.raw" || true

        # Single pass: safe + nested + protected
        if [[ -f "$output_file.raw" ]]; then
            while IFS= read -r item; do
                # Check if we should abort (scanning file removed by Ctrl+C)
                if [[ ! -f "$stats_dir/purge_scanning" ]]; then
                    rm -f "$output_file.raw"
                    return
                fi

                if [[ -n "$item" ]] && is_safe_project_artifact "$item" "$search_path"; then
                    echo "$item"
                    # Update scanning path to show current project directory
                    local project_dir=$(dirname "$item")
                    echo "$project_dir" > "$stats_dir/purge_scanning" 2> /dev/null || true
                fi
            done < "$output_file.raw" | filter_nested_artifacts | filter_protected_artifacts > "$output_file"
            rm -f "$output_file.raw"
        else
            touch "$output_file"
        fi
    else
        # Pruned find avoids descending into heavy directories.
        local prune_args=()
        local prune_dirs=(".git" "Library" ".Trash" "Applications")
        for dir in "${prune_dirs[@]}"; do
            prune_args+=("-name" "$dir" "-prune" "-o")
        done
        for target in "${PURGE_TARGETS[@]}"; do
            prune_args+=("-name" "$target" "-print" "-prune" "-o")
        done
        local find_expr=()
        local prune_dirs=(".git" "Library" ".Trash" "Applications")
        for dir in "${prune_dirs[@]}"; do
            find_expr+=("-name" "$dir" "-prune" "-o")
        done
@@ -390,28 +406,49 @@ scan_purge_targets() {
            ((i++))
        done
        command find "$search_path" -mindepth "$min_depth" -maxdepth "$max_depth" -type d \
            \( "${find_expr[@]}" \) 2> /dev/null | while IFS= read -r item; do
            if is_safe_project_artifact "$item" "$search_path"; then
                echo "$item"
            fi
        done | filter_nested_artifacts | filter_protected_artifacts > "$output_file"
            \( "${find_expr[@]}" \) 2> /dev/null > "$output_file.raw" || true

        # Single pass: safe + nested + protected
        if [[ -f "$output_file.raw" ]]; then
            while IFS= read -r item; do
                # Check if we should abort (scanning file removed by Ctrl+C)
                if [[ ! -f "$stats_dir/purge_scanning" ]]; then
                    rm -f "$output_file.raw"
                    return
                fi

                if [[ -n "$item" ]] && is_safe_project_artifact "$item" "$search_path"; then
                    echo "$item"
                    # Update scanning path to show current project directory
                    local project_dir=$(dirname "$item")
                    echo "$project_dir" > "$stats_dir/purge_scanning" 2> /dev/null || true
                fi
            done < "$output_file.raw" | filter_nested_artifacts | filter_protected_artifacts > "$output_file"
            rm -f "$output_file.raw"
        else
            touch "$output_file"
        fi
    fi
}
# Filter out nested artifacts (e.g. node_modules inside node_modules).
# Filter out nested artifacts (e.g. node_modules inside node_modules, .build inside build).
# Optimized: Sort paths to put parents before children, then filter in single pass.
filter_nested_artifacts() {
    while IFS= read -r item; do
        local parent_dir=$(dirname "$item")
        local is_nested=false
        for target in "${PURGE_TARGETS[@]}"; do
            if [[ "$parent_dir" == *"/$target/"* || "$parent_dir" == *"/$target" ]]; then
                is_nested=true
                break
            fi
        done
        if [[ "$is_nested" == "false" ]]; then
            echo "$item"
        fi
    done
    # 1. Append trailing slash to each path (to ensure /foo/bar starts with /foo/)
    # 2. Sort to group parents and children (LC_COLLATE=C ensures standard sorting)
    # 3. Use awk to filter out paths that start with the previous kept path
    # 4. Remove trailing slash
    sed 's|[^/]$|&/|' | LC_COLLATE=C sort | awk '
        BEGIN { last_kept = "" }
        {
            current = $0
            # If current path starts with last_kept, it is nested
            # Only check if last_kept is not empty
            if (last_kept == "" || index(current, last_kept) != 1) {
                print current
                last_kept = current
            }
        }
    ' | sed 's|/$||'
}

filter_protected_artifacts() {
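
The rewritten filter_nested_artifacts replaces a per-item loop over PURGE_TARGETS with a classic trick: slash-terminate every path, byte-sort so each directory is immediately followed by its descendants, then drop anything prefixed by the last kept path, giving one pass over sorted input. The same algorithm in Go, as a sketch:

package main

import (
    "fmt"
    "sort"
    "strings"
)

func filterNested(paths []string) []string {
    terminated := make([]string, len(paths))
    for i, p := range paths {
        terminated[i] = strings.TrimSuffix(p, "/") + "/" // "/foo/" prefixes "/foo/bar/"
    }
    sort.Strings(terminated) // byte-wise sort, like LC_COLLATE=C

    var kept []string
    last := ""
    for _, p := range terminated {
        if last == "" || !strings.HasPrefix(p, last) {
            kept = append(kept, strings.TrimSuffix(p, "/"))
            last = p
        }
    }
    return kept
}

func main() {
    fmt.Println(filterNested([]string{
        "/www/app/build",
        "/www/app/build/Framework.build",
        "/www/app/node_modules",
    }))
    // keeps /www/app/build and /www/app/node_modules
}
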
@@ -703,17 +740,14 @@ clean_project_artifacts() {
        for temp in "${scan_temps[@]+"${scan_temps[@]}"}"; do
            rm -f "$temp" 2> /dev/null || true
        done
        if [[ -t 1 ]]; then
            stop_inline_spinner
        fi
        # Clean up purge scanning file
        local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"
        rm -f "$stats_dir/purge_scanning" 2> /dev/null || true
        echo ""
        exit 130
    }
    trap cleanup_scan INT TERM
    # Start parallel scanning of all paths at once
    if [[ -t 1 ]]; then
        start_inline_spinner "Scanning projects..."
    fi
    # Scanning is started from purge.sh with start_inline_spinner
    # Launch all scans in parallel
    for path in "${PURGE_SEARCH_PATHS[@]}"; do
        if [[ -d "$path" ]]; then
@@ -730,9 +764,6 @@ clean_project_artifacts() {
    for pid in "${scan_pids[@]+"${scan_pids[@]}"}"; do
        wait "$pid" 2> /dev/null || true
    done
    if [[ -t 1 ]]; then
        stop_inline_spinner
    fi
    # Collect all results
    for scan_output in "${scan_temps[@]+"${scan_temps[@]}"}"; do
        if [[ -f "$scan_output" ]]; then
@@ -267,40 +267,21 @@ tm_is_running() {
    grep -qE '(^|[[:space:]])("Running"|Running)[[:space:]]*=[[:space:]]*1([[:space:]]*;|$)' <<< "$st"
}

# Returns 0 if snapshot mounts exist under local snapshot paths
# Returns 1 if none found
# Returns 2 if mount state cannot be determined
tm_snapshots_mounted() {
    local m
    if ! m="$(run_with_timeout 3 mount 2> /dev/null)"; then
        return 2
    fi
    # Match modern and legacy local-snapshot browse mounts:
    # - /Volumes/com.apple.TimeMachine.localsnapshots/... (APFS)
    # - /.TimeMachine (APFS)
    # - /Volumes/MobileBackups (HFS+, legacy)
    grep -qE '[[:space:]]on[[:space:]](/\.TimeMachine(/|[[:space:]])|/Volumes/com\.apple\.TimeMachine\.localsnapshots(/|[[:space:]])|/Volumes/MobileBackups(/|[[:space:]]))' <<< "$m"
}

# Local APFS snapshots (keep the most recent).
clean_local_snapshots() {
    if ! command -v tmutil > /dev/null 2>&1; then
        return 0
    fi

    local rc_running rc_mounted
    rc_running=0
    local rc_running=0
    tm_is_running || rc_running=$?

    rc_mounted=0
    tm_snapshots_mounted || rc_mounted=$?

    if [[ $rc_running -eq 2 || $rc_mounted -eq 2 ]]; then
    if [[ $rc_running -eq 2 ]]; then
        echo -e " ${YELLOW}!${NC} Could not determine Time Machine status; skipping snapshot cleanup"
        return 0
    fi

    if [[ $rc_running -eq 0 || $rc_mounted -eq 0 ]]; then
    if [[ $rc_running -eq 0 ]]; then
        echo -e " ${YELLOW}!${NC} Time Machine is active; skipping snapshot cleanup"
        return 0
    fi
@@ -512,7 +512,7 @@ declare -a MOLE_TEMP_DIRS=()
create_temp_file() {
    local temp
    temp=$(mktemp) || return 1
    MOLE_TEMP_FILES+=("$temp")
    register_temp_file "$temp"
    echo "$temp"
}
@@ -520,7 +520,7 @@ create_temp_file() {
create_temp_dir() {
    local temp
    temp=$(mktemp -d) || return 1
    MOLE_TEMP_DIRS+=("$temp")
    register_temp_dir "$temp"
    echo "$temp"
}
@@ -538,9 +538,17 @@ register_temp_dir() {
# Compatible with both BSD mktemp (macOS default) and GNU mktemp (coreutils)
mktemp_file() {
    local prefix="${1:-mole}"
    local temp
    local error_msg
    # Use TMPDIR if set, otherwise /tmp
    # Add .XXXXXX suffix to work with both BSD and GNU mktemp
    mktemp "${TMPDIR:-/tmp}/${prefix}.XXXXXX"
    if ! error_msg=$(mktemp "${TMPDIR:-/tmp}/${prefix}.XXXXXX" 2>&1); then
        echo "Error: Failed to create temporary file: $error_msg" >&2
        return 1
    fi
    temp="$error_msg"
    register_temp_file "$temp"
    echo "$temp"
}

# Cleanup all tracked temp files and directories
@@ -34,7 +34,7 @@ update_via_homebrew() {
    temp_upgrade=$(mktemp_file "brew_upgrade")

    # Set up trap for interruption (Ctrl+C) with inline cleanup
    trap 'stop_inline_spinner 2>/dev/null; rm -f "$temp_update" "$temp_upgrade" 2>/dev/null; echo ""; exit 130' INT TERM
    trap 'stop_inline_spinner 2>/dev/null; safe_remove "$temp_update" true; safe_remove "$temp_upgrade" true; echo ""; exit 130' INT TERM

    # Update Homebrew
    if [[ -t 1 ]]; then
@@ -73,7 +73,8 @@ update_via_homebrew() {
    trap - INT TERM

    # Cleanup temp files
    rm -f "$temp_update" "$temp_upgrade"
    safe_remove "$temp_update" true
    safe_remove "$temp_upgrade" true

    if echo "$upgrade_output" | grep -q "already installed"; then
        local installed_version
@@ -126,7 +126,6 @@ tmutil() {
start_section_spinner(){ :; }
stop_section_spinner(){ :; }
tm_is_running(){ return 1; }
tm_snapshots_mounted(){ return 1; }

DRY_RUN="false"
clean_local_snapshots
@@ -157,7 +156,6 @@ start_section_spinner(){ :; }
stop_section_spinner(){ :; }
note_activity(){ :; }
tm_is_running(){ return 1; }
tm_snapshots_mounted(){ return 1; }

DRY_RUN="true"
clean_local_snapshots
@@ -193,7 +191,6 @@ start_section_spinner(){ :; }
stop_section_spinner(){ :; }
note_activity(){ :; }
tm_is_running(){ return 1; }
tm_snapshots_mounted(){ return 1; }

unset -f read_key
@@ -101,6 +101,27 @@ setup() {
    [[ "$result" == "2" ]]
}

@test "filter_nested_artifacts: removes Xcode build subdirectories (Mac projects)" {
    # Simulate Mac Xcode project with nested .build directories:
    # ~/www/testapp/build
    # ~/www/testapp/build/Framework.build
    # ~/www/testapp/build/Package.build
    mkdir -p "$HOME/www/testapp/build/Framework.build"
    mkdir -p "$HOME/www/testapp/build/Package.build"

    result=$(bash -c "
        source '$PROJECT_ROOT/lib/clean/project.sh'
        printf '%s\n' \
            '$HOME/www/testapp/build' \
            '$HOME/www/testapp/build/Framework.build' \
            '$HOME/www/testapp/build/Package.build' | \
            filter_nested_artifacts | wc -l | tr -d ' '
    ")

    # Should only keep the top-level 'build' directory, filtering out nested .build dirs
    [[ "$result" == "1" ]]
}

# Vendor protection unit tests
@test "is_rails_project_root: detects valid Rails project" {
    mkdir -p "$HOME/www/test-rails/config"