1
0
mirror of https://github.com/tw93/Mole.git synced 2026-02-04 12:41:46 +00:00

chore: restructure windows branch (move windows/ content to root, remove macos files)

This commit is contained in:
Tw93
2026-01-10 13:23:29 +08:00
parent e84a457c2f
commit edf5ed09a9
140 changed files with 1472 additions and 34059 deletions

74
.gitignore vendored
View File

@@ -1,70 +1,16 @@
# macOS
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
# Windows Mole - .gitignore
# Editor files
*~
*.swp
*.swo
.idea/
.vscode/*.code-workspace
.vscode/settings.json
# Build artifacts
bin/*.exe
# Logs
*.log
logs/
# Temporary files
tmp/
temp/
*.tmp
*.temp
*.dmg
tests/tmp-*
# Cache
.cache/
*.cache
# Go build cache
.gocache/
.gomod/
# Backup files
*.bak
*.backup
# System files
*.pid
*.lock
# AI Assistant Instructions
.claude/
.gemini/
.kiro/
CLAUDE.md
GEMINI.md
.cursorrules
# Go build artifacts (development)
cmd/analyze/analyze
cmd/status/status
/status
/analyze
mole-analyze
# Go binaries
bin/analyze-go
bin/status-go
bin/analyze-darwin-*
bin/status-darwin-*
# IDE files
.idea/
.vscode/
*.code-workspace
# Test artifacts
tests/tmp-*/
tests/*.tmp
tests/*.log
session.json
run_tests.ps1
*.test
coverage.out

View File

@@ -1,40 +1,44 @@
# NOTE(review): this section is a rendered commit diff interleaving the OLD
# (macOS, darwin release targets) and NEW (Windows, .exe targets) Makefiles.
# The duplicate `build:` rules and duplicate `.PHONY` lines below come from
# the diff rendering, not from either real Makefile — do not "fix" them here.
# Also note the scrape has stripped the hard TABs from recipe lines.
# Makefile for Mole
# Mole Windows - Makefile
# Build Go tools for Windows
.PHONY: all build clean release
# Output directory
BIN_DIR := bin
# Binaries
ANALYZE := analyze
STATUS := status
# Source directories
ANALYZE_SRC := ./cmd/analyze
STATUS_SRC := ./cmd/status
# Build flags
LDFLAGS := -s -w
.PHONY: all build clean analyze status
# Default target
all: build
# Local build (current architecture)
build:
@echo "Building for local architecture..."
go build -ldflags="$(LDFLAGS)" -o $(BIN_DIR)/$(ANALYZE)-go $(ANALYZE_SRC)
go build -ldflags="$(LDFLAGS)" -o $(BIN_DIR)/$(STATUS)-go $(STATUS_SRC)
# Build both tools
build: analyze status
# Release build targets (run on native architectures for CGO support)
release-amd64:
@echo "Building release binaries (amd64)..."
GOOS=darwin GOARCH=amd64 go build -ldflags="$(LDFLAGS)" -o $(BIN_DIR)/$(ANALYZE)-darwin-amd64 $(ANALYZE_SRC)
GOOS=darwin GOARCH=amd64 go build -ldflags="$(LDFLAGS)" -o $(BIN_DIR)/$(STATUS)-darwin-amd64 $(STATUS_SRC)
# Build analyze tool
analyze:
@echo "Building analyze..."
@go build -o bin/analyze.exe ./cmd/analyze/
release-arm64:
@echo "Building release binaries (arm64)..."
GOOS=darwin GOARCH=arm64 go build -ldflags="$(LDFLAGS)" -o $(BIN_DIR)/$(ANALYZE)-darwin-arm64 $(ANALYZE_SRC)
GOOS=darwin GOARCH=arm64 go build -ldflags="$(LDFLAGS)" -o $(BIN_DIR)/$(STATUS)-darwin-arm64 $(STATUS_SRC)
# Build status tool
status:
@echo "Building status..."
@go build -o bin/status.exe ./cmd/status/
# Clean build artifacts
clean:
@echo "Cleaning binaries..."
rm -f $(BIN_DIR)/$(ANALYZE)-* $(BIN_DIR)/$(STATUS)-* $(BIN_DIR)/$(ANALYZE)-go $(BIN_DIR)/$(STATUS)-go
@echo "Cleaning..."
@rm -f bin/analyze.exe bin/status.exe
# Install (copy to PATH)
install: build
@echo "Installing to $(USERPROFILE)/bin..."
@mkdir -p "$(USERPROFILE)/bin"
@cp bin/analyze.exe "$(USERPROFILE)/bin/"
@cp bin/status.exe "$(USERPROFILE)/bin/"
# Run tests
test:
@go test -v ./...
# Format code
fmt:
@go fmt ./...
# Vet code
vet:
@go vet ./...

380
README.md
View File

@@ -1,301 +1,169 @@
<div align="center">
<h1>Mole</h1>
<p><em>Deep clean and optimize your Mac.</em></p>
</div>
# Mole for Windows
<p align="center">
<a href="https://github.com/tw93/mole/stargazers"><img src="https://img.shields.io/github/stars/tw93/mole?style=flat-square" alt="Stars"></a>
<a href="https://github.com/tw93/mole/releases"><img src="https://img.shields.io/github/v/tag/tw93/mole?label=version&style=flat-square" alt="Version"></a>
<a href="LICENSE"><img src="https://img.shields.io/badge/license-MIT-blue.svg?style=flat-square" alt="License"></a>
<a href="https://github.com/tw93/mole/commits"><img src="https://img.shields.io/github/commit-activity/m/tw93/mole?style=flat-square" alt="Commits"></a>
<a href="https://twitter.com/HiTw93"><img src="https://img.shields.io/badge/follow-Tw93-red?style=flat-square&logo=Twitter" alt="Twitter"></a>
<a href="https://t.me/+GclQS9ZnxyI2ODQ1"><img src="https://img.shields.io/badge/chat-Telegram-blueviolet?style=flat-square&logo=Telegram" alt="Telegram"></a>
</p>
Windows support for [Mole](https://github.com/tw93/Mole) - A system maintenance toolkit.
<p align="center">
<img src="https://cdn.tw93.fun/img/mole.jpeg" alt="Mole - 95.50GB freed" width="1000" />
</p>
## Requirements
## Features
- Windows 10/11
- PowerShell 5.1 or later (pre-installed on Windows 10/11)
- Go 1.24+ (for building TUI tools)
- **All-in-one toolkit**: CleanMyMac, AppCleaner, DaisyDisk, and iStat Menus combined into a **single binary**
- **Deep cleaning**: Scans and removes caches, logs, and browser leftovers to **reclaim gigabytes of space**
- **Smart uninstaller**: Thoroughly removes apps along with launch agents, preferences, and **hidden remnants**
- **Disk insights**: Visualizes usage, manages large files, **rebuilds caches**, and refreshes system services
- **Live monitoring**: Real-time stats for CPU, GPU, memory, disk, and network to **diagnose performance issues**
## Installation
## Platform Support
### Quick Install
Mole is designed for **macOS**. For Windows users, check out the `windows/` directory, which provides a native Windows port with the same features:
**Windows Installation:**
```powershell
irm https://raw.githubusercontent.com/tw93/mole/main/windows/install.ps1 | iex
# Clone the repository
git clone https://github.com/tw93/Mole.git
cd Mole/windows
# Run the installer
.\install.ps1 -AddToPath
```
**Windows Features:**
- Deep system cleanup (temp files, caches, logs, Windows Update cache)
- Smart app uninstaller with leftover detection
- System optimization and service refresh
- Developer artifact cleanup (node_modules, target, .venv, etc.)
- Disk analysis and real-time monitoring tools (TUI)
### Manual Installation
Built with PowerShell and Go for native Windows performance. Run `mole` after installation.
```powershell
# Install to custom location
.\install.ps1 -InstallDir C:\Tools\Mole -AddToPath
## Quick Start
**Install via Homebrew — recommended:**
```bash
brew install mole
# Create Start Menu shortcut
.\install.ps1 -AddToPath -CreateShortcut
```
**Or via script:**
### Uninstall
```bash
# Optional args: -s latest for main branch code, -s 1.17.0 for specific version
curl -fsSL https://raw.githubusercontent.com/tw93/mole/main/install.sh | bash
```powershell
.\install.ps1 -Uninstall
```
**Run:**
## Usage
```bash
mo # Interactive menu
mo clean # Deep cleanup
mo uninstall # Remove apps + leftovers
mo optimize # Refresh caches & services
mo analyze # Visual disk explorer
mo status # Live system health dashboard
mo purge # Clean project build artifacts
mo installer # Find and remove installer files
```powershell
# Interactive menu
mole
mo touchid # Configure Touch ID for sudo
mo completion # Set up shell tab completion
mo update # Update Mole
mo remove # Remove Mole from system
mo --help # Show help
mo --version # Show installed version
# Show help
mole -ShowHelp
mo clean --dry-run # Preview the cleanup plan
mo clean --whitelist # Manage protected caches
mo clean --dry-run --debug # Detailed preview with risk levels and file info
# Show version
mole -Version
mo optimize --dry-run # Preview optimization actions
mo optimize --debug # Run with detailed operation logs
mo optimize --whitelist # Manage protected optimization rules
mo purge --paths # Configure project scan directories
# Commands
mole clean # Deep system cleanup
mole clean -DryRun # Preview cleanup without deleting
mole uninstall # Interactive app uninstaller
mole optimize # System optimization
mole purge # Clean developer artifacts
mole analyze # Disk space analyzer
mole status # System health monitor
```
## Tips
## Commands
- **Terminal**: iTerm2 has known compatibility issues; we recommend Alacritty, kitty, WezTerm, Ghostty, or Warp.
- **Safety**: Built with strict protections. See [Security Audit](SECURITY_AUDIT.md). Preview changes with `mo clean --dry-run`.
- **Be Careful**: Although safe by design, file deletion is permanent. Please review operations carefully.
- **Debug Mode**: Use `--debug` for detailed logs (e.g., `mo clean --debug`). Combine with `--dry-run` for comprehensive preview including risk levels and file details.
- **Navigation**: Supports arrow keys and Vim bindings (`h/j/k/l`).
- **Status Shortcuts**: In `mo status`, press `k` to toggle cat visibility and save preference, `q` to quit.
- **Configuration**: Run `mo touchid` for Touch ID sudo, `mo completion` for shell tab completion, `mo clean --whitelist` to manage protected paths.
| Command | Description |
|---------|-------------|
| `clean` | Deep cleanup of temp files, caches, and logs |
| `uninstall` | Interactive application uninstaller |
| `optimize` | System optimization and health checks |
| `purge` | Clean project build artifacts (node_modules, etc.) |
| `analyze` | Interactive disk space analyzer (TUI) |
| `status` | Real-time system health monitor (TUI) |
## Features in Detail
## Environment Variables
### Deep System Cleanup
| Variable | Description |
|----------|-------------|
| `MOLE_DRY_RUN=1` | Preview changes without making them |
| `MOLE_DEBUG=1` | Enable debug output |
| `MO_ANALYZE_PATH` | Starting path for analyze tool |
```bash
$ mo clean
## Directory Structure
Scanning cache directories...
✓ User app cache 45.2GB
✓ Browser cache (Chrome, Safari, Firefox) 10.5GB
✓ Developer tools (Xcode, Node.js, npm) 23.3GB
✓ System logs and temp files 3.8GB
✓ App-specific cache (Spotify, Dropbox, Slack) 8.4GB
✓ Trash 12.3GB
====================================================================
Space freed: 95.5GB | Free space now: 223.5GB
====================================================================
```
windows/
├── mole.ps1 # Main CLI entry point
├── install.ps1 # Windows installer
├── Makefile # Build automation for Go tools
├── go.mod # Go module definition
├── go.sum # Go dependencies
├── bin/
│ ├── clean.ps1 # Deep cleanup orchestrator
│ ├── uninstall.ps1 # Interactive app uninstaller
│ ├── optimize.ps1 # System optimization
│ ├── purge.ps1 # Project artifact cleanup
│ ├── analyze.ps1 # Disk analyzer wrapper
│ └── status.ps1 # Status monitor wrapper
├── cmd/
│ ├── analyze/ # Disk analyzer (Go TUI)
│ │ └── main.go
│ └── status/ # System status (Go TUI)
│ └── main.go
└── lib/
├── core/
│ ├── base.ps1 # Core definitions and utilities
│ ├── common.ps1 # Common functions loader
│ ├── file_ops.ps1 # Safe file operations
│ ├── log.ps1 # Logging functions
│ └── ui.ps1 # Interactive UI components
└── clean/
├── user.ps1 # User cleanup (temp, downloads, etc.)
├── caches.ps1 # Browser and app caches
├── dev.ps1 # Developer tool caches
├── apps.ps1 # Application leftovers
└── system.ps1 # System cleanup (requires admin)
```
### Smart App Uninstaller
## Building TUI Tools
```bash
$ mo uninstall
The analyze and status commands require Go to be installed:
Select Apps to Remove
═══════════════════════════
▶ ☑ Photoshop 2024 (4.2G) | Old
☐ IntelliJ IDEA (2.8G) | Recent
☐ Premiere Pro (3.4G) | Recent
```powershell
cd windows
Uninstalling: Photoshop 2024
# Build both tools
make build
✓ Removed application
✓ Cleaned 52 related files across 12 locations
- Application Support, Caches, Preferences
- Logs, WebKit storage, Cookies
- Extensions, Plugins, Launch daemons
# Or build individually
go build -o bin/analyze.exe ./cmd/analyze/
go build -o bin/status.exe ./cmd/status/
====================================================================
Space freed: 12.8GB
====================================================================
# The wrapper scripts will auto-build if Go is available
```
### System Optimization
## Configuration
```bash
$ mo optimize
Mole stores its configuration in:
- Config: `~\.config\mole\`
- Cache: `~\.cache\mole\`
- Whitelist: `~\.config\mole\whitelist.txt`
- Purge paths: `~\.config\mole\purge_paths.txt`
System: 5/32 GB RAM | 333/460 GB Disk (72%) | Uptime 6d
## Development Phases
✓ Rebuild system databases and clear caches
✓ Reset network services
✓ Refresh Finder and Dock
✓ Clean diagnostic and crash logs
✓ Remove swap files and restart dynamic pager
✓ Rebuild launch services and spotlight index
### Phase 1: Core Infrastructure ✅
- [x] `install.ps1` - Windows installer
- [x] `mole.ps1` - Main CLI entry point
- [x] `lib/core/*` - Core utility libraries
====================================================================
System optimization completed
====================================================================
### Phase 2: Cleanup Features ✅
- [x] `bin/clean.ps1` - Deep cleanup orchestrator
- [x] `bin/uninstall.ps1` - App removal with leftover detection
- [x] `bin/optimize.ps1` - System optimization
- [x] `bin/purge.ps1` - Project artifact cleanup
- [x] `lib/clean/*` - Cleanup modules
Use `mo optimize --whitelist` to exclude specific optimizations.
```
### Phase 3: TUI Tools ✅
- [x] `cmd/analyze/` - Disk usage analyzer (Go)
- [x] `cmd/status/` - Real-time system monitor (Go)
- [x] `bin/analyze.ps1` - Analyzer wrapper
- [x] `bin/status.ps1` - Status wrapper
### Disk Space Analyzer
```bash
$ mo analyze
Analyze Disk ~/Documents | Total: 156.8GB
▶ 1. ███████████████████ 48.2% | 📁 Library 75.4GB >6mo
2. ██████████░░░░░░░░░ 22.1% | 📁 Downloads 34.6GB
3. ████░░░░░░░░░░░░░░░ 14.3% | 📁 Movies 22.4GB
4. ███░░░░░░░░░░░░░░░░ 10.8% | 📁 Documents 16.9GB
5. ██░░░░░░░░░░░░░░░░░ 5.2% | 📄 backup_2023.zip 8.2GB
↑↓←→ Navigate | O Open | F Show | ⌫ Delete | L Large files | Q Quit
```
### Live System Status
Real-time dashboard with system health score, hardware info, and performance metrics.
```bash
$ mo status
Mole Status Health ● 92 MacBook Pro · M4 Pro · 32GB · macOS 14.5
⚙ CPU ▦ Memory
Total ████████████░░░░░░░ 45.2% Used ███████████░░░░░░░ 58.4%
Load 0.82 / 1.05 / 1.23 (8 cores) Total 14.2 / 24.0 GB
Core 1 ███████████████░░░░ 78.3% Free ████████░░░░░░░░░░ 41.6%
Core 2 ████████████░░░░░░░ 62.1% Avail 9.8 GB
▤ Disk ⚡ Power
Used █████████████░░░░░░ 67.2% Level ██████████████████ 100%
Free 156.3 GB Status Charged
Read ▮▯▯▯▯ 2.1 MB/s Health Normal · 423 cycles
Write ▮▮▮▯▯ 18.3 MB/s Temp 58°C · 1200 RPM
⇅ Network ▶ Processes
Down ▮▮▯▯▯ 3.2 MB/s Code ▮▮▮▮▯ 42.1%
Up ▮▯▯▯▯ 0.8 MB/s Chrome ▮▮▮▯▯ 28.3%
Proxy HTTP · 192.168.1.100 Terminal ▮▯▯▯▯ 12.5%
```
Health score based on CPU, memory, disk, temperature, and I/O load. Color-coded by range.
### Project Artifact Purge
Clean old build artifacts (`node_modules`, `target`, `build`, `dist`, etc.) from your projects to free up disk space.
```bash
mo purge
Select Categories to Clean - 18.5GB (8 selected)
➤ ● my-react-app 3.2GB | node_modules
● old-project 2.8GB | node_modules
● rust-app 4.1GB | target
● next-blog 1.9GB | node_modules
○ current-work 856MB | node_modules | Recent
● django-api 2.3GB | venv
● vue-dashboard 1.7GB | node_modules
● backend-service 2.5GB | node_modules
```
> **Use with caution:** This will permanently delete selected artifacts. Review carefully before confirming. Recent projects — less than 7 days old — are marked and unselected by default.
<details>
<summary><strong>Custom Scan Paths</strong></summary>
Run `mo purge --paths` to configure which directories to scan, or edit `~/.config/mole/purge_paths` directly:
```shell
~/Documents/MyProjects
~/Work/ClientA
~/Work/ClientB
```
When custom paths are configured, only those directories are scanned. Otherwise, it defaults to `~/Projects`, `~/GitHub`, `~/dev`, etc.
</details>
### Installer Cleanup
Find and remove large installer files scattered across Downloads, Desktop, Homebrew caches, iCloud, and Mail. Each file is labeled by source to help you know where the space is hiding.
```bash
mo installer
Select Installers to Remove - 3.8GB (5 selected)
➤ ● Photoshop_2024.dmg 1.2GB | Downloads
● IntelliJ_IDEA.dmg 850.6MB | Downloads
● Illustrator_Setup.pkg 920.4MB | Downloads
● PyCharm_Pro.dmg 640.5MB | Homebrew
● Acrobat_Reader.dmg 220.4MB | Downloads
○ AppCode_Legacy.zip 410.6MB | Downloads
```
## Quick Launchers
Launch Mole commands instantly from Raycast or Alfred:
```bash
curl -fsSL https://raw.githubusercontent.com/tw93/Mole/main/scripts/setup-quick-launchers.sh | bash
```
Adds 5 commands: `clean`, `uninstall`, `optimize`, `analyze`, `status`.
Mole automatically detects your terminal, or set `MO_LAUNCHER_APP=<name>` to override. For Raycast users: if this is your first script directory, add it via Raycast Extensions → Add Script Directory, then run "Reload Script Directories".
## Community Love
Mole wouldn't be possible without these amazing contributors. They've built countless features that make Mole what it is today. Go follow them! ❤️
<a href="https://github.com/tw93/Mole/graphs/contributors">
<img src="./CONTRIBUTORS.svg?v=2" width="1000" />
</a>
Join thousands of users worldwide who trust Mole to keep their Macs clean and optimized.
<img src="https://cdn.tw93.fun/pic/lovemole.jpeg" alt="Community feedback on Mole" width="1000" />
## Support
- If Mole saved you disk space, consider starring the repo or [sharing it](https://twitter.com/intent/tweet?url=https://github.com/tw93/Mole&text=Mole%20-%20Deep%20clean%20and%20optimize%20your%20Mac.) with friends.
- Have ideas or fixes? Check our [Contributing Guide](CONTRIBUTING.md), then open an issue or PR to help shape Mole's future.
- Love Mole? <a href="https://miaoyan.app/cats.html?name=Mole" target="_blank">Buy Tw93 an ice-cold Coke</a> to keep the project alive and kicking! 🥤
<details>
<summary><strong>Friends who bought me Coke</strong></summary>
<br/>
<a href="https://miaoyan.app/cats.html?name=Mole"><img src="https://miaoyan.app/assets/sponsors.svg" width="1000" /></a>
</details>
### Phase 4: Testing & CI (Planned)
- [ ] `tests/` - Pester tests
- [ ] GitHub Actions workflows
- [ ] `scripts/build.ps1` - Build automation
## License
MIT License — feel free to enjoy and participate in open source.
Same license as the main Mole project.

View File

@@ -1,15 +0,0 @@
#!/bin/bash
# Mole - Analyze command.
# Thin launcher: delegates to the bundled analyze-go Go TUI binary.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ANALYZER_BIN="$SCRIPT_DIR/analyze-go"

# Guard clause: bail out early when the bundled binary is missing or
# not executable.
if [[ ! -x "$ANALYZER_BIN" ]]; then
    echo "Bundled analyzer binary not found. Please reinstall Mole or run mo update to restore it." >&2
    exit 1
fi

# Replace this shell with the analyzer, forwarding all arguments.
exec "$ANALYZER_BIN" "$@"

View File

@@ -1,101 +0,0 @@
#!/bin/bash
# Mole - System check command.
# Runs update/health/security/configuration checks in parallel, prints the
# results, then offers interactive auto-fix and update steps.
set -euo pipefail
# Fix locale issues (similar to Issue #83)
export LC_ALL=C
export LANG=C
# Load common functions
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
source "$SCRIPT_DIR/lib/core/common.sh"
source "$SCRIPT_DIR/lib/core/sudo.sh"
source "$SCRIPT_DIR/lib/manage/update.sh"
source "$SCRIPT_DIR/lib/manage/autofix.sh"
source "$SCRIPT_DIR/lib/check/all.sh"

# Release everything a run may have acquired: inline spinner, sudo
# keep-alive session, and temp files. Safe to call more than once.
cleanup_all() {
    stop_inline_spinner 2> /dev/null || true
    stop_sudo_session
    cleanup_temp_files
}

handle_interrupt() {
    cleanup_all
    exit 130 # 128 + SIGINT
}

main() {
    # Register unified cleanup handler
    trap cleanup_all EXIT
    trap handle_interrupt INT TERM
    if [[ -t 1 ]]; then
        clear
    fi
    printf '\n'
    # Create temp files for parallel execution.
    # FIX: declare and assign separately — `local var=$(cmd)` masks the
    # command's exit status (ShellCheck SC2155), so a failing mktemp_file
    # would previously go unnoticed despite `set -e`.
    local updates_file health_file security_file config_file
    updates_file=$(mktemp_file)
    health_file=$(mktemp_file)
    security_file=$(mktemp_file)
    config_file=$(mktemp_file)
    # Run all checks in parallel with spinner (spinner only on a TTY)
    if [[ -t 1 ]]; then
        echo -ne "${PURPLE_BOLD}System Check${NC} "
        start_inline_spinner "Running checks..."
    else
        echo -e "${PURPLE_BOLD}System Check${NC}"
        echo ""
    fi
    # Parallel execution: each check writes to its own temp file so output
    # never interleaves; `wait` blocks until all four finish.
    {
        check_all_updates > "$updates_file" 2>&1 &
        check_system_health > "$health_file" 2>&1 &
        check_all_security > "$security_file" 2>&1 &
        check_all_config > "$config_file" 2>&1 &
        wait
    }
    if [[ -t 1 ]]; then
        stop_inline_spinner
        printf '\n'
    fi
    # Display results in a fixed order regardless of completion order
    echo -e "${BLUE}${ICON_ARROW}${NC} System updates"
    cat "$updates_file"
    printf '\n'
    echo -e "${BLUE}${ICON_ARROW}${NC} System health"
    cat "$health_file"
    printf '\n'
    echo -e "${BLUE}${ICON_ARROW}${NC} Security posture"
    cat "$security_file"
    printf '\n'
    echo -e "${BLUE}${ICON_ARROW}${NC} Configuration"
    cat "$config_file"
    # Show suggestions
    show_suggestions
    # Ask about auto-fix
    if ask_for_auto_fix; then
        perform_auto_fix
    fi
    # Ask about updates
    if ask_for_updates; then
        perform_updates
    fi
    printf '\n'
}
main "$@"

File diff suppressed because it is too large Load Diff

View File

@@ -1,251 +0,0 @@
#!/bin/bash
# Mole - Shell completion command.
# Generates bash/zsh/fish completion scripts for `mole` and `mo`, or
# auto-installs the appropriate hook when run with no arguments.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
source "$ROOT_DIR/lib/core/common.sh"
source "$ROOT_DIR/lib/core/commands.sh"
# Build the subcommand name list from MOLE_COMMANDS (defined in
# commands.sh; each entry is a "name:description" pair).
command_names=()
for entry in "${MOLE_COMMANDS[@]}"; do
command_names+=("${entry%%:*}")
done
# Space-separated word list used by the generated completion scripts.
command_words="${command_names[*]}"
# Print zsh `_describe`-style "name:description" entries, one line per
# MOLE_COMMANDS element, for embedding in the generated zsh script.
emit_zsh_subcommands() {
    local item
    for item in "${MOLE_COMMANDS[@]}"; do
        printf " '%s:%s'\n" "${item%%:*}" "${item#*:}"
    done
}
# Emit fish completion rules for the command name given in $1: one rule
# per Mole subcommand, then one shell-name argument rule per supported
# shell for the `completion` subcommand.
emit_fish_completions() {
    local tool="$1"
    local item sub desc shell
    for item in "${MOLE_COMMANDS[@]}"; do
        sub="${item%%:*}"
        desc="${item#*:}"
        printf 'complete -c %s -n "__fish_mole_no_subcommand" -a %s -d "%s"\n' "$tool" "$sub" "$desc"
    done
    printf '\n'
    # One identical rule per generator shell keeps the output in sync.
    for shell in bash zsh fish; do
        printf 'complete -c %s -n "not __fish_mole_no_subcommand" -a %s -d "generate %s completion" -n "__fish_see_subcommand_path completion"\n' "$tool" "$shell" "$shell"
    done
}
# Auto-install mode when run without arguments
if [[ $# -eq 0 ]]; then
    # Strip any previously installed Mole completion lines from the given
    # config file, preserving its permission bits. This logic was
    # previously triplicated inline (stale-cleanup, refresh, and install
    # paths below); extracted so all three paths stay in sync.
    remove_mole_completion_lines() {
        local target="$1"
        local original_mode=""
        original_mode="$(stat -f '%Mp%Lp' "$target" 2> /dev/null || true)"
        local temp_file
        temp_file="$(mktemp)"
        grep -Ev "(^# Mole shell completion$|(mole|mo)[[:space:]]+completion)" "$target" > "$temp_file" || true
        mv "$temp_file" "$target"
        if [[ -n "$original_mode" ]]; then
            chmod "$original_mode" "$target" 2> /dev/null || true
        fi
    }
    # Detect current shell (fall back to the parent process name)
    current_shell="${SHELL##*/}"
    if [[ -z "$current_shell" ]]; then
        current_shell="$(ps -p "$PPID" -o comm= 2> /dev/null | awk '{print $1}')"
    fi
    # Prefer `mole` over the `mo` alias for the installed hook
    completion_name=""
    if command -v mole > /dev/null 2>&1; then
        completion_name="mole"
    elif command -v mo > /dev/null 2>&1; then
        completion_name="mo"
    fi
    # Pick the shell's config file and the hook line to append
    case "$current_shell" in
        bash)
            config_file="${HOME}/.bashrc"
            [[ -f "${HOME}/.bash_profile" ]] && config_file="${HOME}/.bash_profile"
            # shellcheck disable=SC2016
            completion_line='if output="$('"$completion_name"' completion bash 2>/dev/null)"; then eval "$output"; fi'
            ;;
        zsh)
            config_file="${HOME}/.zshrc"
            # shellcheck disable=SC2016
            completion_line='if output="$('"$completion_name"' completion zsh 2>/dev/null)"; then eval "$output"; fi'
            ;;
        fish)
            config_file="${HOME}/.config/fish/config.fish"
            # shellcheck disable=SC2016
            completion_line='set -l output ('"$completion_name"' completion fish 2>/dev/null); and echo "$output" | source'
            ;;
        *)
            log_error "Unsupported shell: $current_shell"
            echo " mole completion <bash|zsh|fish>"
            exit 1
            ;;
    esac
    # Mole itself is missing: clean up any stale hook lines, then bail out
    if [[ -z "$completion_name" ]]; then
        if [[ -f "$config_file" ]] && grep -Eq "(^# Mole shell completion$|(mole|mo)[[:space:]]+completion)" "$config_file" 2> /dev/null; then
            remove_mole_completion_lines "$config_file"
            echo -e "${GREEN}${ICON_SUCCESS}${NC} Removed stale completion entries from $config_file"
            echo ""
        fi
        log_error "mole not found in PATH - install Mole before enabling completion"
        exit 1
    fi
    # Check if already installed and normalize to latest line
    if [[ -f "$config_file" ]] && grep -Eq "(mole|mo)[[:space:]]+completion" "$config_file" 2> /dev/null; then
        remove_mole_completion_lines "$config_file"
        {
            echo ""
            echo "# Mole shell completion"
            echo "$completion_line"
        } >> "$config_file"
        echo ""
        echo -e "${GREEN}${ICON_SUCCESS}${NC} Shell completion updated in $config_file"
        echo ""
        exit 0
    fi
    # Prompt user for installation
    echo ""
    echo -e "${GRAY}Will add to ${config_file}:${NC}"
    echo " $completion_line"
    echo ""
    echo -ne "${PURPLE}${ICON_ARROW}${NC} Enable completion for ${GREEN}${current_shell}${NC}? ${GRAY}Enter confirm / Q cancel${NC}: "
    IFS= read -r -s -n1 key || key=""
    drain_pending_input
    echo ""
    # Esc/Q/N cancels; Enter/Y confirms; anything else is rejected
    case "$key" in
        $'\e' | [Qq] | [Nn])
            echo -e "${YELLOW}Cancelled${NC}"
            exit 0
            ;;
        "" | $'\n' | $'\r' | [Yy]) ;;
        *)
            log_error "Invalid key"
            exit 1
            ;;
    esac
    # Create config file if it doesn't exist
    if [[ ! -f "$config_file" ]]; then
        mkdir -p "$(dirname "$config_file")"
        touch "$config_file"
    fi
    # Remove previous Mole completion lines to avoid duplicates
    if [[ -f "$config_file" ]]; then
        remove_mole_completion_lines "$config_file"
    fi
    # Add completion line
    {
        echo ""
        echo "# Mole shell completion"
        echo "$completion_line"
    } >> "$config_file"
    echo -e "${GREEN}${ICON_SUCCESS}${NC} Completion added to $config_file"
    echo ""
    echo ""
    echo -e "${GRAY}To activate now:${NC}"
    echo -e " ${GREEN}source $config_file${NC}"
    exit 0
fi
# Manual mode: emit a completion script for the requested shell on stdout.
case "$1" in
bash)
# Bash: COMP_WORDS-based completion function. The heredoc delimiter is
# unquoted, so $command_words expands now while the \$-escaped parts
# stay literal for the generated script.
cat << EOF
_mole_completions()
{
local cur_word prev_word
cur_word="\${COMP_WORDS[\$COMP_CWORD]}"
prev_word="\${COMP_WORDS[\$COMP_CWORD-1]}"
if [ "\$COMP_CWORD" -eq 1 ]; then
COMPREPLY=( \$(compgen -W "$command_words" -- "\$cur_word") )
else
case "\$prev_word" in
completion)
COMPREPLY=( \$(compgen -W "bash zsh fish" -- "\$cur_word") )
;;
*)
COMPREPLY=()
;;
esac
fi
}
complete -F _mole_completions mole mo
EOF
;;
zsh)
# Zsh: _describe-based completion registered for both command names.
printf '#compdef mole mo\n\n'
printf '_mole() {\n'
printf ' local -a subcommands\n'
printf ' subcommands=(\n'
emit_zsh_subcommands
printf ' )\n'
printf " _describe 'subcommand' subcommands\n"
printf '}\n\n'
printf 'compdef _mole mole mo\n'
;;
fish)
# Fish: per-subcommand rules for mole and mo, plus the two helper
# predicates the generated rules reference.
printf '# Completions for mole\n'
emit_fish_completions mole
printf '\n# Completions for mo (alias)\n'
emit_fish_completions mo
printf '\nfunction __fish_mole_no_subcommand\n'
printf ' for i in (commandline -opc)\n'
# shellcheck disable=SC2016
printf ' if contains -- $i %s\n' "$command_words"
printf ' return 1\n'
printf ' end\n'
printf ' end\n'
printf ' return 0\n'
printf 'end\n\n'
printf 'function __fish_see_subcommand_path\n'
printf ' string match -q -- "completion" (commandline -opc)[1]\n'
printf 'end\n'
;;
*)
# Unknown argument: print usage (quoted heredoc, no expansion) and fail.
cat << 'EOF'
Usage: mole completion [bash|zsh|fish]
Setup shell tab completion for mole and mo commands.
Auto-install:
mole completion # Auto-detect shell and install
Manual install:
mole completion bash # Generate bash completion script
mole completion zsh # Generate zsh completion script
mole completion fish # Generate fish completion script
Examples:
# Auto-install (recommended)
mole completion
# Manual install - Bash
eval "$(mole completion bash)"
# Manual install - Zsh
eval "$(mole completion zsh)"
# Manual install - Fish
mole completion fish | source
EOF
exit 1
;;
esac

View File

@@ -1,704 +0,0 @@
#!/bin/bash
# Mole - Installer command
# Find and remove installer files - .dmg, .pkg, .mpkg, .iso, .xip, .zip
# Fail fast: abort on errors, unset variables, and pipeline failures.
set -euo pipefail
# shellcheck disable=SC2154
# External variables set by menu_paginated.sh and environment
declare MOLE_SELECTION_RESULT
declare MOLE_INSTALLER_SCAN_MAX_DEPTH
# Force the C locale for stable sorting and byte-wise string handling.
export LC_ALL=C
export LANG=C
# Shared helpers: colors/logging/file utilities and the paginated menu UI.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/../lib/core/common.sh"
source "$SCRIPT_DIR/../lib/ui/menu_paginated.sh"
# Restore the terminal and drop temp state on exit: leave the alternate
# screen (if we entered it), re-show the cursor, and remove temp files
# created by the common helpers.
cleanup() {
    case "${IN_ALT_SCREEN:-0}" in
        1)
            leave_alt_screen
            IN_ALT_SCREEN=0
            ;;
    esac
    show_cursor
    cleanup_temp_files
}
trap cleanup EXIT
# On Ctrl-C / TERM: drop the EXIT trap first so cleanup runs exactly once.
trap 'trap - EXIT; cleanup; exit 130' INT TERM
# Scan configuration
readonly INSTALLER_SCAN_MAX_DEPTH_DEFAULT=2
# Directories where installer files typically accumulate.
readonly INSTALLER_SCAN_PATHS=(
    "$HOME/Downloads"
    "$HOME/Desktop"
    "$HOME/Documents"
    "$HOME/Public"
    "$HOME/Library/Downloads"
    "/Users/Shared"
    "/Users/Shared/Downloads"
    "$HOME/Library/Caches/Homebrew"
    "$HOME/Library/Mobile Documents/com~apple~CloudDocs/Downloads"
    "$HOME/Library/Containers/com.apple.mail/Data/Library/Mail Downloads"
    "$HOME/Library/Application Support/Telegram Desktop"
    "$HOME/Downloads/Telegram Desktop"
)
# Only inspect this many leading entries when sniffing a ZIP's contents.
readonly MAX_ZIP_ENTRIES=50
ZIP_LIST_CMD=()
IN_ALT_SCREEN=0
# Pick whichever ZIP lister is available; empty means ZIPs are skipped.
if command -v zipinfo > /dev/null 2>&1; then
    ZIP_LIST_CMD=(zipinfo -1)
elif command -v unzip > /dev/null 2>&1; then
    ZIP_LIST_CMD=(unzip -Z -1)
fi
# Cached terminal width; 0 means "not probed yet" (see get_terminal_width).
TERMINAL_WIDTH=0
# Check whether a ZIP archive carries an installer payload by listing its
# first MAX_ZIP_ENTRIES entries and looking for .app/.pkg/.dmg/.xip names.
# Returns 1 when no lister is available or nothing matches.
is_installer_zip() {
    local archive="$1"
    local entry_limit="$MAX_ZIP_ENTRIES"
    [[ ${#ZIP_LIST_CMD[@]} -gt 0 ]] || return 1
    "${ZIP_LIST_CMD[@]}" "$archive" 2> /dev/null |
        head -n "$entry_limit" |
        awk '
            /\.(app|pkg|dmg|xip)(\/|$)/ { found=1; exit 0 }
            END { exit found ? 0 : 1 }
        ' || return 1
    return 0
}
# Decide whether a scanned file is an installer and, if so, print its path.
# Disk images and packages qualify by extension alone; ZIP archives are
# additionally inspected for installer payloads. Symlinks are ignored.
handle_candidate_file() {
    local candidate="$1"
    if [[ -L "$candidate" ]]; then
        return 0 # Never follow symlinks
    fi
    case "$candidate" in
        *.zip)
            [[ -r "$candidate" ]] || return 0
            if is_installer_zip "$candidate" 2> /dev/null; then
                echo "$candidate"
            fi
            ;;
        *.dmg | *.pkg | *.mpkg | *.iso | *.xip)
            echo "$candidate"
            ;;
    esac
}
# Print installer candidates under one directory, up to the configured depth.
# Prefers `fd` when available (faster); falls back to `find`. Scanner errors
# are suppressed so unreadable paths cannot abort the scan.
scan_installers_in_path() {
    local path="$1"
    # Depth is overridable via MOLE_INSTALLER_SCAN_MAX_DEPTH for tests/tuning.
    local max_depth="${MOLE_INSTALLER_SCAN_MAX_DEPTH:-$INSTALLER_SCAN_MAX_DEPTH_DEFAULT}"
    [[ -d "$path" ]] || return 0
    local file
    if command -v fd > /dev/null 2>&1; then
        while IFS= read -r file; do
            handle_candidate_file "$file"
        done < <(
            # --no-ignore/--hidden: don't let .gitignore rules or dotfiles hide installers.
            fd --no-ignore --hidden --type f --max-depth "$max_depth" \
                -e dmg -e pkg -e mpkg -e iso -e xip -e zip \
                . "$path" 2> /dev/null || true
        )
    else
        while IFS= read -r file; do
            handle_candidate_file "$file"
        done < <(
            find "$path" -maxdepth "$max_depth" -type f \
                \( -name '*.dmg' -o -name '*.pkg' -o -name '*.mpkg' \
                -o -name '*.iso' -o -name '*.xip' -o -name '*.zip' \) \
                2> /dev/null || true
        )
    fi
}
# Emit installer candidates from every configured scan root, in order.
scan_all_installers() {
    local root
    for root in "${INSTALLER_SCAN_PATHS[@]}"; do
        scan_installers_in_path "$root"
    done
}
# Initialize stats
# Running totals reported by show_summary after deletion.
declare -i total_deleted=0
declare -i total_size_freed_kb=0
# Global arrays for installer data
# Parallel arrays: index i describes one discovered installer
# (absolute path, size in bytes, friendly source label, formatted menu row).
declare -a INSTALLER_PATHS=()
declare -a INSTALLER_SIZES=()
declare -a INSTALLER_SOURCES=()
declare -a DISPLAY_NAMES=()
# Get source directory display name - for example "Downloads" or "Desktop".
# Maps the parent directory of an installer path to a short, friendly label;
# unknown locations fall back to the parent directory's basename.
get_source_display() {
    local installer_path="$1"
    local parent="${installer_path%/*}"
    case "$parent" in
        "$HOME/Downloads"*)
            echo "Downloads"
            ;;
        "$HOME/Desktop"*)
            echo "Desktop"
            ;;
        "$HOME/Documents"*)
            echo "Documents"
            ;;
        "$HOME/Public"*)
            echo "Public"
            ;;
        "$HOME/Library/Downloads"*)
            echo "Library"
            ;;
        "/Users/Shared"*)
            echo "Shared"
            ;;
        "$HOME/Library/Caches/Homebrew"*)
            echo "Homebrew"
            ;;
        "$HOME/Library/Mobile Documents/com~apple~CloudDocs/Downloads"*)
            echo "iCloud"
            ;;
        "$HOME/Library/Containers/com.apple.mail"*)
            echo "Mail"
            ;;
        *"Telegram Desktop"*)
            echo "Telegram"
            ;;
        *)
            echo "${parent##*/}"
            ;;
    esac
}
# Return the cached terminal column count, probing `tput cols` once on first
# use. Falls back to 80 columns when tput is unavailable or fails.
get_terminal_width() {
    if (( TERMINAL_WIDTH <= 0 )); then
        TERMINAL_WIDTH=$(tput cols 2> /dev/null || echo 80)
    fi
    echo "$TERMINAL_WIDTH"
}
# Format installer display with alignment - similar to purge command
# Render one menu row as "filename  size | source", padding the filename so
# the size/source columns line up across rows (display-width aware).
format_installer_display() {
    local filename="$1"
    local size_str="$2"
    local source="$3"
    # Terminal width for alignment
    local terminal_width
    terminal_width=$(get_terminal_width)
    local fixed_width=24 # Reserve for size and source
    local available_width=$((terminal_width - fixed_width))
    # Bounds check: 20-40 chars for filename
    [[ $available_width -lt 20 ]] && available_width=20
    [[ $available_width -gt 40 ]] && available_width=40
    # Truncate filename if needed
    local truncated_name
    truncated_name=$(truncate_by_display_width "$filename" "$available_width")
    local current_width
    current_width=$(get_display_width "$truncated_name")
    # printf pads by character count, but wide glyphs can occupy two columns;
    # widen the printf field by the difference so columns still align.
    local char_count=${#truncated_name}
    local padding=$((available_width - current_width))
    local printf_width=$((char_count + padding))
    # Format: "filename size | source"
    printf "%-*s %8s | %-10s" "$printf_width" "$truncated_name" "$size_str" "$source"
}
# Collect all installers with their metadata
# Populate the INSTALLER_* / DISPLAY_NAMES parallel arrays from a full scan.
# Returns 1 (after a friendly message when not in the alt screen) when
# nothing was found; 0 otherwise.
collect_installers() {
    # Clear previous results
    INSTALLER_PATHS=()
    INSTALLER_SIZES=()
    INSTALLER_SOURCES=()
    DISPLAY_NAMES=()
    # Start scanning with spinner
    if [[ -t 1 ]]; then
        start_inline_spinner "Scanning for installers..."
    fi
    # Start debug session
    debug_operation_start "Collect Installers" "Scanning for redundant installer files"
    # Scan all paths, deduplicate, and sort results
    local -a all_files=()
    while IFS= read -r file; do
        [[ -z "$file" ]] && continue
        all_files+=("$file")
        debug_file_action "Found installer" "$file"
    done < <(scan_all_installers | sort -u)
    if [[ -t 1 ]]; then
        stop_inline_spinner
    fi
    if [[ ${#all_files[@]} -eq 0 ]]; then
        if [[ "${IN_ALT_SCREEN:-0}" != "1" ]]; then
            echo -e "${GREEN}${ICON_SUCCESS}${NC} Great! No installer files to clean"
        fi
        return 1
    fi
    # Calculate sizes with spinner
    if [[ -t 1 ]]; then
        start_inline_spinner "Calculating sizes..."
    fi
    # Process each installer
    for file in "${all_files[@]}"; do
        # Calculate file size
        local file_size=0
        if [[ -f "$file" ]]; then
            file_size=$(get_file_size "$file")
        fi
        # Get source directory
        local source
        source=$(get_source_display "$file")
        # Format human readable size
        local size_human
        size_human=$(bytes_to_human "$file_size")
        # Get display filename - strip Homebrew hash prefix if present
        local display_name
        display_name=$(basename "$file")
        if [[ "$source" == "Homebrew" ]]; then
            # Homebrew names often look like: sha256--name--version
            # Strip the leading hash if it matches [0-9a-f]{64}--
            if [[ "$display_name" =~ ^[0-9a-f]{64}--(.*) ]]; then
                display_name="${BASH_REMATCH[1]}"
            fi
        fi
        # Format display with alignment
        local display
        display=$(format_installer_display "$display_name" "$size_human" "$source")
        # Store installer data in parallel arrays
        INSTALLER_PATHS+=("$file")
        INSTALLER_SIZES+=("$file_size")
        INSTALLER_SOURCES+=("$source")
        DISPLAY_NAMES+=("$display")
    done
    if [[ -t 1 ]]; then
        stop_inline_spinner
    fi
    return 0
}
# Installer selector with Select All / Invert support
# Full-screen checkbox picker over the rows passed in "$@". Writes the chosen
# indices (comma-separated) to MOLE_SELECTION_RESULT and returns 0 on Enter,
# 1 on ESC / Q / Ctrl-C or when there is nothing to pick. Puts the terminal
# into raw (-echo -icanon) mode for key handling and restores it on every
# exit path via restore_terminal.
select_installers() {
    local -a items=("$@")
    local total_items=${#items[@]}
    # ANSI: carriage return + erase-line, used to repaint rows in place.
    local clear_line=$'\r\033[2K'
    if [[ $total_items -eq 0 ]]; then
        return 1
    fi
    # Calculate items per page based on terminal height
    # (re-evaluated on every redraw so window resizes are picked up).
    _get_items_per_page() {
        local term_height=24
        if [[ -t 0 ]] || [[ -t 2 ]]; then
            term_height=$(stty size < /dev/tty 2> /dev/null | awk '{print $1}')
        fi
        if [[ -z "$term_height" || $term_height -le 0 ]]; then
            if command -v tput > /dev/null 2>&1; then
                term_height=$(tput lines 2> /dev/null || echo "24")
            else
                term_height=24
            fi
        fi
        # Reserve rows for the header, spacing, and the hint line; clamp 3-50.
        local reserved=6
        local available=$((term_height - reserved))
        if [[ $available -lt 3 ]]; then
            echo 3
        elif [[ $available -gt 50 ]]; then
            echo 50
        else
            echo "$available"
        fi
    }
    local items_per_page=$(_get_items_per_page)
    # cursor_pos is relative to the visible window; top_index is the first
    # visible item's absolute index.
    local cursor_pos=0
    local top_index=0
    # Initialize selection (all unselected by default)
    local -a selected=()
    for ((i = 0; i < total_items; i++)); do
        selected[i]=false
    done
    # Remember terminal settings so raw mode can be undone exactly.
    local original_stty=""
    if [[ -t 0 ]] && command -v stty > /dev/null 2>&1; then
        original_stty=$(stty -g 2> /dev/null || echo "")
    fi
    # Undo traps, alt screen, hidden cursor, and raw mode (idempotent).
    restore_terminal() {
        trap - EXIT INT TERM
        if [[ "${IN_ALT_SCREEN:-0}" == "1" ]]; then
            leave_alt_screen
            IN_ALT_SCREEN=0
        fi
        show_cursor
        if [[ -n "${original_stty:-}" ]]; then
            stty "${original_stty}" 2> /dev/null || stty sane 2> /dev/null || true
        fi
    }
    handle_interrupt() {
        restore_terminal
        exit 130
    }
    # Repaint the whole menu: header with running size/count, the visible
    # window of rows, padding for empty slots, and the key-hint footer.
    draw_menu() {
        items_per_page=$(_get_items_per_page)
        # Clamp scroll window and cursor after any resize or movement.
        local max_top_index=0
        if [[ $total_items -gt $items_per_page ]]; then
            max_top_index=$((total_items - items_per_page))
        fi
        if [[ $top_index -gt $max_top_index ]]; then
            top_index=$max_top_index
        fi
        if [[ $top_index -lt 0 ]]; then
            top_index=0
        fi
        local visible_count=$((total_items - top_index))
        [[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
        if [[ $cursor_pos -gt $((visible_count - 1)) ]]; then
            cursor_pos=$((visible_count - 1))
        fi
        if [[ $cursor_pos -lt 0 ]]; then
            cursor_pos=0
        fi
        # Home the cursor and overdraw instead of clearing (less flicker).
        printf "\033[H"
        # Calculate selected size and count
        local selected_size=0
        local selected_count=0
        for ((i = 0; i < total_items; i++)); do
            if [[ ${selected[i]} == true ]]; then
                selected_size=$((selected_size + ${INSTALLER_SIZES[i]:-0}))
                ((selected_count++))
            fi
        done
        local selected_human
        selected_human=$(bytes_to_human "$selected_size")
        # Show position indicator if scrolling is needed
        local scroll_indicator=""
        if [[ $total_items -gt $items_per_page ]]; then
            local current_pos=$((top_index + cursor_pos + 1))
            scroll_indicator=" ${GRAY}[${current_pos}/${total_items}]${NC}"
        fi
        printf "${PURPLE_BOLD}Select Installers to Remove${NC}%s ${GRAY}- ${selected_human} ($selected_count selected)${NC}\n" "$scroll_indicator"
        printf "%s\n" "$clear_line"
        # Calculate visible range
        local end_index=$((top_index + visible_count))
        # Draw only visible items
        for ((i = top_index; i < end_index; i++)); do
            local checkbox="$ICON_EMPTY"
            [[ ${selected[i]} == true ]] && checkbox="$ICON_SOLID"
            local rel_pos=$((i - top_index))
            if [[ $rel_pos -eq $cursor_pos ]]; then
                printf "%s${CYAN}${ICON_ARROW} %s %s${NC}\n" "$clear_line" "$checkbox" "${items[i]}"
            else
                printf "%s %s %s\n" "$clear_line" "$checkbox" "${items[i]}"
            fi
        done
        # Fill empty slots
        local items_shown=$visible_count
        for ((i = items_shown; i < items_per_page; i++)); do
            printf "%s\n" "$clear_line"
        done
        printf "%s\n" "$clear_line"
        printf "%s${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN} | Space Select | Enter Confirm | A All | I Invert | Q Quit${NC}\n" "$clear_line"
    }
    trap restore_terminal EXIT
    trap handle_interrupt INT TERM
    # Raw mode: read single keys without echo; keep Ctrl-C as intr.
    stty -echo -icanon intr ^C 2> /dev/null || true
    hide_cursor
    if [[ -t 1 ]]; then
        printf "\033[2J\033[H" >&2
    fi
    # Main loop
    while true; do
        draw_menu
        IFS= read -r -s -n1 key || key=""
        case "$key" in
            $'\x1b')
                # Possible arrow-key escape sequence: ESC [ A/B.
                IFS= read -r -s -n1 -t 1 key2 || key2=""
                if [[ "$key2" == "[" ]]; then
                    IFS= read -r -s -n1 -t 1 key3 || key3=""
                    case "$key3" in
                        A) # Up arrow
                            if [[ $cursor_pos -gt 0 ]]; then
                                ((cursor_pos--))
                            elif [[ $top_index -gt 0 ]]; then
                                ((top_index--))
                            fi
                            ;;
                        B) # Down arrow
                            local absolute_index=$((top_index + cursor_pos))
                            local last_index=$((total_items - 1))
                            if [[ $absolute_index -lt $last_index ]]; then
                                local visible_count=$((total_items - top_index))
                                [[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
                                if [[ $cursor_pos -lt $((visible_count - 1)) ]]; then
                                    ((cursor_pos++))
                                elif [[ $((top_index + visible_count)) -lt $total_items ]]; then
                                    ((top_index++))
                                fi
                            fi
                            ;;
                    esac
                else
                    # ESC alone
                    restore_terminal
                    return 1
                fi
                ;;
            " ") # Space - toggle current item
                local idx=$((top_index + cursor_pos))
                if [[ ${selected[idx]} == true ]]; then
                    selected[idx]=false
                else
                    selected[idx]=true
                fi
                ;;
            "a" | "A") # Select all
                for ((i = 0; i < total_items; i++)); do
                    selected[i]=true
                done
                ;;
            "i" | "I") # Invert selection
                for ((i = 0; i < total_items; i++)); do
                    if [[ ${selected[i]} == true ]]; then
                        selected[i]=false
                    else
                        selected[i]=true
                    fi
                done
                ;;
            "q" | "Q" | $'\x03') # Quit or Ctrl-C
                restore_terminal
                return 1
                ;;
            "" | $'\n' | $'\r') # Enter - confirm
                MOLE_SELECTION_RESULT=""
                for ((i = 0; i < total_items; i++)); do
                    if [[ ${selected[i]} == true ]]; then
                        [[ -n "$MOLE_SELECTION_RESULT" ]] && MOLE_SELECTION_RESULT+=","
                        MOLE_SELECTION_RESULT+="$i"
                    fi
                done
                restore_terminal
                return 0
                ;;
        esac
    done
}
# Show menu for user selection
# Present the interactive picker; returns non-zero when there is nothing to
# show or the user cancels. Chosen indices land in MOLE_SELECTION_RESULT.
show_installer_menu() {
    [[ ${#DISPLAY_NAMES[@]} -gt 0 ]] || return 1
    echo ""
    MOLE_SELECTION_RESULT=""
    select_installers "${DISPLAY_NAMES[@]}"
}
# Delete selected installers
# Confirm and delete the entries indexed by MOLE_SELECTION_RESULT, updating
# total_deleted / total_size_freed_kb for the summary. Returns 1 on cancel
# or when nothing is selected.
delete_selected_installers() {
    # Parse selection indices
    local -a selected_indices=()
    [[ -n "$MOLE_SELECTION_RESULT" ]] && IFS=',' read -ra selected_indices <<< "$MOLE_SELECTION_RESULT"
    if [[ ${#selected_indices[@]} -eq 0 ]]; then
        return 1
    fi
    # Calculate total size for confirmation
    local confirm_size=0
    for idx in "${selected_indices[@]}"; do
        # Defensive: only accept in-range numeric indices.
        if [[ "$idx" =~ ^[0-9]+$ ]] && [[ $idx -lt ${#INSTALLER_SIZES[@]} ]]; then
            confirm_size=$((confirm_size + ${INSTALLER_SIZES[$idx]:-0}))
        fi
    done
    local confirm_human
    confirm_human=$(bytes_to_human "$confirm_size")
    # Show files to be deleted
    echo -e "${PURPLE_BOLD}Files to be removed:${NC}"
    for idx in "${selected_indices[@]}"; do
        if [[ "$idx" =~ ^[0-9]+$ ]] && [[ $idx -lt ${#INSTALLER_PATHS[@]} ]]; then
            local file_path="${INSTALLER_PATHS[$idx]}"
            local file_size="${INSTALLER_SIZES[$idx]}"
            local size_human
            size_human=$(bytes_to_human "$file_size")
            echo -e " ${GREEN}${ICON_SUCCESS}${NC} $(basename "$file_path") ${GRAY}(${size_human})${NC}"
        fi
    done
    # Confirm deletion (single keypress: Enter confirms, anything else cancels)
    echo ""
    echo -ne "${PURPLE}${ICON_ARROW}${NC} Delete ${#selected_indices[@]} installer(s) (${confirm_human}) ${GREEN}Enter${NC} confirm, ${GRAY}ESC${NC} cancel: "
    IFS= read -r -s -n1 confirm || confirm=""
    case "$confirm" in
        $'\e' | q | Q)
            return 1
            ;;
        "" | $'\n' | $'\r')
            printf "\r\033[K" # Clear prompt line
            echo "" # Single line break
            ;;
        *)
            return 1
            ;;
    esac
    # Delete each selected installer with spinner
    total_deleted=0
    total_size_freed_kb=0
    if [[ -t 1 ]]; then
        start_inline_spinner "Removing installers..."
    fi
    for idx in "${selected_indices[@]}"; do
        if [[ ! "$idx" =~ ^[0-9]+$ ]] || [[ $idx -ge ${#INSTALLER_PATHS[@]} ]]; then
            continue
        fi
        local file_path="${INSTALLER_PATHS[$idx]}"
        local file_size="${INSTALLER_SIZES[$idx]}"
        # Validate path before deletion
        if ! validate_path_for_deletion "$file_path"; then
            continue
        fi
        # Delete the file
        if safe_remove "$file_path" true; then
            # Round bytes up to whole KB so small files still count.
            total_size_freed_kb=$((total_size_freed_kb + ((file_size + 1023) / 1024)))
            total_deleted=$((total_deleted + 1))
        fi
    done
    if [[ -t 1 ]]; then
        stop_inline_spinner
    fi
    return 0
}
# Perform the installers cleanup
# Orchestrate scan -> selection -> deletion, managing the alternate screen.
# Returns: 0 = deleted something, 1 = user cancelled, 2 = nothing to clean.
perform_installers() {
    # Enter alt screen for scanning and selection
    if [[ -t 1 ]]; then
        enter_alt_screen
        IN_ALT_SCREEN=1
        printf "\033[2J\033[H" >&2
    fi
    # Collect installers
    if ! collect_installers; then
        if [[ -t 1 ]]; then
            leave_alt_screen
            IN_ALT_SCREEN=0
        fi
        # Message is printed here (on the main screen) rather than inside
        # collect_installers, which stays quiet while the alt screen is up.
        printf '\n'
        echo -e "${GREEN}${ICON_SUCCESS}${NC} Great! No installer files to clean"
        printf '\n'
        return 2 # Nothing to clean
    fi
    # Show menu
    if ! show_installer_menu; then
        if [[ -t 1 ]]; then
            leave_alt_screen
            IN_ALT_SCREEN=0
        fi
        return 1 # User cancelled
    fi
    # Leave alt screen before deletion (so confirmation and results are on main screen)
    if [[ -t 1 ]]; then
        leave_alt_screen
        IN_ALT_SCREEN=0
    fi
    # Delete selected
    if ! delete_selected_installers; then
        return 1
    fi
    return 0
}
# Print the closing summary block: how many installers were removed and how
# much space was freed (reads total_deleted / total_size_freed_kb).
show_summary() {
    local summary_heading="Installers cleaned"
    local -a summary_details=()
    if [[ $total_deleted -gt 0 ]]; then
        # Convert the KB counter to megabytes with two decimals.
        local freed_mb
        freed_mb=$(awk -v kb="$total_size_freed_kb" 'BEGIN {printf "%.2f", kb/1024}')
        summary_details+=("Removed ${GREEN}$total_deleted${NC} installer(s), freed ${GREEN}${freed_mb}MB${NC}")
        summary_details+=("Your Mac is cleaner now!")
    else
        summary_details+=("No installers were removed")
    fi
    print_summary_block "$summary_heading" "${summary_details[@]}"
    printf '\n'
}
# Entry point: parse flags, run the interactive cleanup flow, then report.
# Flags: --debug enables verbose logging; anything else is rejected.
#
# Bug fix: this script runs under `set -e`, so calling perform_installers as
# a bare statement aborted the whole script whenever it returned non-zero
# (1 = user cancelled, 2 = nothing to clean) — the `case` below never ran
# and the process exited with a failure status. Capturing the status with
# `|| exit_code=$?` keeps errexit from firing.
main() {
    for arg in "$@"; do
        case "$arg" in
            "--debug")
                export MO_DEBUG=1
                ;;
            *)
                echo "Unknown option: $arg"
                exit 1
                ;;
        esac
    done
    hide_cursor
    local exit_code=0
    perform_installers || exit_code=$?
    show_cursor
    case $exit_code in
        0)
            show_summary
            ;;
        1)
            # User cancelled: just keep the terminal tidy.
            printf '\n'
            ;;
        2)
            # Already handled by perform_installers (message printed there).
            ;;
    esac
    return 0
}
# Only run main if not in test mode
# (MOLE_TEST_MODE=1 lets a test harness source this file without side effects.)
if [[ "${MOLE_TEST_MODE:-0}" != "1" ]]; then
    main "$@"
fi

View File

@@ -1,509 +0,0 @@
#!/bin/bash
# Mole - Optimize command.
# Runs system maintenance checks and fixes.
# Supports dry-run where applicable.
set -euo pipefail
# Fix locale issues.
export LC_ALL=C
export LANG=C
# Note: SCRIPT_DIR resolves to the repo root (one level up from this file).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
source "$SCRIPT_DIR/lib/core/common.sh"
# Clean temp files on exit.
trap cleanup_temp_files EXIT INT TERM
# Feature modules: sudo session handling, updates, auto-fix, maintenance
# tasks, health/check reporting, and whitelist management.
source "$SCRIPT_DIR/lib/core/sudo.sh"
source "$SCRIPT_DIR/lib/manage/update.sh"
source "$SCRIPT_DIR/lib/manage/autofix.sh"
source "$SCRIPT_DIR/lib/optimize/maintenance.sh"
source "$SCRIPT_DIR/lib/optimize/tasks.sh"
source "$SCRIPT_DIR/lib/check/health_json.sh"
source "$SCRIPT_DIR/lib/check/all.sh"
source "$SCRIPT_DIR/lib/manage/whitelist.sh"
# Print the command banner.
print_header() {
    printf '\n'
    printf '%b\n' "${PURPLE_BOLD}Optimize and Check${NC}"
}
# Run the interactive check phase: updates, health, security (with optional
# fixes), config checks, suggestions, then optional updates and auto-fix.
# Skipped entirely in dry-run mode.
run_system_checks() {
    # Skip checks in dry-run mode.
    if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
        return 0
    fi
    # Reset state exported by previous phases so this run starts clean.
    unset AUTO_FIX_SUMMARY AUTO_FIX_DETAILS
    unset MOLE_SECURITY_FIXES_SHOWN
    unset MOLE_SECURITY_FIXES_SKIPPED
    echo ""
    check_all_updates
    echo ""
    check_system_health
    echo ""
    check_all_security
    if ask_for_security_fixes; then
        perform_security_fixes
    fi
    # ask_for_security_fixes already printed a blank line when skipping.
    if [[ "${MOLE_SECURITY_FIXES_SKIPPED:-}" != "true" ]]; then
        echo ""
    fi
    check_all_config
    echo ""
    show_suggestions
    if ask_for_updates; then
        perform_updates
    fi
    if ask_for_auto_fix; then
        perform_auto_fix
    fi
}
# Print the final summary block for the optimize run (or a dry-run notice).
# Reads OPTIMIZE_* counters and AUTO_FIX_* values exported by earlier phases;
# prints nothing when no work happened at all.
show_optimization_summary() {
    local safe_count="${OPTIMIZE_SAFE_COUNT:-0}"
    local confirm_count="${OPTIMIZE_CONFIRM_COUNT:-0}"
    if ((safe_count == 0 && confirm_count == 0)) && [[ -z "${AUTO_FIX_SUMMARY:-}" ]]; then
        return
    fi
    local summary_title
    local -a summary_details=()
    local total_applied=$((safe_count + confirm_count))
    if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
        summary_title="Dry Run Complete - No Changes Made"
        summary_details+=("Would apply ${YELLOW}${total_applied:-0}${NC} optimizations")
        summary_details+=("Run without ${YELLOW}--dry-run${NC} to apply these changes")
    else
        summary_title="Optimization and Check Complete"
        # Build statistics summary
        # NOTE(review): `stats` is assembled but never printed — only the
        # single `key_stat` chosen below appears in the output. Confirm intent.
        local -a stats=()
        local cache_kb="${OPTIMIZE_CACHE_CLEANED_KB:-0}"
        local db_count="${OPTIMIZE_DATABASES_COUNT:-0}"
        local config_count="${OPTIMIZE_CONFIGS_REPAIRED:-0}"
        if [[ "$cache_kb" =~ ^[0-9]+$ ]] && [[ "$cache_kb" -gt 0 ]]; then
            local cache_human=$(bytes_to_human "$((cache_kb * 1024))")
            stats+=("${cache_human} cache cleaned")
        fi
        if [[ "$db_count" =~ ^[0-9]+$ ]] && [[ "$db_count" -gt 0 ]]; then
            stats+=("${db_count} databases optimized")
        fi
        if [[ "$config_count" =~ ^[0-9]+$ ]] && [[ "$config_count" -gt 0 ]]; then
            stats+=("${config_count} configs repaired")
        fi
        # Build first summary line with most important stat only
        # (priority: cache freed > databases optimized > configs repaired).
        local key_stat=""
        if [[ "$cache_kb" =~ ^[0-9]+$ ]] && [[ "$cache_kb" -gt 0 ]]; then
            local cache_human=$(bytes_to_human "$((cache_kb * 1024))")
            key_stat="${cache_human} cache cleaned"
        elif [[ "$db_count" =~ ^[0-9]+$ ]] && [[ "$db_count" -gt 0 ]]; then
            key_stat="${db_count} databases optimized"
        elif [[ "$config_count" =~ ^[0-9]+$ ]] && [[ "$config_count" -gt 0 ]]; then
            key_stat="${config_count} configs repaired"
        fi
        if [[ -n "$key_stat" ]]; then
            summary_details+=("Applied ${GREEN}${total_applied:-0}${NC} optimizations — ${key_stat}")
        else
            summary_details+=("Applied ${GREEN}${total_applied:-0}${NC} optimizations — all services tuned")
        fi
        local summary_line3=""
        if [[ -n "${AUTO_FIX_SUMMARY:-}" ]]; then
            summary_line3="${AUTO_FIX_SUMMARY}"
            if [[ -n "${AUTO_FIX_DETAILS:-}" ]]; then
                # Join multi-line details into one comma-separated string.
                local detail_join
                detail_join=$(echo "${AUTO_FIX_DETAILS}" | paste -sd ", " -)
                [[ -n "$detail_join" ]] && summary_line3+="${detail_join}"
            fi
            summary_details+=("$summary_line3")
        fi
        summary_details+=("System fully optimized — faster, more secure and responsive")
    fi
    print_summary_block "$summary_title" "${summary_details[@]}"
}
# Print a one-line RAM/disk/uptime overview parsed from the health JSON.
# $1: JSON document from generate_health_json; missing fields default to 0.
show_system_health() {
    local health_json="$1"
    local mem_used=$(echo "$health_json" | jq -r '.memory_used_gb // 0' 2> /dev/null || echo "0")
    local mem_total=$(echo "$health_json" | jq -r '.memory_total_gb // 0' 2> /dev/null || echo "0")
    local disk_used=$(echo "$health_json" | jq -r '.disk_used_gb // 0' 2> /dev/null || echo "0")
    local disk_total=$(echo "$health_json" | jq -r '.disk_total_gb // 0' 2> /dev/null || echo "0")
    # NOTE(review): disk_percent is extracted but never displayed below.
    local disk_percent=$(echo "$health_json" | jq -r '.disk_used_percent // 0' 2> /dev/null || echo "0")
    local uptime=$(echo "$health_json" | jq -r '.uptime_days // 0' 2> /dev/null || echo "0")
    # Guard against empty strings so printf's %f never sees a blank value.
    mem_used=${mem_used:-0}
    mem_total=${mem_total:-0}
    disk_used=${disk_used:-0}
    disk_total=${disk_total:-0}
    disk_percent=${disk_percent:-0}
    uptime=${uptime:-0}
    printf "${ICON_ADMIN} System %.0f/%.0f GB RAM | %.0f/%.0f GB Disk | Uptime %.0fd\n" \
        "$mem_used" "$mem_total" "$disk_used" "$disk_total" "$uptime"
}
# Emit each entry of the JSON .optimizations array as one compact object per line.
parse_optimizations() {
    local health_json="$1"
    printf '%s\n' "$health_json" | jq -c '.optimizations[]' 2> /dev/null
}
# Print a section arrow line for an optimization action, separating
# consecutive actions with a blank line (tracked via FIRST_ACTION).
# $2 (description) and $3 (kind) are accepted for interface compatibility
# but are not currently displayed.
announce_action() {
    local name="$1"
    local desc="$2"
    local kind="$3"
    if [[ "${FIRST_ACTION:-true}" != "true" ]]; then
        echo ""
    else
        export FIRST_ACTION=false
    fi
    echo -e "${BLUE}${ICON_ARROW} ${name}${NC}"
}
# True when /etc/pam.d/sudo already loads the Touch ID PAM module.
touchid_configured() {
    local pam_file="/etc/pam.d/sudo"
    [[ -f "$pam_file" ]] || return 1
    grep -q "pam_tid.so" "$pam_file" 2> /dev/null
}
# Heuristic Touch ID capability probe: ask bioutil when present; otherwise
# assume Apple Silicon hardware has a sensor.
touchid_supported() {
    if command -v bioutil > /dev/null 2>&1 &&
        bioutil -r 2> /dev/null | grep -qi "Touch ID"; then
        return 0
    fi
    # Fallback: Apple Silicon Macs usually have Touch ID.
    [[ "$(uname -m)" == "arm64" ]]
}
# Remove one optimization target path (with leading-~ expansion), reporting
# a per-path status line. Protected paths are skipped; when a plain removal
# fails, falls back to sudo if the user grants access.
# $1: raw path (may start with ~), $2: human-readable label for output.
cleanup_path() {
    local raw_path="$1"
    local label="$2"
    local expanded_path="${raw_path/#\~/$HOME}"
    # Already gone: count it as cleaned.
    if [[ ! -e "$expanded_path" ]]; then
        echo -e "${GREEN}${ICON_SUCCESS}${NC} $label"
        return
    fi
    if should_protect_path "$expanded_path"; then
        echo -e "${YELLOW}${ICON_WARNING}${NC} Protected $label"
        return
    fi
    # Measure before deleting so we can report the space reclaimed.
    local size_kb
    size_kb=$(get_path_size_kb "$expanded_path")
    local size_display=""
    if [[ "$size_kb" =~ ^[0-9]+$ && "$size_kb" -gt 0 ]]; then
        size_display=$(bytes_to_human "$((size_kb * 1024))")
    fi
    local removed=false
    if safe_remove "$expanded_path" true; then
        removed=true
    elif request_sudo_access "Removing $label requires admin access"; then
        if safe_sudo_remove "$expanded_path"; then
            removed=true
        fi
    fi
    if [[ "$removed" == "true" ]]; then
        if [[ -n "$size_display" ]]; then
            echo -e "${GREEN}${ICON_SUCCESS}${NC} $label ${GREEN}(${size_display})${NC}"
        else
            echo -e "${GREEN}${ICON_SUCCESS}${NC} $label"
        fi
    else
        echo -e "${YELLOW}${ICON_WARNING}${NC} Skipped $label ${GRAY}(grant Full Disk Access to your terminal and retry)${NC}"
    fi
}
# Expand a leading ~ to $HOME and create the directory as the current user.
ensure_directory() {
    local target="${1/#\~/$HOME}"
    ensure_user_dir "$target"
}
# Pending security remediations as "id|label" records.
declare -a SECURITY_FIXES=()
# Build the list of security fixes worth offering, honoring the user's
# whitelist. Returns success only when at least one fix is pending.
collect_security_fix_actions() {
    SECURITY_FIXES=()
    if [[ "${FIREWALL_DISABLED:-}" == "true" ]] && ! is_whitelisted "firewall"; then
        SECURITY_FIXES+=("firewall|Enable macOS firewall")
    fi
    if [[ "${GATEKEEPER_DISABLED:-}" == "true" ]] && ! is_whitelisted "gatekeeper"; then
        SECURITY_FIXES+=("gatekeeper|Enable Gatekeeper (App download protection)")
    fi
    if touchid_supported && ! touchid_configured && ! is_whitelisted "check_touchid"; then
        SECURITY_FIXES+=("touchid|Enable Touch ID for sudo")
    fi
    ((${#SECURITY_FIXES[@]} > 0))
}
# List the pending security fixes and ask for a single-key confirmation.
# Returns 0 to proceed; on decline or read failure, exports
# MOLE_SECURITY_FIXES_SKIPPED=true and returns 1.
ask_for_security_fixes() {
    if ! collect_security_fix_actions; then
        return 1
    fi
    echo ""
    echo -e "${BLUE}SECURITY FIXES${NC}"
    for entry in "${SECURITY_FIXES[@]}"; do
        # Entries are "id|label"; only the label is shown to the user.
        IFS='|' read -r _ label <<< "$entry"
        echo -e " ${ICON_LIST} $label"
    done
    echo ""
    export MOLE_SECURITY_FIXES_SHOWN=true
    echo -ne "${YELLOW}Apply now?${NC} ${GRAY}Enter confirm / Space cancel${NC}: "
    local key
    if ! key=$(read_key); then
        export MOLE_SECURITY_FIXES_SKIPPED=true
        echo -e "\n ${GRAY}${ICON_WARNING}${NC} Security fixes skipped"
        echo ""
        return 1
    fi
    if [[ "$key" == "ENTER" ]]; then
        echo ""
        return 0
    else
        export MOLE_SECURITY_FIXES_SKIPPED=true
        echo -e "\n ${GRAY}${ICON_WARNING}${NC} Security fixes skipped"
        echo ""
        return 1
    fi
}
# Turn on the macOS application firewall via socketfilterfw; clears
# FIREWALL_DISABLED on success, reports and returns 1 on failure.
apply_firewall_fix() {
    if ! sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate on > /dev/null 2>&1; then
        echo -e " ${YELLOW}${ICON_WARNING}${NC} Failed to enable firewall (check permissions)"
        return 1
    fi
    echo -e " ${GREEN}${ICON_SUCCESS}${NC} Firewall enabled"
    FIREWALL_DISABLED=false
    return 0
}
# Re-enable Gatekeeper via spctl; clears GATEKEEPER_DISABLED on success,
# reports and returns 1 on failure.
apply_gatekeeper_fix() {
    if ! sudo spctl --master-enable 2> /dev/null; then
        echo -e " ${YELLOW}${ICON_WARNING}${NC} Failed to enable Gatekeeper"
        return 1
    fi
    echo -e " ${GREEN}${ICON_SUCCESS}${NC} Gatekeeper enabled"
    GATEKEEPER_DISABLED=false
    return 0
}
# Delegate to the bundled touchid helper; normalize any failure to status 1.
apply_touchid_fix() {
    "$SCRIPT_DIR/bin/touchid.sh" enable || return 1
    return 0
}
# Apply each queued security fix after acquiring a sudo session.
# Returns non-zero when sudo is denied; clears SECURITY_FIXES afterwards.
#
# Bug fix: the previous `apply_* && ((applied++))` pattern is fatal under
# `set -e` — the first post-increment evaluates the expression to 0, which
# makes `(( ))` return status 1 and aborts the whole script right after the
# first successful fix. Use plain assignment inside an `if` instead.
perform_security_fixes() {
    if ! ensure_sudo_session "Security changes require admin access"; then
        echo -e "${YELLOW}${ICON_WARNING}${NC} Skipped security fixes (sudo denied)"
        return 1
    fi
    local applied=0
    local entry action
    for entry in "${SECURITY_FIXES[@]}"; do
        # Entries are "id|label"; only the id drives dispatch here.
        IFS='|' read -r action _ <<< "$entry"
        case "$action" in
            firewall)
                if apply_firewall_fix; then applied=$((applied + 1)); fi
                ;;
            gatekeeper)
                if apply_gatekeeper_fix; then applied=$((applied + 1)); fi
                ;;
            touchid)
                if apply_touchid_fix; then applied=$((applied + 1)); fi
                ;;
        esac
    done
    if ((applied > 0)); then
        log_success "Security settings updated"
    fi
    SECURITY_FIXES=()
}
# Best-effort teardown: stop the UI spinner, end the sudo keepalive session,
# and remove temp files. Registered as the EXIT trap in main().
cleanup_all() {
    stop_inline_spinner 2> /dev/null || true
    stop_sudo_session
    cleanup_temp_files
}
# INT/TERM handler: run the normal teardown, then exit with the conventional
# 130 (terminated-by-signal) status.
handle_interrupt() {
    cleanup_all
    exit 130
}
# Entry point: collect health data, apply optimizations (safe first, then
# confirm-level), run the interactive checks, and print the summary.
# Flags: --debug, --dry-run, --whitelist (manage the whitelist and exit).
main() {
    local health_json
    for arg in "$@"; do
        case "$arg" in
            "--debug")
                export MO_DEBUG=1
                ;;
            "--dry-run")
                export MOLE_DRY_RUN=1
                ;;
            "--whitelist")
                manage_whitelist "optimize"
                exit 0
                ;;
        esac
    done
    trap cleanup_all EXIT
    trap handle_interrupt INT TERM
    if [[ -t 1 ]]; then
        clear
    fi
    print_header
    # Dry-run indicator.
    if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
        echo -e "${YELLOW}${ICON_DRY_RUN} DRY RUN MODE${NC} - No files will be modified\n"
    fi
    # Hard dependencies: jq parses the health JSON, bc does float math.
    if ! command -v jq > /dev/null 2>&1; then
        echo -e "${YELLOW}${ICON_ERROR}${NC} Missing dependency: jq"
        echo -e "${GRAY}Install with: ${GREEN}brew install jq${NC}"
        exit 1
    fi
    if ! command -v bc > /dev/null 2>&1; then
        echo -e "${YELLOW}${ICON_ERROR}${NC} Missing dependency: bc"
        echo -e "${GRAY}Install with: ${GREEN}brew install bc${NC}"
        exit 1
    fi
    if [[ -t 1 ]]; then
        start_inline_spinner "Collecting system info..."
    fi
    if ! health_json=$(generate_health_json 2> /dev/null); then
        if [[ -t 1 ]]; then
            stop_inline_spinner
        fi
        echo ""
        log_error "Failed to collect system health data"
        exit 1
    fi
    # Validate the JSON before handing it to the parsers below.
    if ! echo "$health_json" | jq empty 2> /dev/null; then
        if [[ -t 1 ]]; then
            stop_inline_spinner
        fi
        echo ""
        log_error "Invalid system health data format"
        echo -e "${YELLOW}Tip:${NC} Check if jq, awk, sysctl, and df commands are available"
        exit 1
    fi
    if [[ -t 1 ]]; then
        stop_inline_spinner
    fi
    show_system_health "$health_json"
    load_whitelist "optimize"
    # Surface a short whitelist preview (only when it fits on one line).
    if [[ ${#CURRENT_WHITELIST_PATTERNS[@]} -gt 0 ]]; then
        local count=${#CURRENT_WHITELIST_PATTERNS[@]}
        if [[ $count -le 3 ]]; then
            local patterns_list=$(
                IFS=', '
                echo "${CURRENT_WHITELIST_PATTERNS[*]}"
            )
            echo -e "${ICON_ADMIN} Active Whitelist: ${patterns_list}"
        fi
    fi
    # Split optimizations into safe (auto-applied) and confirm-level items.
    local -a safe_items=()
    local -a confirm_items=()
    local opts_file
    opts_file=$(mktemp_file)
    parse_optimizations "$health_json" > "$opts_file"
    while IFS= read -r opt_json; do
        [[ -z "$opt_json" ]] && continue
        local name=$(echo "$opt_json" | jq -r '.name')
        local desc=$(echo "$opt_json" | jq -r '.description')
        local action=$(echo "$opt_json" | jq -r '.action')
        local path=$(echo "$opt_json" | jq -r '.path // ""')
        local safe=$(echo "$opt_json" | jq -r '.safe')
        # Pack fields with '|' so they can be split back apart with `read`.
        local item="${name}|${desc}|${action}|${path}"
        if [[ "$safe" == "true" ]]; then
            safe_items+=("$item")
        else
            confirm_items+=("$item")
        fi
    done < "$opts_file"
    echo ""
    if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then
        # Best-effort: continue without sudo if the user declines.
        ensure_sudo_session "System optimization requires admin access" || true
    fi
    export FIRST_ACTION=true
    if [[ ${#safe_items[@]} -gt 0 ]]; then
        for item in "${safe_items[@]}"; do
            IFS='|' read -r name desc action path <<< "$item"
            announce_action "$name" "$desc" "safe"
            execute_optimization "$action" "$path"
        done
    fi
    if [[ ${#confirm_items[@]} -gt 0 ]]; then
        for item in "${confirm_items[@]}"; do
            IFS='|' read -r name desc action path <<< "$item"
            announce_action "$name" "$desc" "confirm"
            execute_optimization "$action" "$path"
        done
    fi
    local safe_count=${#safe_items[@]}
    local confirm_count=${#confirm_items[@]}
    run_system_checks
    # Exported for show_optimization_summary.
    export OPTIMIZE_SAFE_COUNT=$safe_count
    export OPTIMIZE_CONFIRM_COUNT=$confirm_count
    show_optimization_summary
    printf '\n'
}
main "$@"

View File

@@ -1,166 +0,0 @@
#!/bin/bash
# Mole - Purge command.
# Cleans heavy project build artifacts.
# Interactive selection by project.
set -euo pipefail
# Fix locale issues (avoid Perl warnings on non-English systems)
export LC_ALL=C
export LANG=C
# Get script directory and source common functions
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/../lib/core/common.sh"
# Set up cleanup trap for temporary files
trap cleanup_temp_files EXIT INT TERM
# Logging helpers and the project-artifact scanner/cleaner.
source "$SCRIPT_DIR/../lib/core/log.sh"
source "$SCRIPT_DIR/../lib/clean/project.sh"
# Configuration
# Name of the output section currently open (see start_section/note_activity).
CURRENT_SECTION=""
# Section management
# Begin a named output section: record it and print a divider heading.
start_section() {
    CURRENT_SECTION="$1"
    printf '\n'
    echo -e "${BLUE}━━━ ${CURRENT_SECTION} ━━━${NC}"
}
# Close the current section by clearing the tracker.
end_section() {
    CURRENT_SECTION=''
}
# Note activity for export list
# Append the active section name to the export list file, if a section is open.
note_activity() {
    [[ -n "$CURRENT_SECTION" ]] || return 0
    printf '%s\n' "$CURRENT_SECTION" >> "$EXPORT_LIST_FILE"
}
# Main purge function
# Prepare the screen and reset the on-disk purge stat counters that the
# cleaner accumulates into (read back by perform_purge for the summary).
start_purge() {
    # Clear screen for better UX
    if [[ -t 1 ]]; then
        printf '\033[2J\033[H'
    fi
    printf '\n'
    echo -e "${PURPLE_BOLD}Purge Project Artifacts${NC}"
    # Initialize stats file in user cache directory
    local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"
    ensure_user_dir "$stats_dir"
    ensure_user_file "$stats_dir/purge_stats"
    ensure_user_file "$stats_dir/purge_count"
    echo "0" > "$stats_dir/purge_stats"
    echo "0" > "$stats_dir/purge_count"
}
# Perform the purge
# Run the artifact cleaner, then print a summary built from the stat files
# written under the user cache directory. Always returns 0.
perform_purge() {
    # clean_project_artifacts reports its outcome via exit code:
    #   0 = success (show summary), 1 = user cancelled, 2 = nothing to clean
    #
    # Bug fix: capture the status with `||` — this script runs under `set -e`,
    # so a bare call aborted the script whenever the cleaner returned
    # non-zero, before `$?` could be inspected and the summary logic run.
    local exit_code=0
    clean_project_artifacts || exit_code=$?
    if [[ $exit_code -ne 0 ]]; then
        return 0
    fi
    # Final summary (matching clean.sh format)
    echo ""
    local summary_heading="Purge complete"
    local -a summary_details=()
    local total_size_cleaned=0
    local total_items_cleaned=0
    # Read stats from user cache directory (written by the cleaner), then
    # remove the files so a later run starts from zero.
    local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"
    if [[ -f "$stats_dir/purge_stats" ]]; then
        total_size_cleaned=$(cat "$stats_dir/purge_stats" 2> /dev/null || echo "0")
        rm -f "$stats_dir/purge_stats"
    fi
    # Read count
    if [[ -f "$stats_dir/purge_count" ]]; then
        total_items_cleaned=$(cat "$stats_dir/purge_count" 2> /dev/null || echo "0")
        rm -f "$stats_dir/purge_count"
    fi
    if [[ $total_size_cleaned -gt 0 ]]; then
        # Stats value is divided by 1024*1024 for display — presumably KB
        # on disk; confirm against lib/clean/project.sh.
        local freed_gb
        freed_gb=$(echo "$total_size_cleaned" | awk '{printf "%.2f", $1/1024/1024}')
        summary_details+=("Space freed: ${GREEN}${freed_gb}GB${NC}")
        summary_details+=("Free space now: $(get_free_space)")
        if [[ $total_items_cleaned -gt 0 ]]; then
            summary_details+=("Items cleaned: $total_items_cleaned")
        fi
    else
        summary_details+=("No old project artifacts to clean.")
        summary_details+=("Free space now: $(get_free_space)")
    fi
    print_summary_block "$summary_heading" "${summary_details[@]}"
    printf '\n'
}
# Show help message
# Print usage, flags, and the default scan locations for `mo purge`.
show_help() {
    echo -e "${PURPLE_BOLD}Mole Purge${NC} - Clean old project build artifacts"
    echo ""
    echo -e "${YELLOW}Usage:${NC} mo purge [options]"
    echo ""
    echo -e "${YELLOW}Options:${NC}"
    echo " --paths Edit custom scan directories"
    echo " --debug Enable debug logging"
    echo " --help Show this help message"
    echo ""
    echo -e "${YELLOW}Default Paths:${NC}"
    local search_path
    for search_path in "${DEFAULT_PURGE_SEARCH_PATHS[@]}"; do
        echo " - $search_path"
    done
}
# Main entry point
# Parse CLI flags, then run the interactive purge flow with the cursor hidden.
main() {
    # Restore the cursor before bailing out on Ctrl-C / termination.
    trap 'show_cursor; exit 130' INT TERM
    for opt in "$@"; do
        case "$opt" in
            "--paths")
                # Lazy-load the path manager only when requested.
                source "$SCRIPT_DIR/../lib/manage/purge_paths.sh"
                manage_purge_paths
                exit 0
                ;;
            "--help")
                show_help
                exit 0
                ;;
            "--debug")
                export MO_DEBUG=1
                ;;
            *)
                echo "Unknown option: $opt"
                echo "Use 'mo purge --help' for usage information"
                exit 1
                ;;
        esac
    done
    start_purge
    hide_cursor
    perform_purge
    show_cursor
}
main "$@"

View File

@@ -1,15 +0,0 @@
#!/bin/bash
# Mole - Status command.
# Runs the Go system status panel.
# Shows live system metrics.
set -euo pipefail
# The Go binary ships alongside this launcher script.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
GO_BIN="$SCRIPT_DIR/status-go"
if [[ ! -x "$GO_BIN" ]]; then
    echo "Bundled status binary not found. Please reinstall Mole or run mo update to restore it." >&2
    exit 1
fi
# Hand the process over to the Go implementation.
exec "$GO_BIN" "$@"

View File

@@ -1,325 +0,0 @@
#!/bin/bash
# Mole - Touch ID command.
# Configures sudo with Touch ID.
# Guided toggle with safety checks.
set -euo pipefail
# Determine script location and source common functions
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
LIB_DIR="$(cd "$SCRIPT_DIR/../lib" && pwd)"
# Source common functions
# shellcheck source=../lib/core/common.sh
source "$LIB_DIR/core/common.sh"
# PAM file locations. The MOLE_* environment overrides presumably exist so
# tests can point at scratch files instead of /etc — TODO confirm with callers.
readonly PAM_SUDO_FILE="${MOLE_PAM_SUDO_FILE:-/etc/pam.d/sudo}"
readonly PAM_SUDO_LOCAL_FILE="${MOLE_PAM_SUDO_LOCAL_FILE:-/etc/pam.d/sudo_local}"
# Exact PAM rule inserted/removed by enable/disable below.
readonly PAM_TID_LINE="auth sufficient pam_tid.so"
# Check if Touch ID is already configured
is_touchid_configured() {
    # Touch ID counts as configured when pam_tid.so appears in sudo_local
    # (modern macOS) or, failing that, in the main sudo PAM file (legacy).
    if [[ -f "$PAM_SUDO_LOCAL_FILE" ]] \
        && grep -q "pam_tid.so" "$PAM_SUDO_LOCAL_FILE" 2> /dev/null; then
        return 0
    fi
    # No sudo file at all means nothing is configured.
    [[ -f "$PAM_SUDO_FILE" ]] || return 1
    grep -q "pam_tid.so" "$PAM_SUDO_FILE" 2> /dev/null
}
# Check if system supports Touch ID
supports_touchid() {
    # Check if bioutil exists and has Touch ID capability
    if command -v bioutil &> /dev/null; then
        bioutil -r 2> /dev/null | grep -q "Touch ID" && return 0
    fi
    # Fallback: check if running on Apple Silicon or modern Intel Mac
    local arch
    arch=$(uname -m)
    if [[ "$arch" == "arm64" ]]; then
        return 0
    fi
    # For Intel Macs, check if it's 2018 or later (approximation)
    # NOTE(review): "Model Identifier" values look like "MacBookPro15,1" and
    # rarely contain a 4-digit run, so this grep likely never matches and the
    # function falls through to return 1 on Intel — confirm intent.
    local model_year
    model_year=$(system_profiler SPHardwareDataType 2> /dev/null | grep "Model Identifier" | grep -o "[0-9]\{4\}" | head -1)
    if [[ -n "$model_year" ]] && [[ "$model_year" -ge 2018 ]]; then
        return 0
    fi
    # Default: assume no Touch ID hardware.
    return 1
}
# Show current Touch ID status
show_status() {
    # Report whether the pam_tid.so rule is currently in effect for sudo.
    if is_touchid_configured; then
        echo -e "${GREEN}${ICON_SUCCESS}${NC} Touch ID is enabled for sudo"
        return 0
    fi
    echo -e "${YELLOW}${NC} Touch ID is not configured for sudo"
}
# Enable Touch ID for sudo
enable_touchid() {
    # Enable Touch ID for sudo. Two strategies:
    #   1. If the sudo PAM file references sudo_local (Sonoma-era layout),
    #      write the pam_tid.so rule into sudo_local and migrate any legacy
    #      rule out of the main sudo file.
    #   2. Otherwise fall back to inserting the rule directly into the main
    #      sudo file (legacy layout), after backing it up.
    # Returns 0 on success/already-enabled, 1 on any failure or user cancel.
    # Cleanup trap
    # NOTE(review): this overwrites any EXIT trap set by the caller — confirm
    # no surrounding script relies on its own EXIT trap.
    local temp_file=""
    trap '[[ -n "${temp_file:-}" ]] && rm -f "${temp_file:-}"' EXIT
    # First check if system supports Touch ID
    if ! supports_touchid; then
        log_warning "This Mac may not support Touch ID"
        read -rp "Continue anyway? [y/N] " confirm
        if [[ ! "$confirm" =~ ^[Yy]$ ]]; then
            echo -e "${YELLOW}Cancelled${NC}"
            return 1
        fi
        echo ""
    fi
    # Check if we should use sudo_local (Sonoma+)
    if grep -q "sudo_local" "$PAM_SUDO_FILE"; then
        # Check if already correctly configured in sudo_local
        if [[ -f "$PAM_SUDO_LOCAL_FILE" ]] && grep -q "pam_tid.so" "$PAM_SUDO_LOCAL_FILE"; then
            # It is in sudo_local, but let's check if it's ALSO in sudo (incomplete migration)
            if grep -q "pam_tid.so" "$PAM_SUDO_FILE"; then
                # Clean up legacy config
                temp_file=$(mktemp)
                grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file"
                if sudo mv "$temp_file" "$PAM_SUDO_FILE" 2> /dev/null; then
                    echo -e "${GREEN}${ICON_SUCCESS} Cleanup legacy configuration${NC}"
                fi
            fi
            echo -e "${GREEN}${ICON_SUCCESS} Touch ID is already enabled${NC}"
            return 0
        fi
        # Not configured in sudo_local yet.
        # Check if configured in sudo (Legacy)
        local is_legacy_configured=false
        if grep -q "pam_tid.so" "$PAM_SUDO_FILE"; then
            is_legacy_configured=true
        fi
        # Function to write to sudo_local
        local write_success=false
        if [[ ! -f "$PAM_SUDO_LOCAL_FILE" ]]; then
            # Create the file
            echo "# sudo_local: local customizations for sudo" | sudo tee "$PAM_SUDO_LOCAL_FILE" > /dev/null
            echo "$PAM_TID_LINE" | sudo tee -a "$PAM_SUDO_LOCAL_FILE" > /dev/null
            # Root-owned, read-only permissions on the PAM include file.
            sudo chmod 444 "$PAM_SUDO_LOCAL_FILE"
            sudo chown root:wheel "$PAM_SUDO_LOCAL_FILE"
            write_success=true
        else
            # Append if not present
            if ! grep -q "pam_tid.so" "$PAM_SUDO_LOCAL_FILE"; then
                # Edit a copy, then move it into place atomically via sudo.
                temp_file=$(mktemp)
                cp "$PAM_SUDO_LOCAL_FILE" "$temp_file"
                echo "$PAM_TID_LINE" >> "$temp_file"
                sudo mv "$temp_file" "$PAM_SUDO_LOCAL_FILE"
                sudo chmod 444 "$PAM_SUDO_LOCAL_FILE"
                sudo chown root:wheel "$PAM_SUDO_LOCAL_FILE"
                write_success=true
            else
                write_success=true # Already there (should be caught by first check, but safe fallback)
            fi
        fi
        if $write_success; then
            # If we migrated from legacy, clean it up now
            if $is_legacy_configured; then
                temp_file=$(mktemp)
                grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file"
                sudo mv "$temp_file" "$PAM_SUDO_FILE"
                log_success "Touch ID migrated to sudo_local"
            else
                log_success "Touch ID enabled (via sudo_local) - try: sudo ls"
            fi
            return 0
        else
            log_error "Failed to write to sudo_local"
            return 1
        fi
    fi
    # Legacy method: Modify sudo file directly
    # Check if already configured (Legacy)
    if is_touchid_configured; then
        echo -e "${GREEN}${ICON_SUCCESS} Touch ID is already enabled${NC}"
        return 0
    fi
    # Create backup only if it doesn't exist to preserve original state
    if [[ ! -f "${PAM_SUDO_FILE}.mole-backup" ]]; then
        if ! sudo cp "$PAM_SUDO_FILE" "${PAM_SUDO_FILE}.mole-backup" 2> /dev/null; then
            log_error "Failed to create backup"
            return 1
        fi
    fi
    # Create temp file
    temp_file=$(mktemp)
    # Insert pam_tid.so after the first comment block
    # (PAM evaluates auth rules in order, so the rule is placed before the
    # first non-comment line of the file).
    awk '
    BEGIN { inserted = 0 }
    /^#/ { print; next }
    !inserted && /^[^#]/ {
        print "'"$PAM_TID_LINE"'"
        inserted = 1
    }
    { print }
    ' "$PAM_SUDO_FILE" > "$temp_file"
    # Verify content change
    if cmp -s "$PAM_SUDO_FILE" "$temp_file"; then
        log_error "Failed to modify configuration"
        return 1
    fi
    # Apply the changes
    if sudo mv "$temp_file" "$PAM_SUDO_FILE" 2> /dev/null; then
        log_success "Touch ID enabled - try: sudo ls"
        return 0
    else
        log_error "Failed to enable Touch ID"
        return 1
    fi
}
# Disable Touch ID for sudo
disable_touchid() {
    # Remove the pam_tid.so rule from sudo_local (preferred) or the main
    # sudo PAM file (legacy). Returns 0 on success/not-enabled, 1 on failure.
    # Cleanup trap
    # NOTE(review): overwrites any caller-installed EXIT trap — confirm safe.
    local temp_file=""
    trap '[[ -n "${temp_file:-}" ]] && rm -f "${temp_file:-}"' EXIT
    if ! is_touchid_configured; then
        echo -e "${YELLOW}Touch ID is not currently enabled${NC}"
        return 0
    fi
    # Check sudo_local first
    if [[ -f "$PAM_SUDO_LOCAL_FILE" ]] && grep -q "pam_tid.so" "$PAM_SUDO_LOCAL_FILE"; then
        # Remove from sudo_local
        temp_file=$(mktemp)
        grep -v "pam_tid.so" "$PAM_SUDO_LOCAL_FILE" > "$temp_file"
        if sudo mv "$temp_file" "$PAM_SUDO_LOCAL_FILE" 2> /dev/null; then
            # Since we modified sudo_local, we should also check if it's in sudo file (legacy cleanup)
            if grep -q "pam_tid.so" "$PAM_SUDO_FILE"; then
                temp_file=$(mktemp)
                grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file"
                sudo mv "$temp_file" "$PAM_SUDO_FILE"
            fi
            echo -e "${GREEN}${ICON_SUCCESS} Touch ID disabled (removed from sudo_local)${NC}"
            echo ""
            return 0
        else
            log_error "Failed to disable Touch ID from sudo_local"
            return 1
        fi
    fi
    # Fallback to sudo file (legacy)
    if grep -q "pam_tid.so" "$PAM_SUDO_FILE"; then
        # Create backup only if it doesn't exist
        if [[ ! -f "${PAM_SUDO_FILE}.mole-backup" ]]; then
            if ! sudo cp "$PAM_SUDO_FILE" "${PAM_SUDO_FILE}.mole-backup" 2> /dev/null; then
                log_error "Failed to create backup"
                return 1
            fi
        fi
        # Remove pam_tid.so line
        temp_file=$(mktemp)
        grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file"
        if sudo mv "$temp_file" "$PAM_SUDO_FILE" 2> /dev/null; then
            echo -e "${GREEN}${ICON_SUCCESS} Touch ID disabled${NC}"
            echo ""
            return 0
        else
            log_error "Failed to disable Touch ID"
            return 1
        fi
    fi
    # Should not reach here if is_touchid_configured was true
    log_error "Could not find Touch ID configuration to disable"
    return 1
}
# Interactive menu
# Interactive toggle: shows current status, then a single-key prompt.
# Enter toggles the state; ESC or Q quits (the prompt advertises "Q to quit",
# so Q is handled explicitly here — previously it fell through to the
# "Invalid key" branch). Any other key reports an invalid key.
show_menu() {
    echo ""
    show_status
    if is_touchid_configured; then
        echo -ne "${PURPLE}${NC} Press ${GREEN}Enter${NC} to disable, ${GRAY}Q${NC} to quit: "
        IFS= read -r -s -n1 key || key=""
        drain_pending_input # Clean up any escape sequence remnants
        echo ""
        case "$key" in
            $'\e' | q | Q) # ESC or Q quits, as the prompt promises
                return 0
                ;;
            "" | $'\n' | $'\r') # Enter
                printf "\r\033[K" # Clear the prompt line
                disable_touchid
                ;;
            *)
                echo ""
                log_error "Invalid key"
                ;;
        esac
    else
        echo -ne "${PURPLE}${NC} Press ${GREEN}Enter${NC} to enable, ${GRAY}Q${NC} to quit: "
        IFS= read -r -s -n1 key || key=""
        drain_pending_input # Clean up any escape sequence remnants
        case "$key" in
            $'\e' | q | Q) # ESC or Q quits, as the prompt promises
                return 0
                ;;
            "" | $'\n' | $'\r') # Enter
                printf "\r\033[K" # Clear the prompt line
                enable_touchid
                ;;
            *)
                echo ""
                log_error "Invalid key"
                ;;
        esac
    fi
}
# Main
main() {
    # Subcommand dispatch: enable | disable | status | (none → interactive menu).
    local command="${1:-}"
    case "$command" in
        enable) enable_touchid ;;
        disable) disable_touchid ;;
        status) show_status ;;
        "") show_menu ;;
        *)
            log_error "Unknown command: $command"
            exit 1
            ;;
    esac
}
main "$@"

View File

@@ -1,586 +0,0 @@
#!/bin/bash
# Mole - Uninstall command.
# Interactive app uninstaller.
# Removes app files and leftovers.
set -euo pipefail
# Fix locale issues on non-English systems.
export LC_ALL=C
export LANG=C
# Load shared helpers.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/../lib/core/common.sh"
# Clean temp files on exit.
# NOTE(review): a later `trap cleanup EXIT INT TERM` replaces this trap, so
# cleanup_temp_files only covers failures during the source lines below —
# confirm cleanup() also handles temp files.
trap cleanup_temp_files EXIT INT TERM
source "$SCRIPT_DIR/../lib/ui/menu_paginated.sh"
source "$SCRIPT_DIR/../lib/ui/app_selector.sh"
source "$SCRIPT_DIR/../lib/uninstall/batch.sh"
# State
# Globals shared with the sourced selector/uninstall modules.
selected_apps=()            # apps the user ticked in the selector
declare -a apps_data=()     # one "epoch|path|name|bundle|size|last|kb" row per app
declare -a selection_state=() # parallel true/false flags for apps_data
total_items=0
files_cleaned=0
total_size_cleaned=0
# Scan applications and collect information.
scan_applications() {
    # Scan installed .app bundles and emit (on stdout) the path of a file
    # containing one "epoch|path|name|bundle|size|last_used|size_kb" row per
    # app, sorted by last-used epoch ascending. Results are cached for 24h;
    # pass "true" as $1 to force a rescan. Returns 1 when nothing is found.
    # Cache app scan (24h TTL).
    local cache_dir="$HOME/.cache/mole"
    local cache_file="$cache_dir/app_scan_cache"
    local cache_ttl=86400 # 24 hours
    local force_rescan="${1:-false}"
    ensure_user_dir "$cache_dir"
    if [[ $force_rescan == false && -f "$cache_file" ]]; then
        local cache_age=$(($(get_epoch_seconds) - $(get_file_mtime "$cache_file")))
        [[ $cache_age -eq $(get_epoch_seconds) ]] && cache_age=86401 # Handle mtime read failure
        if [[ $cache_age -lt $cache_ttl ]]; then
            if [[ -t 2 ]]; then
                echo -e "${GREEN}Loading from cache...${NC}" >&2
                sleep 0.3 # Brief pause so user sees the message
            fi
            echo "$cache_file"
            return 0
        fi
    fi
    # Inline loading = both stdout and stderr are TTYs, so the spinner can
    # redraw in place at the top of a cleared screen.
    local inline_loading=false
    if [[ -t 1 && -t 2 ]]; then
        inline_loading=true
        printf "\033[2J\033[H" >&2 # Clear screen for inline loading
    fi
    local temp_file
    temp_file=$(create_temp_file)
    # Local spinner_pid for cleanup
    local spinner_pid=""
    # Trap to handle Ctrl+C during scan
    # NOTE(review): scan_interrupted is set but never read afterwards.
    local scan_interrupted=false
    # shellcheck disable=SC2329 # Function invoked indirectly via trap
    trap_scan_cleanup() {
        scan_interrupted=true
        if [[ -n "$spinner_pid" ]]; then
            kill -TERM "$spinner_pid" 2> /dev/null || true
            wait "$spinner_pid" 2> /dev/null || true
        fi
        printf "\r\033[K" >&2
        rm -f "$temp_file" "${temp_file}.sorted" "${temp_file}.progress" 2> /dev/null || true
        exit 130
    }
    trap trap_scan_cleanup INT
    local current_epoch
    current_epoch=$(get_epoch_seconds)
    # Pass 1: collect app paths and bundle IDs (no mdls).
    local -a app_data_tuples=()
    local -a app_dirs=(
        "/Applications"
        "$HOME/Applications"
        "/Library/Input Methods"
        "$HOME/Library/Input Methods"
    )
    # Also scan Applications folders on mounted volumes, skipping any that
    # are just the system/user folder mounted under another path (-ef test).
    local vol_app_dir
    local nullglob_was_set=0
    shopt -q nullglob && nullglob_was_set=1
    shopt -s nullglob
    for vol_app_dir in /Volumes/*/Applications; do
        [[ -d "$vol_app_dir" && -r "$vol_app_dir" ]] || continue
        if [[ -d "/Applications" && "$vol_app_dir" -ef "/Applications" ]]; then
            continue
        fi
        if [[ -d "$HOME/Applications" && "$vol_app_dir" -ef "$HOME/Applications" ]]; then
            continue
        fi
        app_dirs+=("$vol_app_dir")
    done
    if [[ $nullglob_was_set -eq 0 ]]; then
        shopt -u nullglob
    fi
    for app_dir in "${app_dirs[@]}"; do
        if [[ ! -d "$app_dir" ]]; then continue; fi
        while IFS= read -r -d '' app_path; do
            if [[ ! -e "$app_path" ]]; then continue; fi
            local app_name
            app_name=$(basename "$app_path" .app)
            # Skip nested apps inside another .app bundle.
            local parent_dir
            parent_dir=$(dirname "$app_path")
            if [[ "$parent_dir" == *".app" || "$parent_dir" == *".app/"* ]]; then
                continue
            fi
            # Bundle ID from plist (fast path).
            local bundle_id="unknown"
            if [[ -f "$app_path/Contents/Info.plist" ]]; then
                bundle_id=$(defaults read "$app_path/Contents/Info.plist" CFBundleIdentifier 2> /dev/null || echo "unknown")
            fi
            if should_protect_from_uninstall "$bundle_id"; then
                continue
            fi
            # Store tuple for pass 2 (metadata + size).
            app_data_tuples+=("${app_path}|${app_name}|${bundle_id}")
        done < <(command find "$app_dir" -name "*.app" -maxdepth 3 -print0 2> /dev/null)
    done
    # Pass 2: metadata + size in parallel (mdls is slow).
    local app_count=0
    local total_apps=${#app_data_tuples[@]}
    local max_parallel
    max_parallel=$(get_optimal_parallel_jobs "io")
    if [[ $max_parallel -lt 8 ]]; then
        max_parallel=8 # At least 8 for good performance
    elif [[ $max_parallel -gt 32 ]]; then
        max_parallel=32 # Cap at 32 to avoid too many processes
    fi
    local pids=()
    # Worker: resolve display name, size, and last-used date for one app and
    # append a pipe-delimited row to $output_file. Runs as a background job.
    process_app_metadata() {
        local app_data_tuple="$1"
        local output_file="$2"
        local current_epoch="$3"
        IFS='|' read -r app_path app_name bundle_id <<< "$app_data_tuple"
        # Display name priority: mdls display name → bundle display → bundle name → folder.
        local display_name="$app_name"
        if [[ -f "$app_path/Contents/Info.plist" ]]; then
            local md_display_name
            md_display_name=$(run_with_timeout 0.05 mdls -name kMDItemDisplayName -raw "$app_path" 2> /dev/null || echo "")
            local bundle_display_name
            bundle_display_name=$(plutil -extract CFBundleDisplayName raw "$app_path/Contents/Info.plist" 2> /dev/null)
            local bundle_name
            bundle_name=$(plutil -extract CFBundleName raw "$app_path/Contents/Info.plist" 2> /dev/null)
            # Sanitize: drop path-looking values, strip the field delimiter
            # '|' and tab/CR/LF so rows stay parseable.
            if [[ "$md_display_name" == /* ]]; then md_display_name=""; fi
            md_display_name="${md_display_name//|/-}"
            md_display_name="${md_display_name//[$'\t\r\n']/}"
            bundle_display_name="${bundle_display_name//|/-}"
            bundle_display_name="${bundle_display_name//[$'\t\r\n']/}"
            bundle_name="${bundle_name//|/-}"
            bundle_name="${bundle_name//[$'\t\r\n']/}"
            if [[ -n "$md_display_name" && "$md_display_name" != "(null)" && "$md_display_name" != "$app_name" ]]; then
                display_name="$md_display_name"
            elif [[ -n "$bundle_display_name" && "$bundle_display_name" != "(null)" ]]; then
                display_name="$bundle_display_name"
            elif [[ -n "$bundle_name" && "$bundle_name" != "(null)" ]]; then
                display_name="$bundle_name"
            fi
        fi
        if [[ "$display_name" == /* ]]; then
            display_name="$app_name"
        fi
        display_name="${display_name//|/-}"
        display_name="${display_name//[$'\t\r\n']/}"
        # App size (KB → human).
        local app_size="N/A"
        local app_size_kb="0"
        if [[ -d "$app_path" ]]; then
            app_size_kb=$(get_path_size_kb "$app_path")
            app_size=$(bytes_to_human "$((app_size_kb * 1024))")
        fi
        # Last used: mdls (fast timeout) → mtime.
        local last_used="Never"
        local last_used_epoch=0
        if [[ -d "$app_path" ]]; then
            local metadata_date
            metadata_date=$(run_with_timeout 0.1 mdls -name kMDItemLastUsedDate -raw "$app_path" 2> /dev/null || echo "")
            if [[ "$metadata_date" != "(null)" && -n "$metadata_date" ]]; then
                last_used_epoch=$(date -j -f "%Y-%m-%d %H:%M:%S %z" "$metadata_date" "+%s" 2> /dev/null || echo "0")
            fi
            if [[ "$last_used_epoch" -eq 0 ]]; then
                last_used_epoch=$(get_file_mtime "$app_path")
            fi
            if [[ $last_used_epoch -gt 0 ]]; then
                # Bucket the age into a human-readable phrase.
                local days_ago=$(((current_epoch - last_used_epoch) / 86400))
                if [[ $days_ago -eq 0 ]]; then
                    last_used="Today"
                elif [[ $days_ago -eq 1 ]]; then
                    last_used="Yesterday"
                elif [[ $days_ago -lt 7 ]]; then
                    last_used="${days_ago} days ago"
                elif [[ $days_ago -lt 30 ]]; then
                    local weeks_ago=$((days_ago / 7))
                    [[ $weeks_ago -eq 1 ]] && last_used="1 week ago" || last_used="${weeks_ago} weeks ago"
                elif [[ $days_ago -lt 365 ]]; then
                    local months_ago=$((days_ago / 30))
                    [[ $months_ago -eq 1 ]] && last_used="1 month ago" || last_used="${months_ago} months ago"
                else
                    local years_ago=$((days_ago / 365))
                    [[ $years_ago -eq 1 ]] && last_used="1 year ago" || last_used="${years_ago} years ago"
                fi
            fi
        fi
        echo "${last_used_epoch}|${app_path}|${display_name}|${bundle_id}|${app_size}|${last_used}|${app_size_kb}" >> "$output_file"
    }
    export -f process_app_metadata
    # Spinner subshell reads the worker count from the progress file and
    # redraws until killed.
    local progress_file="${temp_file}.progress"
    echo "0" > "$progress_file"
    (
        # shellcheck disable=SC2329 # Function invoked indirectly via trap
        cleanup_spinner() { exit 0; }
        trap cleanup_spinner TERM INT EXIT
        local spinner_chars="|/-\\"
        local i=0
        while true; do
            local completed=$(cat "$progress_file" 2> /dev/null || echo 0)
            local c="${spinner_chars:$((i % 4)):1}"
            if [[ $inline_loading == true ]]; then
                printf "\033[H\033[2K%s Scanning applications... %d/%d\n" "$c" "$completed" "$total_apps" >&2
            else
                printf "\r\033[K%s Scanning applications... %d/%d" "$c" "$completed" "$total_apps" >&2
            fi
            ((i++))
            sleep 0.1 2> /dev/null || sleep 1
        done
    ) &
    spinner_pid=$!
    # Launch workers with a sliding window of at most max_parallel jobs.
    for app_data_tuple in "${app_data_tuples[@]}"; do
        ((app_count++))
        process_app_metadata "$app_data_tuple" "$temp_file" "$current_epoch" &
        pids+=($!)
        echo "$app_count" > "$progress_file"
        if ((${#pids[@]} >= max_parallel)); then
            wait "${pids[0]}" 2> /dev/null
            pids=("${pids[@]:1}")
        fi
    done
    for pid in "${pids[@]}"; do
        wait "$pid" 2> /dev/null
    done
    if [[ -n "$spinner_pid" ]]; then
        kill -TERM "$spinner_pid" 2> /dev/null || true
        wait "$spinner_pid" 2> /dev/null || true
    fi
    if [[ $inline_loading == true ]]; then
        printf "\033[H\033[2K" >&2
    else
        echo -ne "\r\033[K" >&2
    fi
    rm -f "$progress_file"
    if [[ ! -s "$temp_file" ]]; then
        echo "No applications found to uninstall" >&2
        rm -f "$temp_file"
        return 1
    fi
    if [[ $total_apps -gt 50 ]]; then
        if [[ $inline_loading == true ]]; then
            printf "\033[H\033[2KProcessing %d applications...\n" "$total_apps" >&2
        else
            printf "\rProcessing %d applications... " "$total_apps" >&2
        fi
    fi
    # Sort rows by last-used epoch (field 1), oldest first.
    sort -t'|' -k1,1n "$temp_file" > "${temp_file}.sorted" || {
        rm -f "$temp_file"
        return 1
    }
    rm -f "$temp_file"
    if [[ $total_apps -gt 50 ]]; then
        if [[ $inline_loading == true ]]; then
            printf "\033[H\033[2K" >&2
        else
            printf "\r\033[K" >&2
        fi
    fi
    # Refresh the cache (best effort) and hand the sorted file to the caller.
    ensure_user_file "$cache_file"
    cp "${temp_file}.sorted" "$cache_file" 2> /dev/null || true
    if [[ -f "${temp_file}.sorted" ]]; then
        echo "${temp_file}.sorted"
    else
        return 1
    fi
}
# Load scan results into the apps_data / selection_state globals.
# Rows whose app bundle no longer exists on disk are dropped.
# Returns 1 if the file is missing/empty or no valid apps remain.
load_applications() {
    local apps_file="$1"
    # A missing file is never non-empty, so -s covers both checks.
    if [[ ! -s "$apps_file" ]]; then
        log_warning "No applications found for uninstallation"
        return 1
    fi
    apps_data=()
    selection_state=()
    while IFS='|' read -r epoch app_path app_name bundle_id size last_used size_kb; do
        [[ -e "$app_path" ]] || continue
        apps_data+=("$epoch|$app_path|$app_name|$bundle_id|$size|$last_used|${size_kb:-0}")
        selection_state+=(false)
    done < "$apps_file"
    if ((${#apps_data[@]} == 0)); then
        log_warning "No applications available for uninstallation"
        return 1
    fi
    return 0
}
# Cleanup: restore cursor and kill keepalive.
cleanup() {
    # Exit handler: leave the alternate screen if we entered it, stop the
    # sudo keepalive background job, and restore the cursor.
    if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
        leave_alt_screen
        unset MOLE_ALT_SCREEN_ACTIVE
    fi
    if [[ -n "${sudo_keepalive_pid:-}" ]]; then
        kill "$sudo_keepalive_pid" 2> /dev/null || true
        wait "$sudo_keepalive_pid" 2> /dev/null || true
        sudo_keepalive_pid=""
    fi
    show_cursor
    # NOTE(review): when invoked via the EXIT trap no argument is passed, so
    # this exits 0 regardless of the script's actual exit status — confirm
    # that masking failure codes is intended.
    exit "${1:-0}"
}
trap cleanup EXIT INT TERM
main() {
    # Interactive uninstall loop: scan (or reuse cache) → selector UI →
    # confirmation summary → batch uninstall → optionally repeat.
    local force_rescan=false
    # Global flags
    for arg in "$@"; do
        case "$arg" in
            "--debug")
                export MO_DEBUG=1
                ;;
        esac
    done
    # Inline loading (alt-screen spinner) only when stdout+stderr are TTYs.
    local use_inline_loading=false
    if [[ -t 1 && -t 2 ]]; then
        use_inline_loading=true
    fi
    hide_cursor
    while true; do
        # Predict whether scan_applications will hit its 24h cache, so we
        # know whether to enter the alternate screen for the spinner.
        local needs_scanning=true
        local cache_file="$HOME/.cache/mole/app_scan_cache"
        if [[ $force_rescan == false && -f "$cache_file" ]]; then
            local cache_age=$(($(get_epoch_seconds) - $(get_file_mtime "$cache_file")))
            [[ $cache_age -eq $(get_epoch_seconds) ]] && cache_age=86401
            [[ $cache_age -lt 86400 ]] && needs_scanning=false
        fi
        if [[ $needs_scanning == true && $use_inline_loading == true ]]; then
            if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" != "1" ]]; then
                enter_alt_screen
                export MOLE_ALT_SCREEN_ACTIVE=1
                export MOLE_INLINE_LOADING=1
                export MOLE_MANAGED_ALT_SCREEN=1
            fi
            printf "\033[2J\033[H" >&2
        else
            unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN MOLE_ALT_SCREEN_ACTIVE
            if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
                leave_alt_screen
                unset MOLE_ALT_SCREEN_ACTIVE
            fi
        fi
        # Scan (or load cached results); bail out of the alt screen on failure.
        local apps_file=""
        if ! apps_file=$(scan_applications "$force_rescan"); then
            if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
                printf "\033[2J\033[H" >&2
                leave_alt_screen
                unset MOLE_ALT_SCREEN_ACTIVE
                unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
            fi
            return 1
        fi
        if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
            printf "\033[2J\033[H" >&2
        fi
        if [[ ! -f "$apps_file" ]]; then
            if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
                leave_alt_screen
                unset MOLE_ALT_SCREEN_ACTIVE
                unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
            fi
            return 1
        fi
        if ! load_applications "$apps_file"; then
            if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
                leave_alt_screen
                unset MOLE_ALT_SCREEN_ACTIVE
                unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
            fi
            rm -f "$apps_file"
            return 1
        fi
        # Run the selector with errexit suspended so we can inspect its
        # exit code (10 = user requested a rescan).
        set +e
        select_apps_for_uninstall
        local exit_code=$?
        set -e
        if [[ $exit_code -ne 0 ]]; then
            if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
                leave_alt_screen
                unset MOLE_ALT_SCREEN_ACTIVE
                unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
            fi
            show_cursor
            clear_screen
            printf '\033[2J\033[H' >&2
            rm -f "$apps_file"
            if [[ $exit_code -eq 10 ]]; then
                force_rescan=true
                continue
            fi
            return 0
        fi
        if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
            leave_alt_screen
            unset MOLE_ALT_SCREEN_ACTIVE
            unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
        fi
        show_cursor
        clear_screen
        printf '\033[2J\033[H' >&2
        local selection_count=${#selected_apps[@]}
        if [[ $selection_count -eq 0 ]]; then
            echo "No apps selected"
            rm -f "$apps_file"
            continue
        fi
        echo -e "${BLUE}${ICON_CONFIRM}${NC} Selected ${selection_count} app(s):"
        # Summary table layout, pass 1: measure column widths.
        local -a summary_rows=()
        local max_name_display_width=0
        local max_size_width=0
        local max_last_width=0
        for selected_app in "${selected_apps[@]}"; do
            IFS='|' read -r _ _ app_name _ size last_used _ <<< "$selected_app"
            local name_width=$(get_display_width "$app_name")
            [[ $name_width -gt $max_name_display_width ]] && max_name_display_width=$name_width
            local size_display="$size"
            [[ -z "$size_display" || "$size_display" == "0" || "$size_display" == "N/A" ]] && size_display="Unknown"
            [[ ${#size_display} -gt $max_size_width ]] && max_size_width=${#size_display}
            local last_display=$(format_last_used_summary "$last_used")
            [[ ${#last_display} -gt $max_last_width ]] && max_last_width=${#last_display}
        done
        ((max_size_width < 5)) && max_size_width=5
        ((max_last_width < 5)) && max_last_width=5
        # Clamp the name column to the terminal width (min/max bounds).
        local term_width=$(tput cols 2> /dev/null || echo 100)
        local available_for_name=$((term_width - 17 - max_size_width - max_last_width))
        local min_name_width=24
        if [[ $term_width -ge 120 ]]; then
            min_name_width=50
        elif [[ $term_width -ge 100 ]]; then
            min_name_width=42
        elif [[ $term_width -ge 80 ]]; then
            min_name_width=30
        fi
        local name_trunc_limit=$max_name_display_width
        [[ $name_trunc_limit -lt $min_name_width ]] && name_trunc_limit=$min_name_width
        [[ $name_trunc_limit -gt $available_for_name ]] && name_trunc_limit=$available_for_name
        [[ $name_trunc_limit -gt 60 ]] && name_trunc_limit=60
        # Pass 2: build truncated rows and re-measure the name column.
        max_name_display_width=0
        for selected_app in "${selected_apps[@]}"; do
            IFS='|' read -r epoch app_path app_name bundle_id size last_used size_kb <<< "$selected_app"
            local display_name
            display_name=$(truncate_by_display_width "$app_name" "$name_trunc_limit")
            local current_width
            current_width=$(get_display_width "$display_name")
            [[ $current_width -gt $max_name_display_width ]] && max_name_display_width=$current_width
            local size_display="$size"
            if [[ -z "$size_display" || "$size_display" == "0" || "$size_display" == "N/A" ]]; then
                size_display="Unknown"
            fi
            local last_display
            last_display=$(format_last_used_summary "$last_used")
            summary_rows+=("$display_name|$size_display|$last_display")
        done
        ((max_name_display_width < 16)) && max_name_display_width=16
        # Print the summary; padding compensates for wide (e.g. CJK) glyphs
        # where display width differs from character count.
        local index=1
        for row in "${summary_rows[@]}"; do
            IFS='|' read -r name_cell size_cell last_cell <<< "$row"
            local name_display_width
            name_display_width=$(get_display_width "$name_cell")
            local name_char_count=${#name_cell}
            local padding_needed=$((max_name_display_width - name_display_width))
            local printf_name_width=$((name_char_count + padding_needed))
            printf "%d. %-*s %*s | Last: %s\n" "$index" "$printf_name_width" "$name_cell" "$max_size_width" "$size_cell" "$last_cell"
            ((index++))
        done
        batch_uninstall_applications
        rm -f "$apps_file"
        # Enter returns to the list; any other key exits.
        echo -e "${GRAY}Press Enter to return to application list, any other key to exit...${NC}"
        local key
        IFS= read -r -s -n1 key || key=""
        drain_pending_input
        if [[ -z "$key" ]]; then
            :
        else
            show_cursor
            return 0
        fi
        force_rescan=false
    done
}
main "$@"

View File

@@ -1,666 +0,0 @@
#!/bin/bash
# Mole - Uninstall Module
# Interactive application uninstaller with keyboard navigation
#
# Usage:
# uninstall.sh # Launch interactive uninstaller
# uninstall.sh --force-rescan # Rescan apps and refresh cache
set -euo pipefail
# Fix locale issues (avoid Perl warnings on non-English systems)
export LC_ALL=C
export LANG=C
# Get script directory and source common functions
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/../lib/core/common.sh"
source "$SCRIPT_DIR/../lib/ui/menu_paginated.sh"
source "$SCRIPT_DIR/../lib/ui/app_selector.sh"
source "$SCRIPT_DIR/../lib/uninstall/batch.sh"
# Note: Bundle preservation logic is now in lib/core/common.sh
# Initialize global variables
# These globals are shared with the sourced selector and batch modules.
selected_apps=() # Global array for app selection
declare -a apps_data=()       # scan rows: epoch|path|name|bundle|size|last|kb
declare -a selection_state=() # parallel true/false flags for apps_data
total_items=0
files_cleaned=0
total_size_cleaned=0
# Compact the "last used" descriptor for aligned summaries
format_last_used_summary() {
    # Compact a "last used" phrase for aligned tables:
    # "3 days ago" -> "3d ago", "2 weeks ago" -> "2w ago", etc.
    # Known fixed phrases pass through; anything unrecognized is echoed as-is.
    local value="$1"
    case "$value" in
        "" | "Unknown")
            echo "Unknown"
            return 0
            ;;
        "Never" | "Recent" | "Today" | "Yesterday" | "This year" | "Old")
            echo "$value"
            return 0
            ;;
    esac
    # Single regex covering day/week/month/"month(s)"/year variants; the
    # literal space before "ago" mirrors the original patterns.
    if [[ $value =~ ^([0-9]+)[[:space:]]+(days?|weeks?|months?|month\(s\)|years?)\ ago$ ]]; then
        local count="${BASH_REMATCH[1]}"
        case "${BASH_REMATCH[2]}" in
            day*) echo "${count}d ago" ;;
            week*) echo "${count}w ago" ;;
            month*) echo "${count}m ago" ;;
            year*) echo "${count}y ago" ;;
        esac
        return 0
    fi
    echo "$value"
}
# Scan applications and collect information
scan_applications() {
# Simplified cache: only check timestamp (24h TTL)
local cache_dir="$HOME/.cache/mole"
local cache_file="$cache_dir/app_scan_cache"
local cache_ttl=86400 # 24 hours
local force_rescan="${1:-false}"
ensure_user_dir "$cache_dir"
# Check if cache exists and is fresh
if [[ $force_rescan == false && -f "$cache_file" ]]; then
local cache_age=$(($(get_epoch_seconds) - $(get_file_mtime "$cache_file")))
[[ $cache_age -eq $(get_epoch_seconds) ]] && cache_age=86401 # Handle missing file
if [[ $cache_age -lt $cache_ttl ]]; then
# Cache hit - return immediately
# Show brief flash of cache usage if in interactive mode
if [[ -t 2 ]]; then
echo -e "${GREEN}Loading from cache...${NC}" >&2
# Small sleep to let user see it (optional, but good for "feeling" the speed vs glitch)
sleep 0.3
fi
echo "$cache_file"
return 0
fi
fi
# Cache miss - prepare for scanning
local inline_loading=false
if [[ -t 1 && -t 2 ]]; then
inline_loading=true
# Clear screen for inline loading
printf "\033[2J\033[H" >&2
fi
local temp_file
temp_file=$(create_temp_file)
# Pre-cache current epoch to avoid repeated calls
local current_epoch
current_epoch=$(get_epoch_seconds)
# First pass: quickly collect all valid app paths and bundle IDs (NO mdls calls)
local -a app_data_tuples=()
local -a app_dirs=(
"/Applications"
"$HOME/Applications"
)
local vol_app_dir
local nullglob_was_set=0
shopt -q nullglob && nullglob_was_set=1
shopt -s nullglob
for vol_app_dir in /Volumes/*/Applications; do
[[ -d "$vol_app_dir" && -r "$vol_app_dir" ]] || continue
if [[ -d "/Applications" && "$vol_app_dir" -ef "/Applications" ]]; then
continue
fi
if [[ -d "$HOME/Applications" && "$vol_app_dir" -ef "$HOME/Applications" ]]; then
continue
fi
app_dirs+=("$vol_app_dir")
done
if [[ $nullglob_was_set -eq 0 ]]; then
shopt -u nullglob
fi
for app_dir in "${app_dirs[@]}"; do
if [[ ! -d "$app_dir" ]]; then continue; fi
while IFS= read -r -d '' app_path; do
if [[ ! -e "$app_path" ]]; then continue; fi
local app_name
app_name=$(basename "$app_path" .app)
# Skip nested apps (e.g. inside Wrapper/ or Frameworks/ of another app)
# Check if parent path component ends in .app (e.g. /Foo.app/Bar.app or /Foo.app/Contents/Bar.app)
# This prevents false positives like /Old.apps/Target.app
local parent_dir
parent_dir=$(dirname "$app_path")
if [[ "$parent_dir" == *".app" || "$parent_dir" == *".app/"* ]]; then
continue
fi
# Get bundle ID only (fast, no mdls calls in first pass)
local bundle_id="unknown"
if [[ -f "$app_path/Contents/Info.plist" ]]; then
bundle_id=$(defaults read "$app_path/Contents/Info.plist" CFBundleIdentifier 2> /dev/null || echo "unknown")
fi
# Skip system critical apps (input methods, system components)
if should_protect_from_uninstall "$bundle_id"; then
continue
fi
# Store tuple: app_path|app_name|bundle_id (display_name will be resolved in parallel later)
app_data_tuples+=("${app_path}|${app_name}|${bundle_id}")
done < <(command find "$app_dir" -name "*.app" -maxdepth 3 -print0 2> /dev/null)
done
# Second pass: process each app with parallel size calculation
local app_count=0
local total_apps=${#app_data_tuples[@]}
# Bound parallelism - for metadata queries, can go higher since it's mostly waiting
local max_parallel
max_parallel=$(get_optimal_parallel_jobs "io")
if [[ $max_parallel -lt 8 ]]; then
max_parallel=8
elif [[ $max_parallel -gt 32 ]]; then
max_parallel=32
fi
local pids=()
# inline_loading variable already set above (line ~92)
# Process app metadata extraction function
process_app_metadata() {
local app_data_tuple="$1"
local output_file="$2"
local current_epoch="$3"
IFS='|' read -r app_path app_name bundle_id <<< "$app_data_tuple"
# Get localized display name (moved from first pass for better performance)
local display_name="$app_name"
if [[ -f "$app_path/Contents/Info.plist" ]]; then
# Try to get localized name from system metadata (best for i18n)
local md_display_name
md_display_name=$(run_with_timeout 0.05 mdls -name kMDItemDisplayName -raw "$app_path" 2> /dev/null || echo "")
# Get bundle names
local bundle_display_name
bundle_display_name=$(plutil -extract CFBundleDisplayName raw "$app_path/Contents/Info.plist" 2> /dev/null)
local bundle_name
bundle_name=$(plutil -extract CFBundleName raw "$app_path/Contents/Info.plist" 2> /dev/null)
# Priority order for name selection (prefer localized names):
# 1. System metadata display name (kMDItemDisplayName) - respects system language
# 2. CFBundleDisplayName - usually localized
# 3. CFBundleName - fallback
# 4. App folder name - last resort
if [[ -n "$md_display_name" && "$md_display_name" != "(null)" && "$md_display_name" != "$app_name" ]]; then
display_name="$md_display_name"
elif [[ -n "$bundle_display_name" && "$bundle_display_name" != "(null)" ]]; then
display_name="$bundle_display_name"
elif [[ -n "$bundle_name" && "$bundle_name" != "(null)" ]]; then
display_name="$bundle_name"
fi
fi
# Parallel size calculation
local app_size="N/A"
local app_size_kb="0"
if [[ -d "$app_path" ]]; then
# Get size in KB, then format for display
app_size_kb=$(get_path_size_kb "$app_path")
app_size=$(bytes_to_human "$((app_size_kb * 1024))")
fi
# Get last used date
local last_used="Never"
local last_used_epoch=0
if [[ -d "$app_path" ]]; then
# Try mdls first with short timeout (0.1s) for accuracy, fallback to mtime for speed
local metadata_date
metadata_date=$(run_with_timeout 0.1 mdls -name kMDItemLastUsedDate -raw "$app_path" 2> /dev/null || echo "")
if [[ "$metadata_date" != "(null)" && -n "$metadata_date" ]]; then
last_used_epoch=$(date -j -f "%Y-%m-%d %H:%M:%S %z" "$metadata_date" "+%s" 2> /dev/null || echo "0")
fi
# Fallback if mdls failed or returned nothing
if [[ "$last_used_epoch" -eq 0 ]]; then
last_used_epoch=$(get_file_mtime "$app_path")
fi
if [[ $last_used_epoch -gt 0 ]]; then
local days_ago=$(((current_epoch - last_used_epoch) / 86400))
if [[ $days_ago -eq 0 ]]; then
last_used="Today"
elif [[ $days_ago -eq 1 ]]; then
last_used="Yesterday"
elif [[ $days_ago -lt 7 ]]; then
last_used="${days_ago} days ago"
elif [[ $days_ago -lt 30 ]]; then
local weeks_ago=$((days_ago / 7))
[[ $weeks_ago -eq 1 ]] && last_used="1 week ago" || last_used="${weeks_ago} weeks ago"
elif [[ $days_ago -lt 365 ]]; then
local months_ago=$((days_ago / 30))
[[ $months_ago -eq 1 ]] && last_used="1 month ago" || last_used="${months_ago} months ago"
else
local years_ago=$((days_ago / 365))
[[ $years_ago -eq 1 ]] && last_used="1 year ago" || last_used="${years_ago} years ago"
fi
fi
fi
# Write to output file atomically
# Fields: epoch|app_path|display_name|bundle_id|size_human|last_used|size_kb
echo "${last_used_epoch}|${app_path}|${display_name}|${bundle_id}|${app_size}|${last_used}|${app_size_kb}" >> "$output_file"
}
export -f process_app_metadata
# Create a temporary file to track progress
local progress_file="${temp_file}.progress"
echo "0" > "$progress_file"
# Start a background spinner that reads progress from file
local spinner_pid=""
(
# shellcheck disable=SC2329 # Function invoked indirectly via trap
cleanup_spinner() { exit 0; }
trap cleanup_spinner TERM INT EXIT
local spinner_chars="|/-\\"
local i=0
while true; do
local completed=$(cat "$progress_file" 2> /dev/null || echo 0)
local c="${spinner_chars:$((i % 4)):1}"
if [[ $inline_loading == true ]]; then
printf "\033[H\033[2K%s Scanning applications... %d/%d\n" "$c" "$completed" "$total_apps" >&2
else
printf "\r\033[K%s Scanning applications... %d/%d" "$c" "$completed" "$total_apps" >&2
fi
((i++))
sleep 0.1 2> /dev/null || sleep 1
done
) &
spinner_pid=$!
# Process apps in parallel batches
for app_data_tuple in "${app_data_tuples[@]}"; do
((app_count++))
# Launch background process
process_app_metadata "$app_data_tuple" "$temp_file" "$current_epoch" &
pids+=($!)
# Update progress to show scanning progress (use app_count as it increments smoothly)
echo "$app_count" > "$progress_file"
# Wait if we've hit max parallel limit
if ((${#pids[@]} >= max_parallel)); then
wait "${pids[0]}" 2> /dev/null
pids=("${pids[@]:1}") # Remove first pid
fi
done
# Wait for remaining background processes
for pid in "${pids[@]}"; do
wait "$pid" 2> /dev/null
done
# Stop the spinner and clear the line
if [[ -n "$spinner_pid" ]]; then
kill -TERM "$spinner_pid" 2> /dev/null || true
wait "$spinner_pid" 2> /dev/null || true
fi
if [[ $inline_loading == true ]]; then
printf "\033[H\033[2K" >&2
else
echo -ne "\r\033[K" >&2
fi
rm -f "$progress_file"
# Check if we found any applications
if [[ ! -s "$temp_file" ]]; then
echo "No applications found to uninstall" >&2
rm -f "$temp_file"
return 1
fi
# Sort by last used (oldest first) and cache the result
# Show brief processing message for large app lists
if [[ $total_apps -gt 50 ]]; then
if [[ $inline_loading == true ]]; then
printf "\033[H\033[2KProcessing %d applications...\n" "$total_apps" >&2
else
printf "\rProcessing %d applications... " "$total_apps" >&2
fi
fi
sort -t'|' -k1,1n "$temp_file" > "${temp_file}.sorted" || {
rm -f "$temp_file"
return 1
}
rm -f "$temp_file"
# Clear processing message
if [[ $total_apps -gt 50 ]]; then
if [[ $inline_loading == true ]]; then
printf "\033[H\033[2K" >&2
else
printf "\r\033[K" >&2
fi
fi
# Save to cache (simplified - no metadata)
ensure_user_file "$cache_file"
cp "${temp_file}.sorted" "$cache_file" 2> /dev/null || true
# Return sorted file
if [[ -f "${temp_file}.sorted" ]]; then
echo "${temp_file}.sorted"
else
return 1
fi
}
# Populate the global apps_data / selection_state arrays from a scan
# results file (one app per line: epoch|path|name|bundle_id|size|last_used|size_kb).
# Apps whose bundle no longer exists on disk are dropped.
# Returns 1 when the file is missing/empty or no app survives filtering.
load_applications() {
    local source_file="$1"

    if [[ ! -f "$source_file" || ! -s "$source_file" ]]; then
        log_warning "No applications found for uninstallation"
        return 1
    fi

    apps_data=()
    selection_state=()

    while IFS='|' read -r epoch app_path app_name bundle_id size last_used size_kb; do
        # Keep only entries whose app bundle still exists on disk.
        if [[ -e "$app_path" ]]; then
            apps_data+=("$epoch|$app_path|$app_name|$bundle_id|$size|$last_used|${size_kb:-0}")
            selection_state+=(false)
        fi
    done < "$source_file"

    if [[ ${#apps_data[@]} -eq 0 ]]; then
        log_warning "No applications available for uninstallation"
        return 1
    fi
    return 0
}
# Cleanup function - restore cursor and clean up.
# Installed as the EXIT/INT/TERM trap below: tears down the alternate
# screen if we entered it, stops the sudo keepalive background job, and
# restores the cursor before exiting with the given (or default 0) status.
cleanup() {
    # Restore cursor using common function
    if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
        leave_alt_screen
        unset MOLE_ALT_SCREEN_ACTIVE
    fi
    # Stop the sudo keepalive background process, if one was started.
    if [[ -n "${sudo_keepalive_pid:-}" ]]; then
        kill "$sudo_keepalive_pid" 2> /dev/null || true
        wait "$sudo_keepalive_pid" 2> /dev/null || true
        sudo_keepalive_pid=""
    fi
    show_cursor
    exit "${1:-0}"
}
# Set trap for cleanup on exit
trap cleanup EXIT INT TERM
# Main interactive loop:
#   1. scan installed apps (24h cache unless --force-rescan),
#   2. let the user pick apps in a paginated menu,
#   3. batch-uninstall the selection, then loop until Enter/ESC decides.
# Flags: --debug sets MO_DEBUG=1; --force-rescan bypasses the scan cache.
main() {
    local force_rescan=false
    for arg in "$@"; do
        case "$arg" in
            "--debug")
                export MO_DEBUG=1
                ;;
            "--force-rescan")
                force_rescan=true
                ;;
        esac
    done
    # Inline (alt-screen) loading UI only when stdout AND stderr are TTYs.
    local use_inline_loading=false
    if [[ -t 1 && -t 2 ]]; then
        use_inline_loading=true
    fi
    # Hide cursor during operation
    hide_cursor
    # Main interaction loop
    while true; do
        # Simplified: always check if we need alt screen for scanning
        # (scan_applications handles cache internally)
        local needs_scanning=true
        local cache_file="$HOME/.cache/mole/app_scan_cache"
        if [[ $force_rescan == false && -f "$cache_file" ]]; then
            local cache_age=$(($(get_epoch_seconds) - $(get_file_mtime "$cache_file")))
            [[ $cache_age -eq $(get_epoch_seconds) ]] && cache_age=86401 # Handle missing file
            [[ $cache_age -lt 86400 ]] && needs_scanning=false
        fi
        # Only enter alt screen if we need scanning (shows progress)
        if [[ $needs_scanning == true && $use_inline_loading == true ]]; then
            # Only enter if not already active
            if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" != "1" ]]; then
                enter_alt_screen
                export MOLE_ALT_SCREEN_ACTIVE=1
                export MOLE_INLINE_LOADING=1
                export MOLE_MANAGED_ALT_SCREEN=1
            fi
            printf "\033[2J\033[H" >&2
        else
            # If we don't need scanning but have alt screen from previous iteration, keep it?
            # Actually, scan_applications might output to stderr.
            # Let's just unset the flags if we don't need scanning, but keep alt screen if it was active?
            # No, select_apps_for_uninstall will handle its own screen management.
            unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN MOLE_ALT_SCREEN_ACTIVE
            if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
                leave_alt_screen
                unset MOLE_ALT_SCREEN_ACTIVE
            fi
        fi
        # Scan applications
        local apps_file=""
        if ! apps_file=$(scan_applications "$force_rescan"); then
            if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
                printf "\033[2J\033[H" >&2
                leave_alt_screen
                unset MOLE_ALT_SCREEN_ACTIVE
                unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
            fi
            return 1
        fi
        if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
            printf "\033[2J\033[H" >&2
        fi
        if [[ ! -f "$apps_file" ]]; then
            # Error message already shown by scan_applications
            if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
                leave_alt_screen
                unset MOLE_ALT_SCREEN_ACTIVE
                unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
            fi
            return 1
        fi
        # Load applications
        if ! load_applications "$apps_file"; then
            if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
                leave_alt_screen
                unset MOLE_ALT_SCREEN_ACTIVE
                unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
            fi
            rm -f "$apps_file"
            return 1
        fi
        # Interactive selection using paginated menu.
        # errexit is suspended: a non-zero return (cancel/refresh) is a
        # normal outcome here, not a failure.
        set +e
        select_apps_for_uninstall
        local exit_code=$?
        set -e
        if [[ $exit_code -ne 0 ]]; then
            if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
                leave_alt_screen
                unset MOLE_ALT_SCREEN_ACTIVE
                unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
            fi
            show_cursor
            clear_screen
            printf '\033[2J\033[H' >&2 # Also clear stderr
            rm -f "$apps_file"
            # Handle Refresh (code 10)
            if [[ $exit_code -eq 10 ]]; then
                force_rescan=true
                continue
            fi
            # User cancelled selection, exit the loop
            return 0
        fi
        # Always clear on exit from selection, regardless of alt screen state
        if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then
            leave_alt_screen
            unset MOLE_ALT_SCREEN_ACTIVE
            unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN
        fi
        # Restore cursor and clear screen (output to both stdout and stderr for reliability)
        show_cursor
        clear_screen
        printf '\033[2J\033[H' >&2 # Also clear stderr in case of mixed output
        local selection_count=${#selected_apps[@]}
        if [[ $selection_count -eq 0 ]]; then
            echo "No apps selected"
            rm -f "$apps_file"
            # Loop back or exit? If select_apps_for_uninstall returns 0 but empty selection,
            # it technically shouldn't happen based on that function's logic.
            continue
        fi
        # Show selected apps with clean alignment
        echo -e "${BLUE}${ICON_CONFIRM}${NC} Selected ${selection_count} app(s):"
        local -a summary_rows=()
        local max_name_width=0
        local max_size_width=0
        local max_last_width=0
        # First pass: get actual max widths for all columns
        for selected_app in "${selected_apps[@]}"; do
            IFS='|' read -r _ _ app_name _ size last_used _ <<< "$selected_app"
            [[ ${#app_name} -gt $max_name_width ]] && max_name_width=${#app_name}
            local size_display="$size"
            [[ -z "$size_display" || "$size_display" == "0" || "$size_display" == "N/A" ]] && size_display="Unknown"
            [[ ${#size_display} -gt $max_size_width ]] && max_size_width=${#size_display}
            local last_display=$(format_last_used_summary "$last_used")
            [[ ${#last_display} -gt $max_last_width ]] && max_last_width=${#last_display}
        done
        ((max_size_width < 5)) && max_size_width=5
        ((max_last_width < 5)) && max_last_width=5
        # Calculate name width: use actual max, but constrain by terminal width
        # Fixed elements: "99. " (4) + " " (2) + " | Last: " (11) = 17
        local term_width=$(tput cols 2> /dev/null || echo 100)
        local available_for_name=$((term_width - 17 - max_size_width - max_last_width))
        # Dynamic minimum for better spacing on wide terminals
        local min_name_width=24
        if [[ $term_width -ge 120 ]]; then
            min_name_width=50
        elif [[ $term_width -ge 100 ]]; then
            min_name_width=42
        elif [[ $term_width -ge 80 ]]; then
            min_name_width=30
        fi
        # Constrain name width: dynamic min, max min(actual_max, available, 60)
        local name_trunc_limit=$max_name_width
        [[ $name_trunc_limit -lt $min_name_width ]] && name_trunc_limit=$min_name_width
        [[ $name_trunc_limit -gt $available_for_name ]] && name_trunc_limit=$available_for_name
        [[ $name_trunc_limit -gt 60 ]] && name_trunc_limit=60
        # Reset for second pass (widths recomputed after truncation)
        max_name_width=0
        for selected_app in "${selected_apps[@]}"; do
            IFS='|' read -r epoch app_path app_name bundle_id size last_used size_kb <<< "$selected_app"
            local display_name="$app_name"
            if [[ ${#display_name} -gt $name_trunc_limit ]]; then
                display_name="${display_name:0:$((name_trunc_limit - 3))}..."
            fi
            [[ ${#display_name} -gt $max_name_width ]] && max_name_width=${#display_name}
            local size_display="$size"
            if [[ -z "$size_display" || "$size_display" == "0" || "$size_display" == "N/A" ]]; then
                size_display="Unknown"
            fi
            local last_display
            last_display=$(format_last_used_summary "$last_used")
            summary_rows+=("$display_name|$size_display|$last_display")
        done
        ((max_name_width < 16)) && max_name_width=16
        local index=1
        for row in "${summary_rows[@]}"; do
            IFS='|' read -r name_cell size_cell last_cell <<< "$row"
            printf "%d. %-*s %*s | Last: %s\n" "$index" "$max_name_width" "$name_cell" "$max_size_width" "$size_cell" "$last_cell"
            ((index++))
        done
        # Execute batch uninstallation (handles confirmation)
        batch_uninstall_applications
        # Cleanup current apps file
        rm -f "$apps_file"
        # Pause before looping back
        echo -e "${GRAY}Press Enter to return to application list, ESC to exit...${NC}"
        local key
        IFS= read -r -s -n1 key || key=""
        drain_pending_input # Clean up any escape sequence remnants
        case "$key" in
            $'\e' | q | Q)
                show_cursor
                return 0
                ;;
            *)
                # Continue loop
                ;;
        esac
        # Reset force_rescan to false for subsequent loops,
        # but relying on batch_uninstall's cache deletion for actual update
        force_rescan=false
    done
}
# Run main function

View File

@@ -1,360 +0,0 @@
package main
import (
"encoding/gob"
"os"
"path/filepath"
"strings"
"sync/atomic"
"testing"
"time"
)
// resetOverviewSnapshotForTest clears the in-memory overview snapshot
// cache so each test starts from a cold, not-yet-loaded state.
func resetOverviewSnapshotForTest() {
	overviewSnapshotMu.Lock()
	defer overviewSnapshotMu.Unlock()
	overviewSnapshotCache = nil
	overviewSnapshotLoaded = false
}
// TestScanPathConcurrentBasic builds a small fixture tree (a root file, a
// nested directory with two files, and a symlink to one of them), scans it
// with scanPathConcurrent, and verifies the aggregate size, the
// files/dirs/bytes counters, and that the symlink appears as a
// non-directory entry whose name ends with " →".
func TestScanPathConcurrentBasic(t *testing.T) {
	root := t.TempDir()
	rootFile := filepath.Join(root, "root.txt")
	if err := os.WriteFile(rootFile, []byte("root-data"), 0o644); err != nil {
		t.Fatalf("write root file: %v", err)
	}
	nested := filepath.Join(root, "nested")
	if err := os.MkdirAll(nested, 0o755); err != nil {
		t.Fatalf("create nested dir: %v", err)
	}
	fileOne := filepath.Join(nested, "a.bin")
	if err := os.WriteFile(fileOne, []byte("alpha"), 0o644); err != nil {
		t.Fatalf("write file one: %v", err)
	}
	fileTwo := filepath.Join(nested, "b.bin")
	if err := os.WriteFile(fileTwo, []byte(strings.Repeat("b", 32)), 0o644); err != nil {
		t.Fatalf("write file two: %v", err)
	}
	linkPath := filepath.Join(root, "link-to-a")
	if err := os.Symlink(fileOne, linkPath); err != nil {
		t.Fatalf("create symlink: %v", err)
	}
	var filesScanned, dirsScanned, bytesScanned int64
	current := ""
	result, err := scanPathConcurrent(root, &filesScanned, &dirsScanned, &bytesScanned, &current)
	if err != nil {
		t.Fatalf("scanPathConcurrent returned error: %v", err)
	}
	linkInfo, err := os.Lstat(linkPath)
	if err != nil {
		t.Fatalf("stat symlink: %v", err)
	}
	// Expected total = nested file contents + root file + whatever the
	// scanner attributes to the symlink itself (via getActualFileSize).
	expectedDirSize := int64(len("alpha") + len(strings.Repeat("b", 32)))
	expectedRootFileSize := int64(len("root-data"))
	expectedLinkSize := getActualFileSize(linkPath, linkInfo)
	expectedTotal := expectedDirSize + expectedRootFileSize + expectedLinkSize
	if result.TotalSize != expectedTotal {
		t.Fatalf("expected total size %d, got %d", expectedTotal, result.TotalSize)
	}
	if got := atomic.LoadInt64(&filesScanned); got != 3 {
		t.Fatalf("expected 3 files scanned, got %d", got)
	}
	if dirs := atomic.LoadInt64(&dirsScanned); dirs == 0 {
		t.Fatalf("expected directory scan count to increase")
	}
	if bytes := atomic.LoadInt64(&bytesScanned); bytes == 0 {
		t.Fatalf("expected byte counter to increase")
	}
	foundSymlink := false
	for _, entry := range result.Entries {
		if strings.HasSuffix(entry.Name, " →") {
			foundSymlink = true
			if entry.IsDir {
				t.Fatalf("symlink entry should not be marked as directory")
			}
		}
	}
	if !foundSymlink {
		t.Fatalf("expected symlink entry to be present in scan result")
	}
}
// TestDeletePathWithProgress creates a directory with two files and checks
// that trashPathWithProgress reports two trashed files and that the target
// directory no longer exists afterwards.
func TestDeletePathWithProgress(t *testing.T) {
	// Skip in CI environments where Finder may not be available.
	if os.Getenv("CI") != "" {
		t.Skip("Skipping Finder-dependent test in CI")
	}
	parent := t.TempDir()
	target := filepath.Join(parent, "target")
	if err := os.MkdirAll(target, 0o755); err != nil {
		t.Fatalf("create target: %v", err)
	}
	files := []string{
		filepath.Join(target, "one.txt"),
		filepath.Join(target, "two.txt"),
	}
	for _, f := range files {
		if err := os.WriteFile(f, []byte("content"), 0o644); err != nil {
			t.Fatalf("write %s: %v", f, err)
		}
	}
	var counter int64
	count, err := trashPathWithProgress(target, &counter)
	if err != nil {
		t.Fatalf("trashPathWithProgress returned error: %v", err)
	}
	if count != int64(len(files)) {
		t.Fatalf("expected %d files trashed, got %d", len(files), count)
	}
	// Success means the path is gone from its original location.
	if _, err := os.Stat(target); !os.IsNotExist(err) {
		t.Fatalf("expected target to be moved to Trash, stat err=%v", err)
	}
}
// TestOverviewStoreAndLoad round-trips a size through the overview
// snapshot store, then resets the in-memory cache to prove the value is
// reloaded from disk. HOME is redirected to a temp dir so the real cache
// is untouched.
func TestOverviewStoreAndLoad(t *testing.T) {
	home := t.TempDir()
	t.Setenv("HOME", home)
	resetOverviewSnapshotForTest()
	t.Cleanup(resetOverviewSnapshotForTest)
	path := filepath.Join(home, "project")
	want := int64(123456)
	if err := storeOverviewSize(path, want); err != nil {
		t.Fatalf("storeOverviewSize: %v", err)
	}
	got, err := loadStoredOverviewSize(path)
	if err != nil {
		t.Fatalf("loadStoredOverviewSize: %v", err)
	}
	if got != want {
		t.Fatalf("snapshot mismatch: want %d, got %d", want, got)
	}
	// Reload from disk and ensure value persists.
	resetOverviewSnapshotForTest()
	got, err = loadStoredOverviewSize(path)
	if err != nil {
		t.Fatalf("loadStoredOverviewSize after reset: %v", err)
	}
	if got != want {
		t.Fatalf("snapshot mismatch after reset: want %d, got %d", want, got)
	}
}
// TestCacheSaveLoadRoundTrip writes a scanResult through saveCacheToDisk
// and reads it back via loadCacheFromDisk, checking the total size and
// the entry / large-file counts survive the round trip.
func TestCacheSaveLoadRoundTrip(t *testing.T) {
	home := t.TempDir()
	t.Setenv("HOME", home)
	target := filepath.Join(home, "cache-target")
	if err := os.MkdirAll(target, 0o755); err != nil {
		t.Fatalf("create target dir: %v", err)
	}
	result := scanResult{
		Entries: []dirEntry{
			{Name: "alpha", Path: filepath.Join(target, "alpha"), Size: 10, IsDir: true},
		},
		LargeFiles: []fileEntry{
			{Name: "big.bin", Path: filepath.Join(target, "big.bin"), Size: 2048},
		},
		TotalSize: 42,
	}
	if err := saveCacheToDisk(target, result); err != nil {
		t.Fatalf("saveCacheToDisk: %v", err)
	}
	cache, err := loadCacheFromDisk(target)
	if err != nil {
		t.Fatalf("loadCacheFromDisk: %v", err)
	}
	if cache.TotalSize != result.TotalSize {
		t.Fatalf("total size mismatch: want %d, got %d", result.TotalSize, cache.TotalSize)
	}
	if len(cache.Entries) != len(result.Entries) {
		t.Fatalf("entry count mismatch: want %d, got %d", len(result.Entries), len(cache.Entries))
	}
	if len(cache.LargeFiles) != len(result.LargeFiles) {
		t.Fatalf("large file count mismatch: want %d, got %d", len(result.LargeFiles), len(cache.LargeFiles))
	}
}
// TestMeasureOverviewSize measures a directory containing a 2 KiB file,
// expects a positive size, and verifies the measurement was also stored
// in the overview snapshot cache as a side effect.
func TestMeasureOverviewSize(t *testing.T) {
	home := t.TempDir()
	t.Setenv("HOME", home)
	resetOverviewSnapshotForTest()
	t.Cleanup(resetOverviewSnapshotForTest)
	target := filepath.Join(home, "measure")
	if err := os.MkdirAll(target, 0o755); err != nil {
		t.Fatalf("create target: %v", err)
	}
	content := []byte(strings.Repeat("x", 2048))
	if err := os.WriteFile(filepath.Join(target, "data.bin"), content, 0o644); err != nil {
		t.Fatalf("write file: %v", err)
	}
	size, err := measureOverviewSize(target)
	if err != nil {
		t.Fatalf("measureOverviewSize: %v", err)
	}
	if size <= 0 {
		t.Fatalf("expected positive size, got %d", size)
	}
	// Ensure snapshot stored.
	cached, err := loadStoredOverviewSize(target)
	if err != nil {
		t.Fatalf("loadStoredOverviewSize: %v", err)
	}
	if cached != size {
		t.Fatalf("snapshot mismatch: want %d, got %d", size, cached)
	}
}
// TestIsCleanableDir verifies the manual-cleanup classification for a few
// representative paths: project dependency dirs are cleanable, mo clean's
// own territory and empty paths are not.
func TestIsCleanableDir(t *testing.T) {
	cases := []struct {
		path    string
		want    bool
		explain string
	}{
		{"/Users/test/project/node_modules", true, "expected node_modules to be cleanable"},
		{"/Users/test/Library/Caches/AppCache", false, "Library caches should be handled by mo clean"},
		{"", false, "empty path should not be cleanable"},
	}
	for _, tc := range cases {
		if got := isCleanableDir(tc.path); got != tc.want {
			t.Fatalf("%s", tc.explain)
		}
	}
}
// TestHasUsefulVolumeMounts checks that only visible (non-dot-prefixed)
// subdirectories of a volumes root count as useful mounts.
func TestHasUsefulVolumeMounts(t *testing.T) {
	root := t.TempDir()
	if hasUsefulVolumeMounts(root) {
		t.Fatalf("empty directory should not report useful mounts")
	}
	hidden := filepath.Join(root, ".hidden")
	if err := os.Mkdir(hidden, 0o755); err != nil {
		t.Fatalf("create hidden dir: %v", err)
	}
	if hasUsefulVolumeMounts(root) {
		t.Fatalf("hidden entries should not count as useful mounts")
	}
	mount := filepath.Join(root, "ExternalDrive")
	if err := os.Mkdir(mount, 0o755); err != nil {
		t.Fatalf("create mount dir: %v", err)
	}
	if !hasUsefulVolumeMounts(root) {
		t.Fatalf("expected useful mount when real directory exists")
	}
}
// TestLoadCacheExpiresWhenDirectoryChanges saves a cache entry, then
// rewrites it on disk with a ScanTime pushed 8 days into the past (beyond
// the 7-day limit in loadCacheFromDisk) while the directory's mtime is
// newer than the cache by more than cacheModTimeGrace, and expects the
// subsequent load to fail.
func TestLoadCacheExpiresWhenDirectoryChanges(t *testing.T) {
	home := t.TempDir()
	t.Setenv("HOME", home)
	target := filepath.Join(home, "change-target")
	if err := os.MkdirAll(target, 0o755); err != nil {
		t.Fatalf("create target: %v", err)
	}
	result := scanResult{TotalSize: 5}
	if err := saveCacheToDisk(target, result); err != nil {
		t.Fatalf("saveCacheToDisk: %v", err)
	}
	// Advance mtime beyond grace period.
	time.Sleep(time.Millisecond * 10)
	if err := os.Chtimes(target, time.Now(), time.Now()); err != nil {
		t.Fatalf("chtimes: %v", err)
	}
	// Simulate older cache entry to exceed grace window.
	cachePath, err := getCachePath(target)
	if err != nil {
		t.Fatalf("getCachePath: %v", err)
	}
	if _, err := os.Stat(cachePath); err != nil {
		t.Fatalf("stat cache: %v", err)
	}
	oldTime := time.Now().Add(-cacheModTimeGrace - time.Minute)
	if err := os.Chtimes(cachePath, oldTime, oldTime); err != nil {
		t.Fatalf("chtimes cache: %v", err)
	}
	// Decode the stored entry so we can tamper with its ScanTime.
	file, err := os.Open(cachePath)
	if err != nil {
		t.Fatalf("open cache: %v", err)
	}
	var entry cacheEntry
	if err := gob.NewDecoder(file).Decode(&entry); err != nil {
		t.Fatalf("decode cache: %v", err)
	}
	_ = file.Close()
	entry.ScanTime = time.Now().Add(-8 * 24 * time.Hour)
	// Write the backdated entry back atomically (tmp + rename).
	tmp := cachePath + ".tmp"
	f, err := os.Create(tmp)
	if err != nil {
		t.Fatalf("create tmp cache: %v", err)
	}
	if err := gob.NewEncoder(f).Encode(&entry); err != nil {
		t.Fatalf("encode tmp cache: %v", err)
	}
	_ = f.Close()
	if err := os.Rename(tmp, cachePath); err != nil {
		t.Fatalf("rename tmp cache: %v", err)
	}
	if _, err := loadCacheFromDisk(target); err == nil {
		t.Fatalf("expected cache load to fail after stale scan time")
	}
}
// TestScanPathPermissionError strips all permissions from a directory and
// expects scanning it to fail.
// NOTE(review): chmod 0o000 does not restrict the root user, so this test
// would behave differently when run as root — confirm CI runs unprivileged.
func TestScanPathPermissionError(t *testing.T) {
	root := t.TempDir()
	lockedDir := filepath.Join(root, "locked")
	if err := os.Mkdir(lockedDir, 0o755); err != nil {
		t.Fatalf("create locked dir: %v", err)
	}
	// Create a file before locking.
	if err := os.WriteFile(filepath.Join(lockedDir, "secret.txt"), []byte("shh"), 0o644); err != nil {
		t.Fatalf("write secret: %v", err)
	}
	// Remove permissions.
	if err := os.Chmod(lockedDir, 0o000); err != nil {
		t.Fatalf("chmod 000: %v", err)
	}
	defer func() {
		// Restore permissions for cleanup.
		_ = os.Chmod(lockedDir, 0o755)
	}()
	var files, dirs, bytes int64
	current := ""
	// Scanning the locked dir itself should fail.
	_, err := scanPathConcurrent(lockedDir, &files, &dirs, &bytes, &current)
	if err == nil {
		t.Fatalf("expected error scanning locked directory, got nil")
	}
	// The wrapped error type is not guaranteed; only log a mismatch.
	if !os.IsPermission(err) {
		t.Logf("unexpected error type: %v", err)
	}
}

View File

@@ -1,346 +0,0 @@
package main
import (
"context"
"encoding/gob"
"encoding/json"
"fmt"
"os"
"path/filepath"
"sync"
"time"
"github.com/cespare/xxhash/v2"
)
// overviewSizeSnapshot is one persisted "total size of path" measurement,
// stamped with when it was taken so it can expire (see overviewCacheTTL).
type overviewSizeSnapshot struct {
	Size    int64     `json:"size"`
	Updated time.Time `json:"updated"`
}

// In-memory mirror of the on-disk overview snapshot store, loaded lazily.
var (
	overviewSnapshotMu     sync.Mutex                      // guards the two fields below
	overviewSnapshotCache  map[string]overviewSizeSnapshot // path -> latest snapshot
	overviewSnapshotLoaded bool                            // set once the store has been read
)
// snapshotFromModel captures the model's current browse state (path,
// entries, large files, totals, selections, scroll offsets, overview flag)
// as a historyEntry. Slices are cloned so later model mutation cannot
// corrupt the snapshot.
func snapshotFromModel(m model) historyEntry {
	return historyEntry{
		Path:          m.path,
		Entries:       cloneDirEntries(m.entries),
		LargeFiles:    cloneFileEntries(m.largeFiles),
		TotalSize:     m.totalSize,
		TotalFiles:    m.totalFiles,
		Selected:      m.selected,
		EntryOffset:   m.offset,
		LargeSelected: m.largeSelected,
		LargeOffset:   m.largeOffset,
		IsOverview:    m.isOverview,
	}
}
// cacheSnapshot returns a snapshot of m with Dirty cleared, i.e. a state
// suitable to persist as an up-to-date cache entry.
func cacheSnapshot(m model) historyEntry {
	entry := snapshotFromModel(m)
	entry.Dirty = false
	return entry
}
// cloneDirEntries returns an independent copy of entries; empty or nil
// input yields nil (append onto a nil slice with zero elements is nil).
func cloneDirEntries(entries []dirEntry) []dirEntry {
	return append([]dirEntry(nil), entries...)
}
// cloneFileEntries returns an independent copy of files; empty or nil
// input yields nil (append onto a nil slice with zero elements is nil).
func cloneFileEntries(files []fileEntry) []fileEntry {
	return append([]fileEntry(nil), files...)
}
// ensureOverviewSnapshotCacheLocked lazily loads the snapshot store from
// disk into overviewSnapshotCache. Caller must hold overviewSnapshotMu.
// A missing or empty store file yields an empty cache. A file that fails
// to parse is renamed aside to "<store>.corrupt" and replaced with an
// empty cache, so one bad write never permanently breaks overview sizing.
func ensureOverviewSnapshotCacheLocked() error {
	if overviewSnapshotLoaded {
		return nil
	}
	storePath, err := getOverviewSizeStorePath()
	if err != nil {
		return err
	}
	data, err := os.ReadFile(storePath)
	if err != nil {
		if os.IsNotExist(err) {
			overviewSnapshotCache = make(map[string]overviewSizeSnapshot)
			overviewSnapshotLoaded = true
			return nil
		}
		return err
	}
	if len(data) == 0 {
		overviewSnapshotCache = make(map[string]overviewSizeSnapshot)
		overviewSnapshotLoaded = true
		return nil
	}
	var snapshots map[string]overviewSizeSnapshot
	if err := json.Unmarshal(data, &snapshots); err != nil || snapshots == nil {
		// Corrupt store: keep it for inspection, start fresh.
		backupPath := storePath + ".corrupt"
		_ = os.Rename(storePath, backupPath)
		overviewSnapshotCache = make(map[string]overviewSizeSnapshot)
		overviewSnapshotLoaded = true
		return nil
	}
	overviewSnapshotCache = snapshots
	overviewSnapshotLoaded = true
	return nil
}
// getOverviewSizeStorePath returns the on-disk location of the overview
// size snapshot file inside the mole cache directory.
func getOverviewSizeStorePath() (string, error) {
	dir, err := getCacheDir()
	if err != nil {
		return "", err
	}
	return filepath.Join(dir, overviewCacheFile), nil
}
// loadStoredOverviewSize returns the cached overview size for path, or an
// error when the snapshot is missing, non-positive, expired (older than
// overviewCacheTTL), or the on-disk store cannot be loaded.
func loadStoredOverviewSize(path string) (int64, error) {
	if path == "" {
		return 0, fmt.Errorf("empty path")
	}
	overviewSnapshotMu.Lock()
	defer overviewSnapshotMu.Unlock()
	if err := ensureOverviewSnapshotCacheLocked(); err != nil {
		return 0, err
	}
	if overviewSnapshotCache == nil {
		return 0, fmt.Errorf("snapshot cache unavailable")
	}
	snapshot, ok := overviewSnapshotCache[path]
	if !ok || snapshot.Size <= 0 {
		return 0, fmt.Errorf("snapshot not found")
	}
	if time.Since(snapshot.Updated) >= overviewCacheTTL {
		return 0, fmt.Errorf("snapshot expired")
	}
	return snapshot.Size, nil
}
// storeOverviewSize records size for path in the overview snapshot cache
// and persists the whole store to disk. Empty paths and non-positive
// sizes are rejected.
func storeOverviewSize(path string, size int64) error {
	if path == "" || size <= 0 {
		return fmt.Errorf("invalid overview size")
	}
	overviewSnapshotMu.Lock()
	defer overviewSnapshotMu.Unlock()
	if err := ensureOverviewSnapshotCacheLocked(); err != nil {
		return err
	}
	if overviewSnapshotCache == nil {
		overviewSnapshotCache = make(map[string]overviewSizeSnapshot)
	}
	overviewSnapshotCache[path] = overviewSizeSnapshot{Size: size, Updated: time.Now()}
	return persistOverviewSnapshotLocked()
}
// persistOverviewSnapshotLocked writes the in-memory snapshot map to disk
// atomically (tmp file + rename). Caller must hold overviewSnapshotMu.
func persistOverviewSnapshotLocked() error {
	storePath, err := getOverviewSizeStorePath()
	if err != nil {
		return err
	}
	payload, err := json.MarshalIndent(overviewSnapshotCache, "", " ")
	if err != nil {
		return err
	}
	tmpPath := storePath + ".tmp"
	if writeErr := os.WriteFile(tmpPath, payload, 0644); writeErr != nil {
		return writeErr
	}
	return os.Rename(tmpPath, storePath)
}
// loadOverviewCachedSize resolves a cached size for path, preferring the
// overview snapshot store and falling back to the per-path scan cache,
// promoting a scan-cache hit back into the snapshot store (best-effort).
func loadOverviewCachedSize(path string) (int64, error) {
	if path == "" {
		return 0, fmt.Errorf("empty path")
	}
	if size, err := loadStoredOverviewSize(path); err == nil {
		return size, nil
	}
	entry, err := loadCacheFromDisk(path)
	if err != nil {
		return 0, err
	}
	_ = storeOverviewSize(path, entry.TotalSize)
	return entry.TotalSize, nil
}
func getCacheDir() (string, error) {
home, err := os.UserHomeDir()
if err != nil {
return "", err
}
cacheDir := filepath.Join(home, ".cache", "mole")
if err := os.MkdirAll(cacheDir, 0755); err != nil {
return "", err
}
return cacheDir, nil
}
// getCachePath maps a scanned path to its per-path cache file:
// <cacheDir>/<hex xxhash of path>.cache.
func getCachePath(path string) (string, error) {
	dir, err := getCacheDir()
	if err != nil {
		return "", err
	}
	name := fmt.Sprintf("%x.cache", xxhash.Sum64String(path))
	return filepath.Join(dir, name), nil
}
// loadCacheFromDisk reads and validates the gob-encoded cache entry for
// path. The entry is rejected when the directory's mtime is newer than
// the cached ModTime by more than cacheModTimeGrace, or when the scan
// itself is older than 7 days.
func loadCacheFromDisk(path string) (*cacheEntry, error) {
	cachePath, err := getCachePath(path)
	if err != nil {
		return nil, err
	}
	file, err := os.Open(cachePath)
	if err != nil {
		return nil, err
	}
	defer file.Close() //nolint:errcheck
	var entry cacheEntry
	decoder := gob.NewDecoder(file)
	if err := decoder.Decode(&entry); err != nil {
		return nil, err
	}
	info, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	if info.ModTime().After(entry.ModTime) {
		// Allow grace window: small mtime drift after the scan is tolerated.
		if cacheModTimeGrace <= 0 || info.ModTime().Sub(entry.ModTime) > cacheModTimeGrace {
			return nil, fmt.Errorf("cache expired: directory modified")
		}
	}
	if time.Since(entry.ScanTime) > 7*24*time.Hour {
		return nil, fmt.Errorf("cache expired: too old")
	}
	return &entry, nil
}
// saveCacheToDisk persists a scan result for path, keyed by the hashed
// path (see getCachePath). The directory's current mtime and the scan
// time are stored so loadCacheFromDisk can detect staleness.
// The entry is written to a temporary file and renamed into place —
// matching persistOverviewSnapshotLocked — so an interrupted write can
// never leave a truncated cache file that later decodes as valid. The
// Close error is checked before the rename because a failed close can
// mean buffered gob data was lost.
func saveCacheToDisk(path string, result scanResult) error {
	cachePath, err := getCachePath(path)
	if err != nil {
		return err
	}
	info, err := os.Stat(path)
	if err != nil {
		return err
	}
	entry := cacheEntry{
		Entries:    result.Entries,
		LargeFiles: result.LargeFiles,
		TotalSize:  result.TotalSize,
		TotalFiles: result.TotalFiles,
		ModTime:    info.ModTime(),
		ScanTime:   time.Now(),
	}
	tmpPath := cachePath + ".tmp"
	file, err := os.Create(tmpPath)
	if err != nil {
		return err
	}
	if err := gob.NewEncoder(file).Encode(entry); err != nil {
		_ = file.Close()
		_ = os.Remove(tmpPath)
		return err
	}
	if err := file.Close(); err != nil {
		_ = os.Remove(tmpPath)
		return err
	}
	return os.Rename(tmpPath, cachePath)
}
// peekCacheTotalFiles attempts to read the total file count from cache,
// ignoring expiration. Used for initial scan progress estimates —
// unlike loadCacheFromDisk it performs no freshness checks, since a
// stale count is still a useful estimate.
func peekCacheTotalFiles(path string) (int64, error) {
	cachePath, err := getCachePath(path)
	if err != nil {
		return 0, err
	}
	file, err := os.Open(cachePath)
	if err != nil {
		return 0, err
	}
	defer file.Close() //nolint:errcheck
	var entry cacheEntry
	decoder := gob.NewDecoder(file)
	if err := decoder.Decode(&entry); err != nil {
		return 0, err
	}
	return entry.TotalFiles, nil
}
// invalidateCache removes both the per-path scan cache file and the
// overview snapshot entry for path. Best-effort: errors are ignored.
func invalidateCache(path string) {
	if cachePath, err := getCachePath(path); err == nil {
		_ = os.Remove(cachePath)
	}
	removeOverviewSnapshot(path)
}
// removeOverviewSnapshot deletes path's entry from the overview snapshot
// cache and persists the change; a no-op when the cache cannot be loaded
// or the entry does not exist.
func removeOverviewSnapshot(path string) {
	if path == "" {
		return
	}
	overviewSnapshotMu.Lock()
	defer overviewSnapshotMu.Unlock()
	if ensureOverviewSnapshotCacheLocked() != nil || overviewSnapshotCache == nil {
		return
	}
	if _, exists := overviewSnapshotCache[path]; !exists {
		return
	}
	delete(overviewSnapshotCache, path)
	_ = persistOverviewSnapshotLocked()
}
// prefetchOverviewCache warms overview cache in background.
// It first collects overview entries that lack a fresh stored size, then
// measures them one at a time, checking ctx between paths so cancellation
// takes effect promptly.
func prefetchOverviewCache(ctx context.Context) {
	entries := createOverviewEntries()
	var needScan []string
	for _, entry := range entries {
		// Skip paths that already have a positive, unexpired snapshot.
		if size, err := loadStoredOverviewSize(entry.Path); err == nil && size > 0 {
			continue
		}
		needScan = append(needScan, entry.Path)
	}
	if len(needScan) == 0 {
		return
	}
	for _, path := range needScan {
		select {
		case <-ctx.Done():
			return
		default:
		}
		size, err := measureOverviewSize(path)
		if err == nil && size > 0 {
			_ = storeOverviewSize(path, size)
		}
	}
}

View File

@@ -1,107 +0,0 @@
package main
import (
"path/filepath"
"strings"
)
// isCleanableDir marks paths safe to delete manually (not handled by mo clean).
// A path qualifies when its base name is a known project dependency/build
// directory and it is not already covered by mo clean's cleanup locations.
func isCleanableDir(path string) bool {
	if path == "" || isHandledByMoClean(path) {
		return false
	}
	return projectDependencyDirs[filepath.Base(path)]
}
// isHandledByMoClean checks if a path is cleaned by mo clean, by matching
// the path against mo clean's cleanup locations as substrings.
func isHandledByMoClean(path string) bool {
	handled := [...]string{
		"/Library/Caches/",
		"/Library/Logs/",
		"/Library/Saved Application State/",
		"/.Trash/",
		"/Library/DiagnosticReports/",
	}
	for _, fragment := range handled {
		if strings.Contains(path, fragment) {
			return true
		}
	}
	return false
}
// Project dependency and build directories.
// Membership is keyed by directory base name (see isCleanableDir): these
// are regenerable artifacts that are safe to delete manually.
var projectDependencyDirs = map[string]bool{
	// JavaScript/Node.
	"node_modules":     true,
	"bower_components": true,
	".yarn":            true,
	".pnpm-store":      true,
	// Python.
	"venv":               true,
	".venv":              true,
	"virtualenv":         true,
	"__pycache__":        true,
	".pytest_cache":      true,
	".mypy_cache":        true,
	".ruff_cache":        true,
	".tox":               true,
	".eggs":              true,
	"htmlcov":            true,
	".ipynb_checkpoints": true,
	// Ruby (also Go/PHP vendoring uses "vendor").
	"vendor":  true,
	".bundle": true,
	// Java/Kotlin/Scala.
	".gradle": true,
	"out":     true,
	// Build outputs.
	"build":         true,
	"dist":          true,
	"target":        true,
	".next":         true,
	".nuxt":         true,
	".output":       true,
	".parcel-cache": true,
	".turbo":        true,
	".vite":         true,
	".nx":           true,
	"coverage":      true,
	".coverage":     true,
	".nyc_output":   true,
	// Frontend framework outputs.
	".angular":    true,
	".svelte-kit": true,
	".astro":      true,
	".docusaurus": true,
	// Apple dev.
	"DerivedData": true,
	"Pods":        true,
	".build":      true,
	"Carthage":    true,
	".dart_tool":  true,
	// Other tools.
	".terraform": true,
}

View File

@@ -1,248 +0,0 @@
package main
import "time"
// Tunables: display caps, cache lifetimes, external-command timeouts,
// and worker-pool sizing for the analyzer.
const (
	maxEntries            = 30
	maxLargeFiles         = 30
	barWidth              = 24
	minLargeFileSize      = 100 << 20 // 100 MiB
	defaultViewport       = 12
	overviewCacheTTL      = 7 * 24 * time.Hour // overview snapshots expire after a week
	overviewCacheFile     = "overview_sizes.json"
	duTimeout             = 30 * time.Second
	mdlsTimeout           = 5 * time.Second
	maxConcurrentOverview = 8
	batchUpdateSize       = 100
	cacheModTimeGrace     = 30 * time.Minute // dir-mtime drift tolerated before a cache is stale
	// Worker pool limits.
	minWorkers         = 16
	maxWorkers         = 64
	cpuMultiplier      = 4
	maxDirWorkers      = 32
	openCommandTimeout = 10 * time.Second
)
// foldDirs lists directory base names the scanner special-cases, grouped
// by ecosystem (VCS internals, package caches, build outputs, OS junk).
// NOTE(review): exact folding semantics live in the scanner — confirmed
// here only that lookup is by base name.
var foldDirs = map[string]bool{
	// VCS.
	".git": true,
	".svn": true,
	".hg":  true,
	// JavaScript/Node.
	"node_modules":                  true,
	".npm":                          true,
	"_npx":                          true,
	"_cacache":                      true,
	"_logs":                         true,
	"_locks":                        true,
	"_quick":                        true,
	"_libvips":                      true,
	"_prebuilds":                    true,
	"_update-notifier-last-checked": true,
	".yarn":                         true,
	".pnpm-store":                   true,
	".next":                         true,
	".nuxt":                         true,
	"bower_components":              true,
	".vite":                         true,
	".turbo":                        true,
	".parcel-cache":                 true,
	".nx":                           true,
	".rush":                         true,
	"tnpm":                          true,
	".tnpm":                         true,
	".bun":                          true,
	".deno":                         true,
	// Python.
	"__pycache__":   true,
	".pytest_cache": true,
	".mypy_cache":   true,
	".ruff_cache":   true,
	"venv":          true,
	".venv":         true,
	"virtualenv":    true,
	".tox":          true,
	"site-packages": true,
	".eggs":         true,
	"*.egg-info":    true,
	".pyenv":        true,
	".poetry":       true,
	".pip":          true,
	".pipx":         true,
	// Ruby/Go/PHP (vendor), Java/Kotlin/Scala/Rust (target).
	"vendor":        true,
	".bundle":       true,
	"gems":          true,
	".rbenv":        true,
	"target":        true,
	".gradle":       true,
	".m2":           true,
	".ivy2":         true,
	"out":           true,
	"pkg":           true,
	"composer.phar": true,
	".composer":     true,
	".cargo":        true,
	// Build outputs.
	"build":     true,
	"dist":      true,
	".output":   true,
	"coverage":  true,
	".coverage": true,
	// IDE.
	".idea":   true,
	".vscode": true,
	".vs":     true,
	".fleet":  true,
	// Cache directories.
	".cache":                  true,
	"__MACOSX":                true,
	".DS_Store":               true,
	".Trash":                  true,
	"Caches":                  true,
	".Spotlight-V100":         true,
	".fseventsd":              true,
	".DocumentRevisions-V100": true,
	".TemporaryItems":         true,
	"$RECYCLE.BIN":            true,
	".temp":                   true,
	".tmp":                    true,
	"_temp":                   true,
	"_tmp":                    true,
	".Homebrew":               true,
	".rustup":                 true,
	".sdkman":                 true,
	".nvm":                    true,
	// macOS.
	"Application Scripts":     true,
	"Saved Application State": true,
	// iCloud.
	"Mobile Documents": true,
	// Containers.
	".docker":     true,
	".containerd": true,
	// Mobile development.
	"Pods":        true,
	"DerivedData": true,
	".build":      true,
	"xcuserdata":  true,
	"Carthage":    true,
	".dart_tool":  true,
	// Web frameworks.
	".angular":    true,
	".svelte-kit": true,
	".astro":      true,
	".solid":      true,
	// Databases.
	".mysql":    true,
	".postgres": true,
	"mongodb":   true,
	// Other.
	".terraform": true,
	".vagrant":   true,
	"tmp":        true,
	"temp":       true,
}
// skipSystemDirs lists top-level directory names skipped when the scan root
// is "/". Entries explicitly mapped to false (opt, usr) are deliberately
// scanned; keeping them here documents that they were considered.
var skipSystemDirs = map[string]bool{
	"dev":     true,
	"tmp":     true,
	"private": true,
	"cores":   true,
	"net":     true,
	"home":    true,
	"System":  true,
	"sbin":    true,
	"bin":     true,
	"etc":     true,
	"var":     true,
	"opt":     false, // worth scanning: third-party installs live here
	"usr":     false, // worth scanning: /usr/local can be large
	"Volumes": true,
	"Network": true,
	".vol":    true,
	// macOS metadata/index directories.
	".Spotlight-V100":         true,
	".fseventsd":              true,
	".DocumentRevisions-V100": true,
	".TemporaryItems":         true,
	".MobileBackups":          true,
}
// defaultSkipDirs are directory names skipped everywhere, regardless of the
// scan root (network/permission pseudo-directories).
var defaultSkipDirs = map[string]bool{
	"nfs":         true,
	"PHD":         true,
	"Permissions": true,
}
// skipExtensions marks file extensions (lowercase, with leading dot) that
// are excluded from large-file tracking — source code and text formats that
// users rarely want surfaced individually.
var skipExtensions = map[string]bool{
	".go":     true,
	".js":     true,
	".ts":     true,
	".tsx":    true,
	".jsx":    true,
	".json":   true,
	".md":     true,
	".txt":    true,
	".yml":    true,
	".yaml":   true,
	".xml":    true,
	".html":   true,
	".css":    true,
	".scss":   true,
	".sass":   true,
	".less":   true,
	".py":     true,
	".rb":     true,
	".java":   true,
	".kt":     true,
	".rs":     true,
	".swift":  true,
	".m":      true,
	".mm":     true,
	".c":      true,
	".cpp":    true,
	".h":      true,
	".hpp":    true,
	".cs":     true,
	".sql":    true,
	".db":     true,
	".lock":   true,
	".gradle": true,
	".mjs":    true,
	".cjs":    true,
	".coffee": true,
	".dart":   true,
	".svelte": true,
	".vue":    true,
	".nim":    true,
	".hx":     true,
}
var spinnerFrames = []string{"|", "/", "-", "\\", "|", "/", "-", "\\"}
// ANSI SGR escape sequences used for terminal output.
const (
	colorPurple     = "\033[0;35m"
	colorPurpleBold = "\033[1;35m"
	colorGray       = "\033[0;90m"
	colorRed        = "\033[0;31m"
	colorYellow     = "\033[0;33m"
	colorGreen      = "\033[0;32m"
	colorBlue       = "\033[0;34m"
	colorCyan       = "\033[0;36m"
	colorReset      = "\033[0m" // restore default attributes
	colorBold       = "\033[1m"
)

View File

@@ -1,146 +0,0 @@
package main
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"sync/atomic"
"time"
tea "github.com/charmbracelet/bubbletea"
)
const trashTimeout = 30 * time.Second
// deletePathCmd returns a Bubble Tea command that moves a single path to
// the Trash. counter (may be nil) receives live progress via atomic stores.
// The resulting deleteProgressMsg always has done=true; err and count
// report the outcome.
func deletePathCmd(path string, counter *int64) tea.Cmd {
	return func() tea.Msg {
		count, err := trashPathWithProgress(path, counter)
		return deleteProgressMsg{
			done:  true,
			err:   err,
			count: count,
			path:  path,
		}
	}
}
// deleteMultiplePathsCmd returns a Bubble Tea command that moves every path
// in paths to the Trash and reports one aggregated deleteProgressMsg.
// counter (may be nil) receives live per-path progress. Paths that vanished
// before their turn (e.g. deleted with a parent) are silently skipped.
func deleteMultiplePathsCmd(paths []string, counter *int64) tea.Cmd {
	return func() tea.Msg {
		// Deepest paths first so children are trashed before their parents.
		ordered := append([]string(nil), paths...)
		depth := func(p string) int {
			return strings.Count(p, string(filepath.Separator))
		}
		sort.Slice(ordered, func(i, j int) bool {
			return depth(ordered[i]) > depth(ordered[j])
		})

		var trashed int64
		var failures []string
		for _, p := range ordered {
			n, err := trashPathWithProgress(p, counter)
			trashed += n
			switch {
			case err == nil:
				// ok
			case os.IsNotExist(err):
				// Already gone (removed together with a parent); not an error.
			default:
				failures = append(failures, err.Error())
			}
		}

		var aggErr error
		if len(failures) > 0 {
			aggErr = &multiDeleteError{errors: failures}
		}
		return deleteProgressMsg{
			done:  true,
			err:   aggErr,
			count: trashed,
			path:  "",
		}
	}
}
// multiDeleteError holds multiple deletion errors.
type multiDeleteError struct {
	errors []string // one message per failed path
}

// Error returns the single message when only one error occurred, otherwise
// joins at most the first three messages with "; " to keep the UI readable.
func (e *multiDeleteError) Error() string {
	if len(e.errors) == 1 {
		return e.errors[0]
	}
	return strings.Join(e.errors[:min(3, len(e.errors))], "; ")
}
// trashPathWithProgress moves a path to Trash using Finder.
// This allows users to recover accidentally deleted files.
// It returns the number of regular files the path contained (1 for a plain
// file or symlink). counter, when non-nil, receives the running count via
// atomic stores — note the value is OVERWRITTEN per path, not accumulated,
// so multi-path callers see per-path progress rather than a grand total.
func trashPathWithProgress(root string, counter *int64) (int64, error) {
	// Verify path exists (use Lstat to handle broken symlinks).
	info, err := os.Lstat(root)
	if err != nil {
		return 0, err
	}
	// Count items for progress reporting.
	var count int64
	if info.IsDir() {
		// Walk errors are intentionally ignored: counting is best-effort.
		_ = filepath.WalkDir(root, func(_ string, d os.DirEntry, err error) error {
			if err != nil {
				return nil
			}
			if !d.IsDir() {
				count++
				if counter != nil {
					atomic.StoreInt64(counter, count)
				}
			}
			return nil
		})
	} else {
		count = 1
		if counter != nil {
			atomic.StoreInt64(counter, 1)
		}
	}
	// Move to Trash using Finder AppleScript.
	if err := moveToTrash(root); err != nil {
		return 0, err
	}
	return count, nil
}
// moveToTrash uses macOS Finder to move a file/directory to Trash.
// This is the safest method as it uses the system's native trash mechanism.
// The osascript call is bounded by trashTimeout; the path is escaped for
// embedding inside an AppleScript double-quoted string.
func moveToTrash(path string) error {
	absPath, err := filepath.Abs(path)
	if err != nil {
		return fmt.Errorf("failed to resolve path: %w", err)
	}
	// Escape path for AppleScript (handle quotes and backslashes).
	// Backslashes first so the quote escapes are not double-escaped.
	escapedPath := strings.ReplaceAll(absPath, "\\", "\\\\")
	escapedPath = strings.ReplaceAll(escapedPath, "\"", "\\\"")
	script := fmt.Sprintf(`tell application "Finder" to delete POSIX file "%s"`, escapedPath)
	ctx, cancel := context.WithTimeout(context.Background(), trashTimeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, "osascript", "-e", script)
	output, err := cmd.CombinedOutput()
	if err != nil {
		if ctx.Err() == context.DeadlineExceeded {
			return fmt.Errorf("timeout moving to Trash")
		}
		return fmt.Errorf("failed to move to Trash: %s", strings.TrimSpace(string(output)))
	}
	return nil
}

View File

@@ -1,87 +0,0 @@
package main
import (
"os"
"path/filepath"
"testing"
)
// TestTrashPathWithProgress verifies that trashing a directory reports the
// contained file count and removes the directory from its original location.
// Requires Finder, so it is skipped in CI.
func TestTrashPathWithProgress(t *testing.T) {
	// Skip in CI environments where Finder may not be available.
	if os.Getenv("CI") != "" {
		t.Skip("Skipping Finder-dependent test in CI")
	}
	parent := t.TempDir()
	target := filepath.Join(parent, "target")
	if err := os.MkdirAll(target, 0o755); err != nil {
		t.Fatalf("create target: %v", err)
	}
	files := []string{
		filepath.Join(target, "one.txt"),
		filepath.Join(target, "two.txt"),
	}
	for _, f := range files {
		if err := os.WriteFile(f, []byte("content"), 0o644); err != nil {
			t.Fatalf("write %s: %v", f, err)
		}
	}
	var counter int64
	count, err := trashPathWithProgress(target, &counter)
	if err != nil {
		t.Fatalf("trashPathWithProgress returned error: %v", err)
	}
	if count != int64(len(files)) {
		t.Fatalf("expected %d files trashed, got %d", len(files), count)
	}
	if _, err := os.Stat(target); !os.IsNotExist(err) {
		t.Fatalf("expected target to be moved to Trash, stat err=%v", err)
	}
}
// TestDeleteMultiplePathsCmdHandlesParentChild verifies that trashing a
// parent and its child in one batch neither errors nor double-counts:
// the child is trashed first (deepest-first ordering), then the parent.
// Requires Finder, so it is skipped in CI.
func TestDeleteMultiplePathsCmdHandlesParentChild(t *testing.T) {
	// Skip in CI environments where Finder may not be available.
	if os.Getenv("CI") != "" {
		t.Skip("Skipping Finder-dependent test in CI")
	}
	base := t.TempDir()
	parent := filepath.Join(base, "parent")
	child := filepath.Join(parent, "child")
	// Structure: parent/fileA, parent/child/fileC.
	if err := os.MkdirAll(child, 0o755); err != nil {
		t.Fatalf("mkdir: %v", err)
	}
	if err := os.WriteFile(filepath.Join(parent, "fileA"), []byte("a"), 0o644); err != nil {
		t.Fatalf("write fileA: %v", err)
	}
	if err := os.WriteFile(filepath.Join(child, "fileC"), []byte("c"), 0o644); err != nil {
		t.Fatalf("write fileC: %v", err)
	}
	var counter int64
	msg := deleteMultiplePathsCmd([]string{parent, child}, &counter)()
	progress, ok := msg.(deleteProgressMsg)
	if !ok {
		t.Fatalf("expected deleteProgressMsg, got %T", msg)
	}
	if progress.err != nil {
		t.Fatalf("unexpected error: %v", progress.err)
	}
	if progress.count != 2 {
		t.Fatalf("expected 2 files trashed, got %d", progress.count)
	}
	if _, err := os.Stat(parent); !os.IsNotExist(err) {
		t.Fatalf("expected parent to be moved to Trash, err=%v", err)
	}
}
// TestMoveToTrashNonExistent checks that Finder rejects a path that does
// not exist and the error is propagated.
func TestMoveToTrashNonExistent(t *testing.T) {
	err := moveToTrash("/nonexistent/path/that/does/not/exist")
	if err == nil {
		t.Fatal("expected error for non-existent path")
	}
}

View File

@@ -1,247 +0,0 @@
package main
import (
"fmt"
"os"
"strings"
"time"
)
func displayPath(path string) string {
home, err := os.UserHomeDir()
if err != nil || home == "" {
return path
}
if strings.HasPrefix(path, home) {
return strings.Replace(path, home, "~", 1)
}
return path
}
// truncateMiddle trims the middle, keeping head and tail.
// The head keeps roughly one third of maxWidth and the tail the remaining
// two thirds, separated by "..." (3 cells). Widths are terminal cells via
// runeWidth, so CJK/emoji runes count as 2. For maxWidth < 10 the string
// is simply hard-cut at maxWidth cells with no ellipsis.
func truncateMiddle(s string, maxWidth int) string {
	runes := []rune(s)
	currentWidth := displayWidth(s)
	if currentWidth <= maxWidth {
		return s
	}
	if maxWidth < 10 {
		// Too narrow for head+ellipsis+tail: cut at maxWidth cells.
		width := 0
		for i, r := range runes {
			width += runeWidth(r)
			if width > maxWidth {
				return string(runes[:i])
			}
		}
		return s
	}
	targetHeadWidth := (maxWidth - 3) / 3
	targetTailWidth := maxWidth - 3 - targetHeadWidth
	// Grow the head forward until the next rune would exceed its budget.
	headWidth := 0
	headIdx := 0
	for i, r := range runes {
		w := runeWidth(r)
		if headWidth+w > targetHeadWidth {
			break
		}
		headWidth += w
		headIdx = i + 1
	}
	// Grow the tail backwards the same way.
	tailWidth := 0
	tailIdx := len(runes)
	for i := len(runes) - 1; i >= 0; i-- {
		w := runeWidth(runes[i])
		if tailWidth+w > targetTailWidth {
			break
		}
		tailWidth += w
		tailIdx = i
	}
	return string(runes[:headIdx]) + "..." + string(runes[tailIdx:])
}
// formatNumber renders n compactly: plain digits below 1000, then one
// decimal place with a "k" (thousands) or "M" (millions) suffix.
func formatNumber(n int64) string {
	switch {
	case n < 1000:
		return fmt.Sprintf("%d", n)
	case n < 1000000:
		return fmt.Sprintf("%.1fk", float64(n)/1000)
	default:
		return fmt.Sprintf("%.1fM", float64(n)/1000000)
	}
}
// humanizeBytes renders size using binary (1024-based) units, e.g. "512 B",
// "1.5 KB", "2.0 GB". Negative input is clamped to "0 B".
func humanizeBytes(size int64) string {
	if size < 0 {
		return "0 B"
	}
	const unit = 1024
	if size < unit {
		return fmt.Sprintf("%d B", size)
	}
	// Find the largest power of 1024 that keeps the mantissa below 1024.
	divisor, idx := int64(unit), 0
	for remaining := size / unit; remaining >= unit; remaining /= unit {
		divisor *= unit
		idx++
	}
	mantissa := float64(size) / float64(divisor)
	return fmt.Sprintf("%.1f %cB", mantissa, "KMGTPE"[idx])
}
// coloredProgressBar renders a barWidth-cell usage bar for value/maxValue.
// The fill color reflects percent: red >= 50, yellow >= 20, blue >= 5,
// otherwise green; unfilled cells are gray. Returns an all-gray empty bar
// when maxValue is not positive.
func coloredProgressBar(value, maxValue int64, percent float64) string {
	if maxValue <= 0 {
		return colorGray + strings.Repeat("░", barWidth) + colorReset
	}
	filled := min(int((value*int64(barWidth))/maxValue), barWidth)
	var barColor string
	if percent >= 50 {
		barColor = colorRed
	} else if percent >= 20 {
		barColor = colorYellow
	} else if percent >= 5 {
		barColor = colorBlue
	} else {
		barColor = colorGreen
	}
	var bar strings.Builder
	bar.WriteString(barColor)
	for i := range barWidth {
		if i < filled {
			if i < filled-1 {
				bar.WriteString("█")
			} else {
				// Shade the last filled cell by the fractional remainder
				// so the bar advances in sub-cell steps.
				remainder := (value * int64(barWidth)) % maxValue
				if remainder > maxValue/2 {
					bar.WriteString("█")
				} else if remainder > maxValue/4 {
					bar.WriteString("▓")
				} else {
					bar.WriteString("▒")
				}
			}
		} else {
			// Switch to gray for the empty cell, then back to the fill color.
			bar.WriteString(colorGray + "░" + barColor)
		}
	}
	return bar.String() + colorReset
}
// runeWidth reports the terminal cell width of r: 2 for CJK ideographs,
// kana, hangul, fullwidth forms and common emoji/symbol blocks, else 1.
func runeWidth(r rune) int {
	type span struct{ lo, hi rune }
	wide := [...]span{
		{0x4E00, 0x9FFF},   // CJK Unified Ideographs
		{0x3400, 0x4DBF},   // CJK Extension A
		{0x20000, 0x2A6DF}, // CJK Extension B
		{0x2A700, 0x2B73F}, // CJK Extension C
		{0x2B740, 0x2B81F}, // CJK Extension D
		{0x2B820, 0x2CEAF}, // CJK Extension E
		{0x3040, 0x30FF},   // Hiragana and Katakana
		{0x31F0, 0x31FF},   // Katakana Phonetic Extensions
		{0xAC00, 0xD7AF},   // Hangul Syllables
		{0xFF00, 0xFFEF},   // Fullwidth Forms
		{0x1F300, 0x1F6FF}, // Misc Symbols and Pictographs (incl. Transport)
		{0x1F900, 0x1F9FF}, // Supplemental Symbols and Pictographs
		{0x2600, 0x26FF},   // Miscellaneous Symbols
		{0x2700, 0x27BF},   // Dingbats
		{0xFE10, 0xFE1F},   // Vertical Forms
		{0x1F000, 0x1F02F}, // Mahjong Tiles
	}
	for _, s := range wide {
		if r >= s.lo && r <= s.hi {
			return 2
		}
	}
	return 1
}
// displayWidth returns the total terminal cell width of s, summing
// runeWidth over every rune.
func displayWidth(s string) int {
	width := 0
	for _, r := range s {
		width += runeWidth(r)
	}
	return width
}
// calculateNameWidth computes the name column width from the terminal
// width. The non-name columns (size, bar, percentage, spacing) consume a
// fixed 61 cells; whatever remains is clamped to the range [24, 60].
func calculateNameWidth(termWidth int) int {
	// Fixed width consumed by the non-name columns of a row.
	const fixedWidth = 61
	// Clamp the leftover to a usable range using the builtin min/max
	// (already relied on elsewhere in this file).
	return min(max(termWidth-fixedWidth, 24), 60)
}
// trimNameWithWidth truncates name to at most maxWidth terminal cells,
// keeping only the head and appending "..." when truncation occurs.
// Returns just "..." when not even one rune plus the ellipsis fits.
func trimNameWithWidth(name string, maxWidth int) string {
	const (
		ellipsis      = "..."
		ellipsisWidth = 3
	)
	runes := []rune(name)
	// Pre-compute per-rune cell widths so the back-off loop below can
	// subtract without re-measuring.
	widths := make([]int, len(runes))
	for i, r := range runes {
		widths[i] = runeWidth(r)
	}
	currentWidth := 0
	for i, w := range widths {
		if currentWidth+w > maxWidth {
			// Back off until the ellipsis also fits within maxWidth.
			subWidth := currentWidth
			j := i
			for j > 0 && subWidth+ellipsisWidth > maxWidth {
				j--
				subWidth -= widths[j]
			}
			if j == 0 {
				return ellipsis
			}
			return string(runes[:j]) + ellipsis
		}
		currentWidth += w
	}
	return name
}
// padName right-pads name with spaces to targetWidth terminal cells.
// Names already at or beyond targetWidth are returned unchanged.
func padName(name string, targetWidth int) string {
	currentWidth := displayWidth(name)
	if currentWidth >= targetWidth {
		return name
	}
	return name + strings.Repeat(" ", targetWidth-currentWidth)
}
// formatUnusedTime formats time since last access.
// Returns "" for unknown (zero) times and anything accessed within the
// last 90 days; otherwise a ">Nmo" / ">1yr" / ">Nyr" label.
func formatUnusedTime(lastAccess time.Time) string {
	if lastAccess.IsZero() {
		return ""
	}
	duration := time.Since(lastAccess)
	days := int(duration.Hours() / 24)
	if days < 90 {
		return ""
	}
	months := days / 30
	years := days / 365
	if years >= 2 {
		return fmt.Sprintf(">%dyr", years)
	} else if years >= 1 {
		return ">1yr"
	} else if months >= 3 {
		return fmt.Sprintf(">%dmo", months)
	}
	// Unreachable in practice: days >= 90 implies months >= 3 above.
	return ""
}

View File

@@ -1,309 +0,0 @@
package main
import (
"strings"
"testing"
)
func TestRuneWidth(t *testing.T) {
tests := []struct {
name string
input rune
want int
}{
{"ASCII letter", 'a', 1},
{"ASCII digit", '5', 1},
{"Chinese character", '中', 2},
{"Japanese hiragana", 'あ', 2},
{"Korean hangul", '한', 2},
{"CJK ideograph", '語', 2},
{"Full-width number", '', 2},
{"ASCII space", ' ', 1},
{"Tab", '\t', 1},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := runeWidth(tt.input); got != tt.want {
t.Errorf("runeWidth(%q) = %d, want %d", tt.input, got, tt.want)
}
})
}
}
// TestDisplayWidth verifies cell-width totals for ASCII, CJK, and mixed
// strings.
func TestDisplayWidth(t *testing.T) {
	tests := []struct {
		name  string
		input string
		want  int
	}{
		{"Empty string", "", 0},
		{"ASCII only", "hello", 5},
		{"Chinese only", "你好", 4},
		{"Mixed ASCII and CJK", "hello世界", 9}, // 5 + 4
		{"Path with CJK", "/Users/张三/文件", 16}, // 7 (ASCII) + 4 (张三) + 4 (文件) + 1 (/) = 16
		// Restored fullwidth digits (U+FF11..U+FF13, 2 cells each): the
		// previous string literal was empty but expected width 6.
		{"Full-width chars", "１２３", 6},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := displayWidth(tt.input); got != tt.want {
				t.Errorf("displayWidth(%q) = %d, want %d", tt.input, got, tt.want)
			}
		})
	}
}
// TestHumanizeBytes pins binary-unit formatting across the B..PB
// boundaries, including negative-input clamping.
func TestHumanizeBytes(t *testing.T) {
	tests := []struct {
		input int64
		want  string
	}{
		{-100, "0 B"},
		{0, "0 B"},
		{512, "512 B"},
		{1023, "1023 B"},
		{1024, "1.0 KB"},
		{1536, "1.5 KB"},
		{10240, "10.0 KB"},
		{1048576, "1.0 MB"},
		{1572864, "1.5 MB"},
		{1073741824, "1.0 GB"},
		{1099511627776, "1.0 TB"},
		{1125899906842624, "1.0 PB"},
	}
	for _, tt := range tests {
		got := humanizeBytes(tt.input)
		if got != tt.want {
			t.Errorf("humanizeBytes(%d) = %q, want %q", tt.input, got, tt.want)
		}
	}
}
// TestFormatNumber pins compact count formatting across the plain/k/M
// thresholds (note 999999 rounds up to "1000.0k", not "1.0M").
func TestFormatNumber(t *testing.T) {
	tests := []struct {
		input int64
		want  string
	}{
		{0, "0"},
		{500, "500"},
		{999, "999"},
		{1000, "1.0k"},
		{1500, "1.5k"},
		{999999, "1000.0k"},
		{1000000, "1.0M"},
		{1500000, "1.5M"},
	}
	for _, tt := range tests {
		got := formatNumber(tt.input)
		if got != tt.want {
			t.Errorf("formatNumber(%d) = %q, want %q", tt.input, got, tt.want)
		}
	}
}
// TestTruncateMiddle checks middle-truncation invariants (width budget
// respected, ellipsis present when truncated) for ASCII, CJK, and very
// narrow widths.
func TestTruncateMiddle(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		maxWidth int
		check    func(t *testing.T, result string)
	}{
		{
			name:     "No truncation needed",
			input:    "short",
			maxWidth: 10,
			check: func(t *testing.T, result string) {
				if result != "short" {
					t.Errorf("Should not truncate short string, got %q", result)
				}
			},
		},
		{
			name:     "Truncate long ASCII",
			input:    "verylongfilename.txt",
			maxWidth: 15,
			check: func(t *testing.T, result string) {
				if !strings.Contains(result, "...") {
					t.Errorf("Truncated string should contain '...', got %q", result)
				}
				if displayWidth(result) > 15 {
					t.Errorf("Truncated width %d exceeds max %d", displayWidth(result), 15)
				}
			},
		},
		{
			name:     "Truncate with CJK characters",
			input:    "非常长的中文文件名称.txt",
			maxWidth: 20,
			check: func(t *testing.T, result string) {
				if !strings.Contains(result, "...") {
					t.Errorf("Should truncate CJK string, got %q", result)
				}
				if displayWidth(result) > 20 {
					t.Errorf("Truncated width %d exceeds max %d", displayWidth(result), 20)
				}
			},
		},
		{
			// maxWidth < 10 takes the hard-cut path (no ellipsis guaranteed).
			name:     "Very small width",
			input:    "longname",
			maxWidth: 5,
			check: func(t *testing.T, result string) {
				if displayWidth(result) > 5 {
					t.Errorf("Width %d exceeds max %d", displayWidth(result), 5)
				}
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := truncateMiddle(tt.input, tt.maxWidth)
			tt.check(t, result)
		})
	}
}
// TestDisplayPath verifies home-directory abbreviation ("~") and that
// paths outside $HOME pass through unchanged. HOME is overridden per case
// via t.Setenv.
func TestDisplayPath(t *testing.T) {
	tests := []struct {
		name  string
		setup func() string
		check func(t *testing.T, result string)
	}{
		{
			name: "Replace home directory",
			setup: func() string {
				home := t.TempDir()
				t.Setenv("HOME", home)
				return home + "/Documents/file.txt"
			},
			check: func(t *testing.T, result string) {
				if !strings.HasPrefix(result, "~/") {
					t.Errorf("Expected path to start with ~/, got %q", result)
				}
				if !strings.HasSuffix(result, "Documents/file.txt") {
					t.Errorf("Expected path to end with Documents/file.txt, got %q", result)
				}
			},
		},
		{
			name: "Keep absolute path outside home",
			setup: func() string {
				t.Setenv("HOME", "/Users/test")
				return "/var/log/system.log"
			},
			check: func(t *testing.T, result string) {
				if result != "/var/log/system.log" {
					t.Errorf("Expected unchanged path, got %q", result)
				}
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			path := tt.setup()
			result := displayPath(path)
			tt.check(t, result)
		})
	}
}
// TestPadName checks space-padding to a target cell width for ASCII, CJK,
// and already-wide inputs.
func TestPadName(t *testing.T) {
	tests := []struct {
		name        string
		input       string
		targetWidth int
		wantWidth   int
	}{
		{"Pad ASCII", "test", 10, 10},
		{"No padding needed", "longname", 5, 8},
		{"Pad CJK", "中文", 10, 10},
		{"Mixed CJK and ASCII", "hello世", 15, 15},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := padName(tt.input, tt.targetWidth)
			gotWidth := displayWidth(result)
			// NOTE(review): the guard only fires when padding was expected;
			// the "No padding needed" case can never fail here — confirm
			// whether a stricter equality check was intended.
			if gotWidth < tt.wantWidth && displayWidth(tt.input) < tt.targetWidth {
				t.Errorf("padName(%q, %d) width = %d, want >= %d", tt.input, tt.targetWidth, gotWidth, tt.wantWidth)
			}
		})
	}
}
// TestTrimNameWithWidth checks head-truncation with trailing ellipsis for
// ASCII and CJK names, and the no-trim passthrough.
func TestTrimNameWithWidth(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		maxWidth int
		check    func(t *testing.T, result string)
	}{
		{
			name:     "Trim ASCII name",
			input:    "verylongfilename.txt",
			maxWidth: 10,
			check: func(t *testing.T, result string) {
				if displayWidth(result) > 10 {
					t.Errorf("Width exceeds max: %d > 10", displayWidth(result))
				}
				if !strings.HasSuffix(result, "...") {
					t.Errorf("Expected ellipsis, got %q", result)
				}
			},
		},
		{
			name:     "Trim CJK name",
			input:    "很长的文件名称.txt",
			maxWidth: 12,
			check: func(t *testing.T, result string) {
				if displayWidth(result) > 12 {
					t.Errorf("Width exceeds max: %d > 12", displayWidth(result))
				}
			},
		},
		{
			name:     "No trimming needed",
			input:    "short.txt",
			maxWidth: 20,
			check: func(t *testing.T, result string) {
				if result != "short.txt" {
					t.Errorf("Should not trim, got %q", result)
				}
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := trimNameWithWidth(tt.input, tt.maxWidth)
			tt.check(t, result)
		})
	}
}
// TestCalculateNameWidth checks that the name column width stays within
// the [24, 60] clamp across narrow and wide terminals.
func TestCalculateNameWidth(t *testing.T) {
	tests := []struct {
		termWidth int
		wantMin   int
		wantMax   int
	}{
		{80, 19, 60},  // 80 - 61 = 19
		{120, 59, 60}, // 120 - 61 = 59
		{200, 60, 60}, // Capped at 60
		{70, 24, 60},  // Below minimum, use 24
		{50, 24, 60},  // Very small, use minimum
	}
	for _, tt := range tests {
		got := calculateNameWidth(tt.termWidth)
		if got < tt.wantMin || got > tt.wantMax {
			t.Errorf("calculateNameWidth(%d) = %d, want between %d and %d",
				tt.termWidth, got, tt.wantMin, tt.wantMax)
		}
	}
}

View File

@@ -1,39 +0,0 @@
package main
// entryHeap is a min-heap of dirEntry used to keep Top N largest entries.
// The smallest entry sits at index 0, so callers can pop it when a larger
// candidate arrives. Implements container/heap.Interface.
type entryHeap []dirEntry

func (h entryHeap) Len() int           { return len(h) }
func (h entryHeap) Less(i, j int) bool { return h[i].Size < h[j].Size }
func (h entryHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

// Push appends x (a dirEntry) as required by heap.Interface.
func (h *entryHeap) Push(x any) {
	*h = append(*h, x.(dirEntry))
}

// Pop removes and returns the last element (the heap package has already
// swapped the minimum there).
func (h *entryHeap) Pop() any {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[0 : n-1]
	return x
}
// largeFileHeap is a min-heap for fileEntry, used to keep the Top N
// largest individual files. Implements container/heap.Interface.
type largeFileHeap []fileEntry

func (h largeFileHeap) Len() int           { return len(h) }
func (h largeFileHeap) Less(i, j int) bool { return h[i].Size < h[j].Size }
func (h largeFileHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

// Push appends x (a fileEntry) as required by heap.Interface.
func (h *largeFileHeap) Push(x any) {
	*h = append(*h, x.(fileEntry))
}

// Pop removes and returns the last element (the heap package has already
// swapped the minimum there).
func (h *largeFileHeap) Pop() any {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[0 : n-1]
	return x
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,663 +0,0 @@
package main
import (
"bytes"
"container/heap"
"context"
"fmt"
"io/fs"
"os"
"os/exec"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"golang.org/x/sync/singleflight"
)
var scanGroup singleflight.Group
// scanPathConcurrent scans the immediate children of root with a bounded
// worker pool and returns the Top N largest entries, the largest individual
// files, and the total size. The filesScanned/dirsScanned/bytesScanned
// counters and currentPath are updated live for the UI.
//
// Behavior visible in this function: symlinks contribute only their link
// size (targets are not followed); at "/" known system dirs are skipped;
// ~/Library is sized from cache when possible; folded dirs are sized
// without being expanded; Spotlight results, when available, replace the
// locally collected large-file list.
func scanPathConcurrent(root string, filesScanned, dirsScanned, bytesScanned *int64, currentPath *string) (scanResult, error) {
	children, err := os.ReadDir(root)
	if err != nil {
		return scanResult{}, err
	}
	var total int64
	// Keep Top N heaps.
	entriesHeap := &entryHeap{}
	heap.Init(entriesHeap)
	largeFilesHeap := &largeFileHeap{}
	heap.Init(largeFilesHeap)
	// Worker pool sized for I/O-bound scanning.
	numWorkers := max(runtime.NumCPU()*cpuMultiplier, minWorkers)
	if numWorkers > maxWorkers {
		numWorkers = maxWorkers
	}
	if numWorkers > len(children) {
		numWorkers = len(children)
	}
	if numWorkers < 1 {
		numWorkers = 1
	}
	sem := make(chan struct{}, numWorkers)
	var wg sync.WaitGroup
	// Collect results via channels; two collector goroutines maintain the
	// bounded min-heaps so producers never block on heap operations.
	entryChan := make(chan dirEntry, len(children))
	largeFileChan := make(chan fileEntry, maxLargeFiles*2)
	var collectorWg sync.WaitGroup
	collectorWg.Add(2)
	go func() {
		defer collectorWg.Done()
		for entry := range entryChan {
			if entriesHeap.Len() < maxEntries {
				heap.Push(entriesHeap, entry)
			} else if entry.Size > (*entriesHeap)[0].Size {
				heap.Pop(entriesHeap)
				heap.Push(entriesHeap, entry)
			}
		}
	}()
	go func() {
		defer collectorWg.Done()
		for file := range largeFileChan {
			if largeFilesHeap.Len() < maxLargeFiles {
				heap.Push(largeFilesHeap, file)
			} else if file.Size > (*largeFilesHeap)[0].Size {
				heap.Pop(largeFilesHeap)
				heap.Push(largeFilesHeap, file)
			}
		}
	}()
	isRootDir := root == "/"
	home := os.Getenv("HOME")
	isHomeDir := home != "" && root == home
	for _, child := range children {
		fullPath := filepath.Join(root, child.Name())
		// Skip symlinks to avoid following unexpected targets.
		if child.Type()&fs.ModeSymlink != 0 {
			targetInfo, err := os.Stat(fullPath)
			isDir := false
			if err == nil && targetInfo.IsDir() {
				isDir = true
			}
			// Count link size only to avoid double-counting targets.
			info, err := child.Info()
			if err != nil {
				continue
			}
			size := getActualFileSize(fullPath, info)
			atomic.AddInt64(&total, size)
			entryChan <- dirEntry{
				Name:       child.Name() + " →",
				Path:       fullPath,
				Size:       size,
				IsDir:      isDir,
				LastAccess: getLastAccessTimeFromInfo(info),
			}
			continue
		}
		if child.IsDir() {
			if defaultSkipDirs[child.Name()] {
				continue
			}
			// Skip system dirs at root.
			if isRootDir && skipSystemDirs[child.Name()] {
				continue
			}
			// ~/Library is scanned separately; reuse cache when possible.
			if isHomeDir && child.Name() == "Library" {
				wg.Add(1)
				go func(name, path string) {
					defer wg.Done()
					sem <- struct{}{}
					defer func() { <-sem }()
					var size int64
					if cached, err := loadStoredOverviewSize(path); err == nil && cached > 0 {
						size = cached
					} else if cached, err := loadCacheFromDisk(path); err == nil {
						size = cached.TotalSize
					} else {
						size = calculateDirSizeConcurrent(path, largeFileChan, filesScanned, dirsScanned, bytesScanned, currentPath)
					}
					atomic.AddInt64(&total, size)
					atomic.AddInt64(dirsScanned, 1)
					entryChan <- dirEntry{
						Name:       name,
						Path:       path,
						Size:       size,
						IsDir:      true,
						LastAccess: time.Time{},
					}
				}(child.Name(), fullPath)
				continue
			}
			// Folded dirs: fast size without expanding.
			if shouldFoldDirWithPath(child.Name(), fullPath) {
				wg.Add(1)
				go func(name, path string) {
					defer wg.Done()
					sem <- struct{}{}
					defer func() { <-sem }()
					size, err := getDirectorySizeFromDu(path)
					if err != nil || size <= 0 {
						// du unavailable/failed: fall back to a manual walk.
						size = calculateDirSizeFast(path, filesScanned, dirsScanned, bytesScanned, currentPath)
					}
					atomic.AddInt64(&total, size)
					atomic.AddInt64(dirsScanned, 1)
					entryChan <- dirEntry{
						Name:       name,
						Path:       path,
						Size:       size,
						IsDir:      true,
						LastAccess: time.Time{},
					}
				}(child.Name(), fullPath)
				continue
			}
			// Regular subdirectory: full recursive sizing.
			wg.Add(1)
			go func(name, path string) {
				defer wg.Done()
				sem <- struct{}{}
				defer func() { <-sem }()
				size := calculateDirSizeConcurrent(path, largeFileChan, filesScanned, dirsScanned, bytesScanned, currentPath)
				atomic.AddInt64(&total, size)
				atomic.AddInt64(dirsScanned, 1)
				entryChan <- dirEntry{
					Name:       name,
					Path:       path,
					Size:       size,
					IsDir:      true,
					LastAccess: time.Time{},
				}
			}(child.Name(), fullPath)
			continue
		}
		info, err := child.Info()
		if err != nil {
			continue
		}
		// Actual disk usage for sparse/cloud files.
		size := getActualFileSize(fullPath, info)
		atomic.AddInt64(&total, size)
		atomic.AddInt64(filesScanned, 1)
		atomic.AddInt64(bytesScanned, size)
		entryChan <- dirEntry{
			Name:       child.Name(),
			Path:       fullPath,
			Size:       size,
			IsDir:      false,
			LastAccess: getLastAccessTimeFromInfo(info),
		}
		// Track large files only.
		if !shouldSkipFileForLargeTracking(fullPath) && size >= minLargeFileSize {
			largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}
		}
	}
	wg.Wait()
	// Close channels and wait for collectors.
	close(entryChan)
	close(largeFileChan)
	collectorWg.Wait()
	// Convert heaps to sorted slices (descending).
	entries := make([]dirEntry, entriesHeap.Len())
	for i := len(entries) - 1; i >= 0; i-- {
		entries[i] = heap.Pop(entriesHeap).(dirEntry)
	}
	largeFiles := make([]fileEntry, largeFilesHeap.Len())
	for i := len(largeFiles) - 1; i >= 0; i-- {
		largeFiles[i] = heap.Pop(largeFilesHeap).(fileEntry)
	}
	// Use Spotlight for large files when available.
	if spotlightFiles := findLargeFilesWithSpotlight(root, minLargeFileSize); len(spotlightFiles) > 0 {
		largeFiles = spotlightFiles
	}
	return scanResult{
		Entries:    entries,
		LargeFiles: largeFiles,
		TotalSize:  total,
		TotalFiles: atomic.LoadInt64(filesScanned),
	}, nil
}
// shouldFoldDirWithPath reports whether a directory should be "folded" —
// sized as a single unit instead of being expanded entry by entry.
// A dir folds when its name is in foldDirs, or when it sits inside an
// npm/tnpm cache tree (direct children of .npm/.tnpm, "_"-prefixed cache
// dirs, or single-character shard directories).
func shouldFoldDirWithPath(name, path string) bool {
	if foldDirs[name] {
		return true
	}
	// Handle npm cache structure.
	if strings.Contains(path, "/.npm/") || strings.Contains(path, "/.tnpm/") {
		parent := filepath.Base(filepath.Dir(path))
		if parent == ".npm" || parent == ".tnpm" || strings.HasPrefix(parent, "_") {
			return true
		}
		if len(name) == 1 {
			return true
		}
	}
	return false
}
// shouldSkipFileForLargeTracking reports whether path's extension marks it
// as a source/text file that should not appear in the large-files list.
func shouldSkipFileForLargeTracking(path string) bool {
	ext := strings.ToLower(filepath.Ext(path))
	return skipExtensions[ext]
}
// calculateDirSizeFast performs concurrent dir sizing using os.ReadDir.
// Subdirectories are walked in bounded goroutines; the whole walk is
// abandoned (returning a partial total) after a 5-minute timeout.
// Unreadable directories and files are silently skipped.
func calculateDirSizeFast(root string, filesScanned, dirsScanned, bytesScanned *int64, currentPath *string) int64 {
	var total int64
	var wg sync.WaitGroup
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	concurrency := min(runtime.NumCPU()*4, 64)
	sem := make(chan struct{}, concurrency)
	var walk func(string)
	walk = func(dirPath string) {
		select {
		case <-ctx.Done():
			return
		default:
		}
		// NOTE(review): *currentPath is written here from multiple
		// goroutines without synchronization — racy; confirm this is
		// acceptable for display-only use.
		if currentPath != nil && atomic.LoadInt64(filesScanned)%int64(batchUpdateSize) == 0 {
			*currentPath = dirPath
		}
		entries, err := os.ReadDir(dirPath)
		if err != nil {
			return
		}
		// Accumulate locally, then publish once per directory to reduce
		// atomic contention.
		var localBytes, localFiles int64
		for _, entry := range entries {
			if entry.IsDir() {
				wg.Add(1)
				subDir := filepath.Join(dirPath, entry.Name())
				go func(p string) {
					defer wg.Done()
					sem <- struct{}{}
					defer func() { <-sem }()
					walk(p)
				}(subDir)
				atomic.AddInt64(dirsScanned, 1)
			} else {
				info, err := entry.Info()
				if err == nil {
					size := getActualFileSize(filepath.Join(dirPath, entry.Name()), info)
					localBytes += size
					localFiles++
				}
			}
		}
		if localBytes > 0 {
			atomic.AddInt64(&total, localBytes)
			atomic.AddInt64(bytesScanned, localBytes)
		}
		if localFiles > 0 {
			atomic.AddInt64(filesScanned, localFiles)
		}
	}
	walk(root)
	wg.Wait()
	return total
}
// findLargeFilesWithSpotlight asks Spotlight (mdfind) for files of at
// least minSize bytes under root. It returns nil when mdfind fails or
// times out, letting callers keep the locally collected list.
//
// Results exclude source-code extensions and anything inside folded
// directories, are sized by actual disk usage, and are capped to the
// maxLargeFiles largest entries (descending).
func findLargeFilesWithSpotlight(root string, minSize int64) []fileEntry {
	query := fmt.Sprintf("kMDItemFSSize >= %d", minSize)
	ctx, cancel := context.WithTimeout(context.Background(), mdlsTimeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, "mdfind", "-onlyin", root, query)
	output, err := cmd.Output()
	if err != nil {
		return nil
	}
	var files []fileEntry
	for rawLine := range strings.Lines(strings.TrimSpace(string(output))) {
		// BUG FIX: strings.Lines yields lines INCLUDING their terminating
		// newline, so every path except the last carried a trailing "\n"
		// and failed the Lstat below. Strip the terminator first.
		line := strings.TrimRight(rawLine, "\r\n")
		if line == "" {
			continue
		}
		// Filter code files first (cheap).
		if shouldSkipFileForLargeTracking(line) {
			continue
		}
		// Filter folded directories (cheap string check).
		if isInFoldedDir(line) {
			continue
		}
		info, err := os.Lstat(line)
		if err != nil {
			continue
		}
		if info.IsDir() || info.Mode()&os.ModeSymlink != 0 {
			continue
		}
		// Actual disk usage for sparse/cloud files.
		actualSize := getActualFileSize(line, info)
		files = append(files, fileEntry{
			Name: filepath.Base(line),
			Path: line,
			Size: actualSize,
		})
	}
	// Sort by size (descending).
	sort.Slice(files, func(i, j int) bool {
		return files[i].Size > files[j].Size
	})
	if len(files) > maxLargeFiles {
		files = files[:maxLargeFiles]
	}
	return files
}
// isInFoldedDir checks if a path is inside a folded directory by matching
// each path component against the foldDirs set.
func isInFoldedDir(path string) bool {
	parts := strings.SplitSeq(path, string(os.PathSeparator))
	for part := range parts {
		if foldDirs[part] {
			return true
		}
	}
	return false
}
// calculateDirSizeConcurrent recursively sizes root, sending qualifying
// large files to largeFileChan and updating the shared progress counters.
// Symlinks contribute only their link size; folded subdirectories are
// sized via du; other subdirectories recurse in bounded goroutines.
// Unreadable entries are silently skipped.
func calculateDirSizeConcurrent(root string, largeFileChan chan<- fileEntry, filesScanned, dirsScanned, bytesScanned *int64, currentPath *string) int64 {
	children, err := os.ReadDir(root)
	if err != nil {
		return 0
	}
	var total int64
	var wg sync.WaitGroup
	// Limit concurrent subdirectory scans.
	maxConcurrent := min(runtime.NumCPU()*2, maxDirWorkers)
	sem := make(chan struct{}, maxConcurrent)
	for _, child := range children {
		fullPath := filepath.Join(root, child.Name())
		if child.Type()&fs.ModeSymlink != 0 {
			info, err := child.Info()
			if err != nil {
				continue
			}
			size := getActualFileSize(fullPath, info)
			total += size
			atomic.AddInt64(filesScanned, 1)
			atomic.AddInt64(bytesScanned, size)
			continue
		}
		if child.IsDir() {
			if shouldFoldDirWithPath(child.Name(), fullPath) {
				wg.Add(1)
				// NOTE(review): this goroutine does not take sem, and a du
				// failure silently drops the folded dir's size (no walk
				// fallback here, unlike the top-level scan) — confirm intended.
				go func(path string) {
					defer wg.Done()
					size, err := getDirectorySizeFromDu(path)
					if err == nil && size > 0 {
						atomic.AddInt64(&total, size)
						atomic.AddInt64(bytesScanned, size)
						atomic.AddInt64(dirsScanned, 1)
					}
				}(fullPath)
				continue
			}
			wg.Add(1)
			go func(path string) {
				defer wg.Done()
				sem <- struct{}{}
				defer func() { <-sem }()
				size := calculateDirSizeConcurrent(path, largeFileChan, filesScanned, dirsScanned, bytesScanned, currentPath)
				atomic.AddInt64(&total, size)
				atomic.AddInt64(dirsScanned, 1)
			}(fullPath)
			continue
		}
		info, err := child.Info()
		if err != nil {
			continue
		}
		size := getActualFileSize(fullPath, info)
		total += size
		atomic.AddInt64(filesScanned, 1)
		atomic.AddInt64(bytesScanned, size)
		if !shouldSkipFileForLargeTracking(fullPath) && size >= minLargeFileSize {
			largeFileChan <- fileEntry{Name: child.Name(), Path: fullPath, Size: size}
		}
		// Update current path occasionally to prevent UI jitter.
		if currentPath != nil && atomic.LoadInt64(filesScanned)%int64(batchUpdateSize) == 0 {
			*currentPath = fullPath
		}
	}
	wg.Wait()
	return total
}
// measureOverviewSize calculates the size of a directory using multiple strategies.
// When scanning Home, it excludes ~/Library to avoid duplicate counting.
// Strategy order: stored overview cache → du → logical walk → scan cache;
// each successful fast measurement is persisted via storeOverviewSize
// (store errors are deliberately ignored).
func measureOverviewSize(path string) (int64, error) {
	if path == "" {
		return 0, fmt.Errorf("empty path")
	}
	path = filepath.Clean(path)
	if !filepath.IsAbs(path) {
		return 0, fmt.Errorf("path must be absolute: %s", path)
	}
	if _, err := os.Stat(path); err != nil {
		return 0, fmt.Errorf("cannot access path: %v", err)
	}
	// Determine if we should exclude ~/Library (when scanning Home)
	home := os.Getenv("HOME")
	excludePath := ""
	if home != "" && path == home {
		excludePath = filepath.Join(home, "Library")
	}
	if cached, err := loadStoredOverviewSize(path); err == nil && cached > 0 {
		return cached, nil
	}
	if duSize, err := getDirectorySizeFromDuWithExclude(path, excludePath); err == nil && duSize > 0 {
		_ = storeOverviewSize(path, duSize)
		return duSize, nil
	}
	if logicalSize, err := getDirectoryLogicalSizeWithExclude(path, excludePath); err == nil && logicalSize > 0 {
		_ = storeOverviewSize(path, logicalSize)
		return logicalSize, nil
	}
	if cached, err := loadCacheFromDisk(path); err == nil {
		_ = storeOverviewSize(path, cached.TotalSize)
		return cached.TotalSize, nil
	}
	return 0, fmt.Errorf("unable to measure directory size with fast methods")
}
// getDirectorySizeFromDu returns the disk usage of path via `du -sk`,
// with no exclusions.
func getDirectorySizeFromDu(path string) (int64, error) {
	return getDirectorySizeFromDuWithExclude(path, "")
}
// getDirectorySizeFromDuWithExclude returns the disk usage of path in
// bytes via `du -sk` (bounded by duTimeout). When excludePath is set, its
// own du size is measured and subtracted from the total. A missing
// excludePath counts as zero; a nonsensical result (exclude > total) is
// also treated as zero rather than returning a negative size.
func getDirectorySizeFromDuWithExclude(path string, excludePath string) (int64, error) {
	// runDuSize shells out to `du -sk target` and converts KiB to bytes.
	runDuSize := func(target string) (int64, error) {
		if _, err := os.Stat(target); err != nil {
			return 0, err
		}
		ctx, cancel := context.WithTimeout(context.Background(), duTimeout)
		defer cancel()
		cmd := exec.CommandContext(ctx, "du", "-sk", target)
		var stdout, stderr bytes.Buffer
		cmd.Stdout = &stdout
		cmd.Stderr = &stderr
		if err := cmd.Run(); err != nil {
			if ctx.Err() == context.DeadlineExceeded {
				return 0, fmt.Errorf("du timeout after %v", duTimeout)
			}
			if stderr.Len() > 0 {
				return 0, fmt.Errorf("du failed: %v (%s)", err, stderr.String())
			}
			return 0, fmt.Errorf("du failed: %v", err)
		}
		fields := strings.Fields(stdout.String())
		if len(fields) == 0 {
			return 0, fmt.Errorf("du output empty")
		}
		kb, err := strconv.ParseInt(fields[0], 10, 64)
		if err != nil {
			return 0, fmt.Errorf("failed to parse du output: %v", err)
		}
		if kb <= 0 {
			return 0, fmt.Errorf("du size invalid: %d", kb)
		}
		return kb * 1024, nil
	}
	// When excluding a path (e.g., ~/Library), subtract only that exact directory instead of ignoring every "Library"
	if excludePath != "" {
		totalSize, err := runDuSize(path)
		if err != nil {
			return 0, err
		}
		excludeSize, err := runDuSize(excludePath)
		if err != nil {
			if !os.IsNotExist(err) {
				return 0, err
			}
			excludeSize = 0
		}
		if excludeSize > totalSize {
			excludeSize = 0
		}
		return totalSize - excludeSize, nil
	}
	return runDuSize(path)
}
// getDirectoryLogicalSizeWithExclude walks the tree rooted at path and sums
// per-file sizes via getActualFileSize, skipping excludePath (and everything
// under it) when set. Permission errors prune the offending subtree; other
// walk errors are ignored so one bad entry cannot abort the measurement.
func getDirectoryLogicalSizeWithExclude(path string, excludePath string) (int64, error) {
	var total int64
	visit := func(p string, d fs.DirEntry, walkErr error) error {
		switch {
		case walkErr != nil:
			if os.IsPermission(walkErr) {
				return filepath.SkipDir
			}
			return nil
		case excludePath != "" && p == excludePath:
			// Skip the excluded directory and its contents.
			return filepath.SkipDir
		case d.IsDir():
			return nil
		}
		if info, infoErr := d.Info(); infoErr == nil {
			total += getActualFileSize(p, info)
		}
		return nil
	}
	if err := filepath.WalkDir(path, visit); err != nil && err != filepath.SkipDir {
		return 0, err
	}
	return total, nil
}
// getActualFileSize returns the smaller of a file's logical size and its
// on-disk allocation (st_blocks * 512), so sparse files are not over-counted.
// Falls back to the logical size when the raw stat is unavailable.
func getActualFileSize(_ string, info fs.FileInfo) int64 {
	logical := info.Size()
	stat, ok := info.Sys().(*syscall.Stat_t)
	if !ok {
		return logical
	}
	if onDisk := stat.Blocks * 512; onDisk < logical {
		return onDisk
	}
	return logical
}
// getLastAccessTime stats path and returns its last access time, or the
// zero time if the path cannot be stat'ed.
func getLastAccessTime(path string) time.Time {
	if info, err := os.Stat(path); err == nil {
		return getLastAccessTimeFromInfo(info)
	}
	return time.Time{}
}
// getLastAccessTimeFromInfo extracts atime from a FileInfo's underlying
// darwin stat structure; returns the zero time when unavailable.
func getLastAccessTimeFromInfo(info fs.FileInfo) time.Time {
	if stat, ok := info.Sys().(*syscall.Stat_t); ok {
		return time.Unix(stat.Atimespec.Sec, stat.Atimespec.Nsec)
	}
	return time.Time{}
}

View File

@@ -1,45 +0,0 @@
package main
import (
"os"
"path/filepath"
"testing"
)
// writeFileWithSize creates path (and any missing parent directories) filled
// with `size` zero bytes, failing the test on any error.
func writeFileWithSize(t *testing.T, path string, size int) {
	t.Helper()
	if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
		t.Fatalf("mkdir %s: %v", path, err)
	}
	if err := os.WriteFile(path, make([]byte, size), 0o644); err != nil {
		t.Fatalf("write %s: %v", path, err)
	}
}
// TestGetDirectoryLogicalSizeWithExclude checks that the logical size walk
// sums all files and that excluding the top-level Library directory removes
// only that subtree (a nested Projects/Library still counts).
func TestGetDirectoryLogicalSizeWithExclude(t *testing.T) {
	base := t.TempDir()
	// 100 + 200 + 300 bytes across three files, one inside ~/Library and one
	// inside a nested Library that must NOT be excluded.
	writeFileWithSize(t, filepath.Join(base, "fileA"), 100)
	writeFileWithSize(t, filepath.Join(base, "Library", "fileB"), 200)
	writeFileWithSize(t, filepath.Join(base, "Projects", "Library", "fileC"), 300)

	total, err := getDirectoryLogicalSizeWithExclude(base, "")
	if err != nil {
		t.Fatalf("getDirectoryLogicalSizeWithExclude (no exclude) error: %v", err)
	}
	if total != 600 {
		t.Fatalf("expected total 600 bytes, got %d", total)
	}

	excluding, err := getDirectoryLogicalSizeWithExclude(base, filepath.Join(base, "Library"))
	if err != nil {
		t.Fatalf("getDirectoryLogicalSizeWithExclude (exclude Library) error: %v", err)
	}
	if excluding != 400 {
		t.Fatalf("expected 400 bytes when excluding top-level Library, got %d", excluding)
	}
}

View File

@@ -1,428 +0,0 @@
//go:build darwin
package main
import (
"fmt"
"strings"
"sync/atomic"
)
// View renders the TUI.
// One of four screens is produced, checked in order: a deleting-progress
// line, a scanning-progress line, the large-files list, or the directory
// listing (overview or normal). A key-hint footer is always appended, plus a
// delete-confirmation line when a delete is armed.
func (m model) View() string {
	var b strings.Builder
	fmt.Fprintln(&b)
	// Header: overview mode shows the location picker; normal mode shows the
	// current path and (once scanning finishes) the total size.
	if m.inOverviewMode() {
		fmt.Fprintf(&b, "%sAnalyze Disk%s\n", colorPurpleBold, colorReset)
		if m.overviewScanning {
			// If no entry has a size yet, show only the wait spinner.
			allPending := true
			for _, entry := range m.entries {
				if entry.Size >= 0 {
					allPending = false
					break
				}
			}
			if allPending {
				fmt.Fprintf(&b, "%s%s%s%s Analyzing disk usage, please wait...%s\n",
					colorCyan, colorBold,
					spinnerFrames[m.spinner],
					colorReset, colorReset)
				return b.String()
			} else {
				fmt.Fprintf(&b, "%sSelect a location to explore:%s ", colorGray, colorReset)
				fmt.Fprintf(&b, "%s%s%s%s Scanning...\n\n", colorCyan, colorBold, spinnerFrames[m.spinner], colorReset)
			}
		} else {
			// Not actively scanning, but some entries may still be pending.
			hasPending := false
			for _, entry := range m.entries {
				if entry.Size < 0 {
					hasPending = true
					break
				}
			}
			if hasPending {
				fmt.Fprintf(&b, "%sSelect a location to explore:%s ", colorGray, colorReset)
				fmt.Fprintf(&b, "%s%s%s%s Scanning...\n\n", colorCyan, colorBold, spinnerFrames[m.spinner], colorReset)
			} else {
				fmt.Fprintf(&b, "%sSelect a location to explore:%s\n\n", colorGray, colorReset)
			}
		}
	} else {
		fmt.Fprintf(&b, "%sAnalyze Disk%s %s%s%s", colorPurpleBold, colorReset, colorGray, displayPath(m.path), colorReset)
		if !m.scanning {
			fmt.Fprintf(&b, " | Total: %s", humanizeBytes(m.totalSize))
		}
		fmt.Fprintf(&b, "\n\n")
	}
	// Deletion in progress: show only the live removed-items counter.
	if m.deleting {
		count := int64(0)
		if m.deleteCount != nil {
			count = atomic.LoadInt64(m.deleteCount)
		}
		fmt.Fprintf(&b, "%s%s%s%s Deleting: %s%s items%s removed, please wait...\n",
			colorCyan, colorBold,
			spinnerFrames[m.spinner],
			colorReset,
			colorYellow, formatNumber(count), colorReset)
		return b.String()
	}
	// Scan in progress: show counters, an optional percentage estimated from
	// the previous scan's total, and the path currently being visited.
	if m.scanning {
		filesScanned, dirsScanned, bytesScanned := m.getScanProgress()
		progressPrefix := ""
		if m.lastTotalFiles > 0 {
			percent := float64(filesScanned) / float64(m.lastTotalFiles) * 100
			// Cap at 100% generally
			if percent > 100 {
				percent = 100
			}
			// While strictly scanning, cap at 99% to avoid "100% but still working" confusion
			if m.scanning && percent >= 100 {
				percent = 99
			}
			progressPrefix = fmt.Sprintf(" %s(%.0f%%)%s", colorCyan, percent, colorReset)
		}
		fmt.Fprintf(&b, "%s%s%s%s Scanning%s: %s%s files%s, %s%s dirs%s, %s%s%s\n",
			colorCyan, colorBold,
			spinnerFrames[m.spinner],
			colorReset,
			progressPrefix,
			colorYellow, formatNumber(filesScanned), colorReset,
			colorYellow, formatNumber(dirsScanned), colorReset,
			colorGreen, humanizeBytes(bytesScanned), colorReset)
		if m.currentPath != nil {
			currentPath := *m.currentPath
			if currentPath != "" {
				shortPath := displayPath(currentPath)
				shortPath = truncateMiddle(shortPath, 50)
				fmt.Fprintf(&b, "%s%s%s\n", colorGray, shortPath, colorReset)
			}
		}
		return b.String()
	}
	if m.showLargeFiles {
		// Large-files screen: a scrollable, multi-selectable list of files,
		// each with a size bar scaled to the largest file.
		if len(m.largeFiles) == 0 {
			fmt.Fprintln(&b, " No large files found (>=100MB)")
		} else {
			viewport := calculateViewport(m.height, true)
			start := max(m.largeOffset, 0)
			end := min(start+viewport, len(m.largeFiles))
			maxLargeSize := int64(1)
			for _, file := range m.largeFiles {
				if file.Size > maxLargeSize {
					maxLargeSize = file.Size
				}
			}
			nameWidth := calculateNameWidth(m.width)
			for idx := start; idx < end; idx++ {
				file := m.largeFiles[idx]
				shortPath := displayPath(file.Path)
				shortPath = truncateMiddle(shortPath, nameWidth)
				paddedPath := padName(shortPath, nameWidth)
				entryPrefix := "   "
				nameColor := ""
				sizeColor := colorGray
				numColor := ""
				// Multi-selection is green; cursor highlight is cyan and does
				// not override a green multi-selected name.
				isMultiSelected := m.largeMultiSelected != nil && m.largeMultiSelected[file.Path]
				selectIcon := "○"
				if isMultiSelected {
					selectIcon = fmt.Sprintf("%s●%s", colorGreen, colorReset)
					nameColor = colorGreen
				}
				if idx == m.largeSelected {
					entryPrefix = fmt.Sprintf(" %s%s▶%s ", colorCyan, colorBold, colorReset)
					if !isMultiSelected {
						nameColor = colorCyan
					}
					sizeColor = colorCyan
					numColor = colorCyan
				}
				size := humanizeBytes(file.Size)
				bar := coloredProgressBar(file.Size, maxLargeSize, 0)
				fmt.Fprintf(&b, "%s%s %s%2d.%s %s | 📄 %s%s%s %s%10s%s\n",
					entryPrefix, selectIcon, numColor, idx+1, colorReset, bar, nameColor, paddedPath, colorReset, sizeColor, size, colorReset)
			}
		}
	} else {
		if len(m.entries) == 0 {
			fmt.Fprintln(&b, " Empty directory")
		} else {
			if m.inOverviewMode() {
				// Overview listing: fixed name width, percent of grand total,
				// "pending.." placeholder for entries not yet measured.
				maxSize := int64(1)
				for _, entry := range m.entries {
					if entry.Size > maxSize {
						maxSize = entry.Size
					}
				}
				totalSize := m.totalSize
				// Overview paths are short; fixed width keeps layout stable.
				nameWidth := 20
				for idx, entry := range m.entries {
					icon := "📁"
					sizeVal := entry.Size
					barValue := max(sizeVal, 0)
					var percent float64
					if totalSize > 0 && sizeVal >= 0 {
						percent = float64(sizeVal) / float64(totalSize) * 100
					} else {
						percent = 0
					}
					percentStr := fmt.Sprintf("%5.1f%%", percent)
					if totalSize == 0 || sizeVal < 0 {
						percentStr = "  --  "
					}
					bar := coloredProgressBar(barValue, maxSize, percent)
					sizeText := "pending.."
					if sizeVal >= 0 {
						sizeText = humanizeBytes(sizeVal)
					}
					// Size color encodes the entry's share of the total.
					sizeColor := colorGray
					if sizeVal >= 0 && totalSize > 0 {
						switch {
						case percent >= 50:
							sizeColor = colorRed
						case percent >= 20:
							sizeColor = colorYellow
						case percent >= 5:
							sizeColor = colorBlue
						default:
							sizeColor = colorGray
						}
					}
					entryPrefix := "   "
					name := trimNameWithWidth(entry.Name, nameWidth)
					paddedName := padName(name, nameWidth)
					nameSegment := fmt.Sprintf("%s %s", icon, paddedName)
					numColor := ""
					percentColor := ""
					if idx == m.selected {
						entryPrefix = fmt.Sprintf(" %s%s▶%s ", colorCyan, colorBold, colorReset)
						nameSegment = fmt.Sprintf("%s%s %s%s", colorCyan, icon, paddedName, colorReset)
						numColor = colorCyan
						percentColor = colorCyan
						sizeColor = colorCyan
					}
					displayIndex := idx + 1
					// Hint column: a broom for cleanable dirs, otherwise the
					// "unused for N" label derived from last access time.
					var hintLabel string
					if entry.IsDir && isCleanableDir(entry.Path) {
						hintLabel = fmt.Sprintf("%s🧹%s", colorYellow, colorReset)
					} else {
						lastAccess := entry.LastAccess
						if lastAccess.IsZero() && entry.Path != "" {
							lastAccess = getLastAccessTime(entry.Path)
						}
						if unusedTime := formatUnusedTime(lastAccess); unusedTime != "" {
							hintLabel = fmt.Sprintf("%s%s%s", colorGray, unusedTime, colorReset)
						}
					}
					if hintLabel == "" {
						fmt.Fprintf(&b, "%s%s%2d.%s %s %s%s%s | %s %s%10s%s\n",
							entryPrefix, numColor, displayIndex, colorReset, bar, percentColor, percentStr, colorReset,
							nameSegment, sizeColor, sizeText, colorReset)
					} else {
						fmt.Fprintf(&b, "%s%s%2d.%s %s %s%s%s | %s %s%10s%s %s\n",
							entryPrefix, numColor, displayIndex, colorReset, bar, percentColor, percentStr, colorReset,
							nameSegment, sizeColor, sizeText, colorReset, hintLabel)
					}
				}
			} else {
				// Normal directory listing: scrollable viewport with
				// multi-select, bars scaled to the largest entry.
				maxSize := int64(1)
				for _, entry := range m.entries {
					if entry.Size > maxSize {
						maxSize = entry.Size
					}
				}
				viewport := calculateViewport(m.height, false)
				nameWidth := calculateNameWidth(m.width)
				start := max(m.offset, 0)
				end := min(start+viewport, len(m.entries))
				for idx := start; idx < end; idx++ {
					entry := m.entries[idx]
					icon := "📄"
					if entry.IsDir {
						icon = "📁"
					}
					size := humanizeBytes(entry.Size)
					name := trimNameWithWidth(entry.Name, nameWidth)
					paddedName := padName(name, nameWidth)
					percent := float64(entry.Size) / float64(m.totalSize) * 100
					percentStr := fmt.Sprintf("%5.1f%%", percent)
					bar := coloredProgressBar(entry.Size, maxSize, percent)
					var sizeColor string
					if percent >= 50 {
						sizeColor = colorRed
					} else if percent >= 20 {
						sizeColor = colorYellow
					} else if percent >= 5 {
						sizeColor = colorBlue
					} else {
						sizeColor = colorGray
					}
					isMultiSelected := m.multiSelected != nil && m.multiSelected[entry.Path]
					selectIcon := "○"
					nameColor := ""
					if isMultiSelected {
						selectIcon = fmt.Sprintf("%s●%s", colorGreen, colorReset)
						nameColor = colorGreen
					}
					entryPrefix := "   "
					nameSegment := fmt.Sprintf("%s %s", icon, paddedName)
					if nameColor != "" {
						nameSegment = fmt.Sprintf("%s%s %s%s", nameColor, icon, paddedName, colorReset)
					}
					numColor := ""
					percentColor := ""
					if idx == m.selected {
						entryPrefix = fmt.Sprintf(" %s%s▶%s ", colorCyan, colorBold, colorReset)
						if !isMultiSelected {
							nameSegment = fmt.Sprintf("%s%s %s%s", colorCyan, icon, paddedName, colorReset)
						}
						numColor = colorCyan
						percentColor = colorCyan
						sizeColor = colorCyan
					}
					displayIndex := idx + 1
					var hintLabel string
					if entry.IsDir && isCleanableDir(entry.Path) {
						hintLabel = fmt.Sprintf("%s🧹%s", colorYellow, colorReset)
					} else {
						lastAccess := entry.LastAccess
						if lastAccess.IsZero() && entry.Path != "" {
							lastAccess = getLastAccessTime(entry.Path)
						}
						if unusedTime := formatUnusedTime(lastAccess); unusedTime != "" {
							hintLabel = fmt.Sprintf("%s%s%s", colorGray, unusedTime, colorReset)
						}
					}
					if hintLabel == "" {
						fmt.Fprintf(&b, "%s%s %s%2d.%s %s %s%s%s | %s %s%10s%s\n",
							entryPrefix, selectIcon, numColor, displayIndex, colorReset, bar, percentColor, percentStr, colorReset,
							nameSegment, sizeColor, size, colorReset)
					} else {
						fmt.Fprintf(&b, "%s%s %s%2d.%s %s %s%s%s | %s %s%10s%s %s\n",
							entryPrefix, selectIcon, numColor, displayIndex, colorReset, bar, percentColor, percentStr, colorReset,
							nameSegment, sizeColor, size, colorReset, hintLabel)
					}
				}
			}
		}
	}
	fmt.Fprintln(&b)
	// Footer: key hints vary per screen; selection counts are shown inline.
	if m.inOverviewMode() {
		if len(m.history) > 0 {
			fmt.Fprintf(&b, "%s↑↓←→ | Enter | R Refresh | O Open | F File | ← Back | Q Quit%s\n", colorGray, colorReset)
		} else {
			fmt.Fprintf(&b, "%s↑↓→ | Enter | R Refresh | O Open | F File | Q Quit%s\n", colorGray, colorReset)
		}
	} else if m.showLargeFiles {
		selectCount := len(m.largeMultiSelected)
		if selectCount > 0 {
			fmt.Fprintf(&b, "%s↑↓← | Space Select | R Refresh | O Open | F File | ⌫ Del(%d) | ← Back | Q Quit%s\n", colorGray, selectCount, colorReset)
		} else {
			fmt.Fprintf(&b, "%s↑↓← | Space Select | R Refresh | O Open | F File | ⌫ Del | ← Back | Q Quit%s\n", colorGray, colorReset)
		}
	} else {
		largeFileCount := len(m.largeFiles)
		selectCount := len(m.multiSelected)
		if selectCount > 0 {
			if largeFileCount > 0 {
				fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del(%d) | T Top(%d) | Q Quit%s\n", colorGray, selectCount, largeFileCount, colorReset)
			} else {
				fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del(%d) | Q Quit%s\n", colorGray, selectCount, colorReset)
			}
		} else {
			if largeFileCount > 0 {
				fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del | T Top(%d) | Q Quit%s\n", colorGray, largeFileCount, colorReset)
			} else {
				fmt.Fprintf(&b, "%s↑↓←→ | Space Select | Enter | R Refresh | O Open | F File | ⌫ Del | Q Quit%s\n", colorGray, colorReset)
			}
		}
	}
	// Armed delete confirmation: summarize either the multi-selection (count
	// and aggregate size) or the single targeted entry.
	if m.deleteConfirm && m.deleteTarget != nil {
		fmt.Fprintln(&b)
		var deleteCount int
		var totalDeleteSize int64
		if m.showLargeFiles && len(m.largeMultiSelected) > 0 {
			deleteCount = len(m.largeMultiSelected)
			for path := range m.largeMultiSelected {
				for _, file := range m.largeFiles {
					if file.Path == path {
						totalDeleteSize += file.Size
						break
					}
				}
			}
		} else if !m.showLargeFiles && len(m.multiSelected) > 0 {
			deleteCount = len(m.multiSelected)
			for path := range m.multiSelected {
				for _, entry := range m.entries {
					if entry.Path == path {
						totalDeleteSize += entry.Size
						break
					}
				}
			}
		}
		if deleteCount > 1 {
			fmt.Fprintf(&b, "%sDelete:%s %d items (%s) %sPress Enter to confirm | ESC cancel%s\n",
				colorRed, colorReset,
				deleteCount, humanizeBytes(totalDeleteSize),
				colorGray, colorReset)
		} else {
			fmt.Fprintf(&b, "%sDelete:%s %s (%s) %sPress Enter to confirm | ESC cancel%s\n",
				colorRed, colorReset,
				m.deleteTarget.Name, humanizeBytes(m.deleteTarget.Size),
				colorGray, colorReset)
		}
	}
	return b.String()
}
// calculateViewport returns how many list rows fit in the current terminal
// height, reserving header/footer space (one line less on the large-files
// screen) and clamping the result to [1, 30]. An unknown height (<= 0)
// yields defaultViewport.
func calculateViewport(termHeight int, isLargeFiles bool) int {
	if termHeight <= 0 {
		return defaultViewport
	}
	reserved := 6 // Header + footer
	if isLargeFiles {
		reserved = 5
	}
	rows := termHeight - reserved
	switch {
	case rows < 1:
		return 1
	case rows > 30:
		return 30
	default:
		return rows
	}
}

View File

@@ -1,200 +1,674 @@
// Package main provides the mo status command for real-time system monitoring.
//go:build windows
package main
import (
"context"
"fmt"
"os"
"path/filepath"
"os/exec"
"runtime"
"strconv"
"strings"
"sync"
"time"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
"github.com/shirou/gopsutil/v3/cpu"
"github.com/shirou/gopsutil/v3/disk"
"github.com/shirou/gopsutil/v3/host"
"github.com/shirou/gopsutil/v3/mem"
"github.com/shirou/gopsutil/v3/net"
"github.com/shirou/gopsutil/v3/process"
)
const refreshInterval = time.Second
// Styles
// Version/BuildTime are presumably overridden at build time (e.g. via
// -ldflags) — confirm against the build scripts. The lipgloss styles below
// define the status screen's color palette.
var (
	Version   = "dev"
	BuildTime = ""

	titleStyle  = lipgloss.NewStyle().Foreground(lipgloss.Color("#C79FD7")).Bold(true)
	headerStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#87CEEB")).Bold(true)
	labelStyle  = lipgloss.NewStyle().Foreground(lipgloss.Color("#888888"))
	valueStyle  = lipgloss.NewStyle().Foreground(lipgloss.Color("#FFFFFF"))
	okStyle     = lipgloss.NewStyle().Foreground(lipgloss.Color("#A5D6A7"))
	warnStyle   = lipgloss.NewStyle().Foreground(lipgloss.Color("#FFD75F"))
	dangerStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#FF5F5F")).Bold(true)
	dimStyle    = lipgloss.NewStyle().Foreground(lipgloss.Color("#666666"))
	cardStyle   = lipgloss.NewStyle().Border(lipgloss.RoundedBorder()).BorderForeground(lipgloss.Color("#444444")).Padding(0, 1)
)
type tickMsg struct{}
type animTickMsg struct{}
// Metrics snapshot
type MetricsSnapshot struct {
CollectedAt time.Time
HealthScore int
HealthMessage string
type metricsMsg struct {
data MetricsSnapshot
err error
// Hardware
Hostname string
OS string
Platform string
Uptime time.Duration
// CPU
CPUModel string
CPUCores int
CPUPercent float64
CPUPerCore []float64
// Memory
MemTotal uint64
MemUsed uint64
MemPercent float64
SwapTotal uint64
SwapUsed uint64
SwapPercent float64
// Disk
Disks []DiskInfo
// Network
Networks []NetworkInfo
// Processes
TopProcesses []ProcessInfo
}
// DiskInfo describes usage of one mounted partition.
type DiskInfo struct {
	Device      string
	Mountpoint  string
	Total       uint64
	Used        uint64
	Free        uint64
	UsedPercent float64
	Fstype      string
}
// NetworkInfo holds cumulative I/O counters for one network interface.
type NetworkInfo struct {
	Name        string
	BytesSent   uint64
	BytesRecv   uint64
	PacketsSent uint64
	PacketsRecv uint64
}
// ProcessInfo is one row in the top-processes table.
type ProcessInfo struct {
	PID    int32
	Name   string
	CPU    float64 // CPU usage percent
	Memory float32 // memory usage percent
}
// Collector gathers system metrics. prevNet/prevNetTime retain the previous
// network counters (guarded by mu) — presumably for rate computation by a
// caller not visible here; confirm usage.
type Collector struct {
	prevNet     map[string]net.IOCountersStat
	prevNetTime time.Time
	mu          sync.Mutex
}
// NewCollector returns a Collector whose previous-network-counters map is
// ready for use.
func NewCollector() *Collector {
	c := &Collector{}
	c.prevNet = make(map[string]net.IOCountersStat)
	return c
}
// Collect gathers a full MetricsSnapshot. Each subsystem (host, CPU, memory,
// disk, network, processes) is sampled in its own goroutine under a shared
// 5-second context; writes into the shared snapshot are serialized with mu.
// Errors from any subsystem leave its fields at zero values.
func (c *Collector) Collect() MetricsSnapshot {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	var (
		snapshot MetricsSnapshot
		wg       sync.WaitGroup
		mu       sync.Mutex
	)
	snapshot.CollectedAt = time.Now()
	// Host info
	wg.Add(1)
	go func() {
		defer wg.Done()
		if info, err := host.InfoWithContext(ctx); err == nil {
			mu.Lock()
			snapshot.Hostname = info.Hostname
			snapshot.OS = info.OS
			snapshot.Platform = fmt.Sprintf("%s %s", info.Platform, info.PlatformVersion)
			snapshot.Uptime = time.Duration(info.Uptime) * time.Second
			mu.Unlock()
		}
	}()
	// CPU info
	wg.Add(1)
	go func() {
		defer wg.Done()
		if cpuInfo, err := cpu.InfoWithContext(ctx); err == nil && len(cpuInfo) > 0 {
			mu.Lock()
			snapshot.CPUModel = cpuInfo[0].ModelName
			snapshot.CPUCores = runtime.NumCPU()
			mu.Unlock()
		}
		// NOTE(review): the two sequential 500ms sampling windows below make
		// this goroutine take ~1s per collection cycle.
		if percent, err := cpu.PercentWithContext(ctx, 500*time.Millisecond, false); err == nil && len(percent) > 0 {
			mu.Lock()
			snapshot.CPUPercent = percent[0]
			mu.Unlock()
		}
		if perCore, err := cpu.PercentWithContext(ctx, 500*time.Millisecond, true); err == nil {
			mu.Lock()
			snapshot.CPUPerCore = perCore
			mu.Unlock()
		}
	}()
	// Memory
	wg.Add(1)
	go func() {
		defer wg.Done()
		if memInfo, err := mem.VirtualMemoryWithContext(ctx); err == nil {
			mu.Lock()
			snapshot.MemTotal = memInfo.Total
			snapshot.MemUsed = memInfo.Used
			snapshot.MemPercent = memInfo.UsedPercent
			mu.Unlock()
		}
		if swapInfo, err := mem.SwapMemoryWithContext(ctx); err == nil {
			mu.Lock()
			snapshot.SwapTotal = swapInfo.Total
			snapshot.SwapUsed = swapInfo.Used
			snapshot.SwapPercent = swapInfo.UsedPercent
			mu.Unlock()
		}
	}()
	// Disk
	wg.Add(1)
	go func() {
		defer wg.Done()
		if partitions, err := disk.PartitionsWithContext(ctx, false); err == nil {
			var disks []DiskInfo
			for _, p := range partitions {
				// Skip non-physical drives
				// NOTE(review): only drive letters C:–F: pass this filter;
				// drives G: and beyond are silently ignored — confirm intended.
				if !strings.HasPrefix(p.Device, "C:") &&
					!strings.HasPrefix(p.Device, "D:") &&
					!strings.HasPrefix(p.Device, "E:") &&
					!strings.HasPrefix(p.Device, "F:") {
					continue
				}
				if usage, err := disk.UsageWithContext(ctx, p.Mountpoint); err == nil {
					disks = append(disks, DiskInfo{
						Device:      p.Device,
						Mountpoint:  p.Mountpoint,
						Total:       usage.Total,
						Used:        usage.Used,
						Free:        usage.Free,
						UsedPercent: usage.UsedPercent,
						Fstype:      p.Fstype,
					})
				}
			}
			mu.Lock()
			snapshot.Disks = disks
			mu.Unlock()
		}
	}()
	// Network
	wg.Add(1)
	go func() {
		defer wg.Done()
		if netIO, err := net.IOCountersWithContext(ctx, true); err == nil {
			var networks []NetworkInfo
			for _, io := range netIO {
				// Skip loopback and inactive interfaces
				if io.Name == "Loopback Pseudo-Interface 1" || (io.BytesSent == 0 && io.BytesRecv == 0) {
					continue
				}
				networks = append(networks, NetworkInfo{
					Name:        io.Name,
					BytesSent:   io.BytesSent,
					BytesRecv:   io.BytesRecv,
					PacketsSent: io.PacketsSent,
					PacketsRecv: io.PacketsRecv,
				})
			}
			mu.Lock()
			snapshot.Networks = networks
			mu.Unlock()
		}
	}()
	// Top Processes
	wg.Add(1)
	go func() {
		defer wg.Done()
		procs, err := process.ProcessesWithContext(ctx)
		if err != nil {
			return
		}
		var procInfos []ProcessInfo
		for _, p := range procs {
			name, err := p.NameWithContext(ctx)
			if err != nil {
				continue
			}
			cpuPercent, _ := p.CPUPercentWithContext(ctx)
			memPercent, _ := p.MemoryPercentWithContext(ctx)
			if cpuPercent > 0.1 || memPercent > 0.1 {
				procInfos = append(procInfos, ProcessInfo{
					PID:    p.Pid,
					Name:   name,
					CPU:    cpuPercent,
					Memory: memPercent,
				})
			}
		}
		// Sort by CPU usage
		// NOTE(review): O(n^2) selection sort; fine for the filtered list but
		// sort.Slice would be simpler.
		for i := 0; i < len(procInfos)-1; i++ {
			for j := i + 1; j < len(procInfos); j++ {
				if procInfos[j].CPU > procInfos[i].CPU {
					procInfos[i], procInfos[j] = procInfos[j], procInfos[i]
				}
			}
		}
		// Take top 5
		if len(procInfos) > 5 {
			procInfos = procInfos[:5]
		}
		mu.Lock()
		snapshot.TopProcesses = procInfos
		mu.Unlock()
	}()
	wg.Wait()
	// Calculate health score
	snapshot.HealthScore, snapshot.HealthMessage = calculateHealthScore(snapshot)
	return snapshot
}
// calculateHealthScore converts a snapshot into a 0-100 score plus a short
// message: either the comma-joined list of detected issues, or a label
// derived from the score when nothing is wrong.
func calculateHealthScore(s MetricsSnapshot) (int, string) {
	score := 100
	var issues []string
	// CPU penalty (30% weight)
	switch {
	case s.CPUPercent > 90:
		score -= 30
		issues = append(issues, "High CPU")
	case s.CPUPercent > 70:
		score -= 15
		issues = append(issues, "Elevated CPU")
	}
	// Memory penalty (25% weight)
	switch {
	case s.MemPercent > 90:
		score -= 25
		issues = append(issues, "High Memory")
	case s.MemPercent > 80:
		score -= 12
		issues = append(issues, "Elevated Memory")
	}
	// Disk penalty (20% weight) — only the first problematic disk counts.
	for _, d := range s.Disks {
		if d.UsedPercent > 95 {
			score -= 20
			issues = append(issues, fmt.Sprintf("Disk %s Critical", d.Device))
			break
		}
		if d.UsedPercent > 85 {
			score -= 10
			issues = append(issues, fmt.Sprintf("Disk %s Low", d.Device))
			break
		}
	}
	// Swap penalty (10% weight)
	if s.SwapPercent > 80 {
		score -= 10
		issues = append(issues, "High Swap")
	}
	if score < 0 {
		score = 0
	}
	var msg string
	switch {
	case len(issues) > 0:
		msg = strings.Join(issues, ", ")
	case score >= 90:
		msg = "Excellent"
	case score >= 70:
		msg = "Good"
	case score >= 50:
		msg = "Fair"
	default:
		msg = "Poor"
	}
	return score, msg
}
// Model for Bubble Tea
type model struct {
collector *Collector
width int
height int
metrics MetricsSnapshot
errMessage string
ready bool
lastUpdated time.Time
collecting bool
animFrame int
catHidden bool // true = hidden, false = visible
collector *Collector
metrics MetricsSnapshot
animFrame int
catHidden bool
ready bool
collecting bool
width int
height int
}
// getConfigPath returns the path to the status preferences file.
func getConfigPath() string {
home, err := os.UserHomeDir()
if err != nil {
return ""
}
return filepath.Join(home, ".config", "mole", "status_prefs")
}
// loadCatHidden reads the persisted cat-visibility preference; any missing
// path or read error defaults to visible (false).
func loadCatHidden() bool {
	prefPath := getConfigPath()
	if prefPath == "" {
		return false
	}
	raw, err := os.ReadFile(prefPath)
	if err != nil {
		return false
	}
	return strings.TrimSpace(string(raw)) == "cat_hidden=true"
}
// saveCatHidden persists the cat-visibility preference, creating the config
// directory if needed; failures are silently ignored (best-effort).
func saveCatHidden(hidden bool) {
	prefPath := getConfigPath()
	if prefPath == "" {
		return
	}
	// Ensure directory exists
	if err := os.MkdirAll(filepath.Dir(prefPath), 0755); err != nil {
		return
	}
	value := "cat_hidden=false"
	if hidden {
		value = "cat_hidden=true"
	}
	_ = os.WriteFile(prefPath, []byte(value+"\n"), 0644)
}
// Messages
type tickMsg time.Time
type metricsMsg MetricsSnapshot
// newModel builds the initial TUI model with a fresh collector and the
// persisted cat-visibility preference.
func newModel() model {
	m := model{}
	m.collector = NewCollector()
	m.catHidden = loadCatHidden()
	m.animFrame = 0
	return m
}
func (m model) Init() tea.Cmd {
return tea.Batch(tickAfter(0), animTick())
return tea.Batch(
m.collectMetrics(),
tickCmd(),
)
}
// tickCmd schedules a tickMsg one second from now.
func tickCmd() tea.Cmd {
	return tea.Tick(time.Second, func(now time.Time) tea.Msg {
		return tickMsg(now)
	})
}
// collectMetrics returns a command that synchronously gathers a snapshot and
// delivers it as a metricsMsg.
func (m model) collectMetrics() tea.Cmd {
	c := m.collector
	return func() tea.Msg {
		return metricsMsg(c.Collect())
	}
}
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
switch msg := msg.(type) {
case tea.KeyMsg:
switch msg.String() {
case "q", "esc", "ctrl+c":
case "q", "ctrl+c":
return m, tea.Quit
case "k":
// Toggle cat visibility and persist preference
case "c":
m.catHidden = !m.catHidden
saveCatHidden(m.catHidden)
return m, nil
case "r":
m.collecting = true
return m, m.collectMetrics()
}
case tea.WindowSizeMsg:
m.width = msg.Width
m.height = msg.Height
return m, nil
case tickMsg:
if m.collecting {
return m, nil
}
m.collecting = true
return m, m.collectCmd()
case metricsMsg:
if msg.err != nil {
m.errMessage = msg.err.Error()
} else {
m.errMessage = ""
}
m.metrics = msg.data
m.lastUpdated = msg.data.CollectedAt
m.collecting = false
// Mark ready after first successful data collection.
if !m.ready {
m.ready = true
}
return m, tickAfter(refreshInterval)
case animTickMsg:
m.animFrame++
return m, animTickWithSpeed(m.metrics.CPU.Usage)
if m.animFrame%2 == 0 && !m.collecting {
return m, tea.Batch(
m.collectMetrics(),
tickCmd(),
)
}
return m, tickCmd()
case metricsMsg:
m.metrics = MetricsSnapshot(msg)
m.ready = true
m.collecting = false
}
return m, nil
}
func (m model) View() string {
if !m.ready {
return "Loading..."
return "\n Loading system metrics..."
}
header := renderHeader(m.metrics, m.errMessage, m.animFrame, m.width, m.catHidden)
cardWidth := 0
if m.width > 80 {
cardWidth = maxInt(24, m.width/2-4)
}
cards := buildCards(m.metrics, cardWidth)
var b strings.Builder
if m.width <= 80 {
var rendered []string
for i, c := range cards {
if i > 0 {
rendered = append(rendered, "")
// Header with mole animation
moleFrame := getMoleFrame(m.animFrame, m.catHidden)
b.WriteString("\n")
b.WriteString(titleStyle.Render(" 🐹 Mole System Status"))
b.WriteString(" ")
b.WriteString(moleFrame)
b.WriteString("\n\n")
// Health score
healthColor := okStyle
if m.metrics.HealthScore < 50 {
healthColor = dangerStyle
} else if m.metrics.HealthScore < 70 {
healthColor = warnStyle
}
b.WriteString(fmt.Sprintf(" Health: %s %s\n\n",
healthColor.Render(fmt.Sprintf("%d%%", m.metrics.HealthScore)),
dimStyle.Render(m.metrics.HealthMessage),
))
// System info
b.WriteString(headerStyle.Render(" 📍 System"))
b.WriteString("\n")
b.WriteString(fmt.Sprintf(" %s %s\n", labelStyle.Render("Host:"), valueStyle.Render(m.metrics.Hostname)))
b.WriteString(fmt.Sprintf(" %s %s\n", labelStyle.Render("OS:"), valueStyle.Render(m.metrics.Platform)))
b.WriteString(fmt.Sprintf(" %s %s\n", labelStyle.Render("Uptime:"), valueStyle.Render(formatDuration(m.metrics.Uptime))))
b.WriteString("\n")
// CPU
b.WriteString(headerStyle.Render(" ⚡ CPU"))
b.WriteString("\n")
cpuColor := getPercentColor(m.metrics.CPUPercent)
b.WriteString(fmt.Sprintf(" %s %s\n", labelStyle.Render("Model:"), valueStyle.Render(truncateString(m.metrics.CPUModel, 50))))
b.WriteString(fmt.Sprintf(" %s %s (%d cores)\n",
labelStyle.Render("Usage:"),
cpuColor.Render(fmt.Sprintf("%.1f%%", m.metrics.CPUPercent)),
m.metrics.CPUCores,
))
b.WriteString(fmt.Sprintf(" %s\n", renderProgressBar(m.metrics.CPUPercent, 30)))
b.WriteString("\n")
// Memory
b.WriteString(headerStyle.Render(" 🧠 Memory"))
b.WriteString("\n")
memColor := getPercentColor(m.metrics.MemPercent)
b.WriteString(fmt.Sprintf(" %s %s / %s %s\n",
labelStyle.Render("RAM:"),
memColor.Render(formatBytes(m.metrics.MemUsed)),
valueStyle.Render(formatBytes(m.metrics.MemTotal)),
memColor.Render(fmt.Sprintf("(%.1f%%)", m.metrics.MemPercent)),
))
b.WriteString(fmt.Sprintf(" %s\n", renderProgressBar(m.metrics.MemPercent, 30)))
if m.metrics.SwapTotal > 0 {
b.WriteString(fmt.Sprintf(" %s %s / %s\n",
labelStyle.Render("Swap:"),
valueStyle.Render(formatBytes(m.metrics.SwapUsed)),
valueStyle.Render(formatBytes(m.metrics.SwapTotal)),
))
}
b.WriteString("\n")
// Disk
b.WriteString(headerStyle.Render(" 💾 Disks"))
b.WriteString("\n")
for _, d := range m.metrics.Disks {
diskColor := getPercentColor(d.UsedPercent)
b.WriteString(fmt.Sprintf(" %s %s / %s %s\n",
labelStyle.Render(d.Device),
diskColor.Render(formatBytes(d.Used)),
valueStyle.Render(formatBytes(d.Total)),
diskColor.Render(fmt.Sprintf("(%.1f%%)", d.UsedPercent)),
))
b.WriteString(fmt.Sprintf(" %s\n", renderProgressBar(d.UsedPercent, 30)))
}
b.WriteString("\n")
// Top Processes
if len(m.metrics.TopProcesses) > 0 {
b.WriteString(headerStyle.Render(" 📊 Top Processes"))
b.WriteString("\n")
for _, p := range m.metrics.TopProcesses {
b.WriteString(fmt.Sprintf(" %s %s (CPU: %.1f%%, Mem: %.1f%%)\n",
dimStyle.Render(fmt.Sprintf("[%d]", p.PID)),
valueStyle.Render(truncateString(p.Name, 20)),
p.CPU,
p.Memory,
))
}
b.WriteString("\n")
}
// Network
if len(m.metrics.Networks) > 0 {
b.WriteString(headerStyle.Render(" 🌐 Network"))
b.WriteString("\n")
for i, n := range m.metrics.Networks {
if i >= 3 {
break
}
rendered = append(rendered, renderCard(c, cardWidth, 0))
b.WriteString(fmt.Sprintf(" %s ↑%s ↓%s\n",
labelStyle.Render(truncateString(n.Name, 20)+":"),
valueStyle.Render(formatBytes(n.BytesSent)),
valueStyle.Render(formatBytes(n.BytesRecv)),
))
}
result := header + "\n" + lipgloss.JoinVertical(lipgloss.Left, rendered...)
// Add extra newline if cat is hidden for better spacing
if m.catHidden {
result = header + "\n\n" + lipgloss.JoinVertical(lipgloss.Left, rendered...)
}
return result
b.WriteString("\n")
}
twoCol := renderTwoColumns(cards, m.width)
// Add extra newline if cat is hidden for better spacing
if m.catHidden {
return header + "\n\n" + twoCol
// Footer
b.WriteString(dimStyle.Render(" [q] quit [r] refresh [c] toggle mole"))
b.WriteString("\n")
return b.String()
}
func getMoleFrame(frame int, hidden bool) string {
if hidden {
return ""
}
return header + "\n" + twoCol
}
func (m model) collectCmd() tea.Cmd {
return func() tea.Msg {
data, err := m.collector.Collect()
return metricsMsg{data: data, err: err}
frames := []string{
"🐹",
"🐹.",
"🐹..",
"🐹...",
}
return frames[frame%len(frames)]
}
func tickAfter(delay time.Duration) tea.Cmd {
return tea.Tick(delay, func(time.Time) tea.Msg { return tickMsg{} })
// renderProgressBar renders a colored bar of `width` cells, filled in
// proportion to percent (clamped to [0, 100]); color escalates from ok to
// warn (>70) to danger (>85).
//
// Fix: a non-positive width previously made strings.Repeat panic with a
// negative count (width - filled < 0); now it yields an empty bar.
func renderProgressBar(percent float64, width int) string {
	if width <= 0 {
		return ""
	}
	filled := int(percent / 100 * float64(width))
	if filled > width {
		filled = width
	}
	if filled < 0 {
		filled = 0
	}
	color := okStyle
	if percent > 85 {
		color = dangerStyle
	} else if percent > 70 {
		color = warnStyle
	}
	bar := strings.Repeat("█", filled) + strings.Repeat("░", width-filled)
	return color.Render(bar)
}
func animTick() tea.Cmd {
return tea.Tick(200*time.Millisecond, func(time.Time) tea.Msg { return animTickMsg{} })
// getPercentColor maps a usage percentage to a severity style:
// danger above 85, warn above 70, ok otherwise.
func getPercentColor(percent float64) lipgloss.Style {
	switch {
	case percent > 85:
		return dangerStyle
	case percent > 70:
		return warnStyle
	default:
		return okStyle
	}
}
func animTickWithSpeed(cpuUsage float64) tea.Cmd {
// Higher CPU = faster animation.
interval := max(300-int(cpuUsage*2.5), 50)
return tea.Tick(time.Duration(interval)*time.Millisecond, func(time.Time) tea.Msg { return animTickMsg{} })
// formatBytes renders a byte count in binary units (KB/MB/GB/... at 1024
// steps), e.g. 512 -> "512 B", 1536 -> "1.5 KB".
func formatBytes(bytes uint64) string {
	const unit = 1024
	if bytes < unit {
		return fmt.Sprintf("%d B", bytes)
	}
	value := float64(bytes)
	const suffixes = "KMGTPE"
	idx := 0
	for value >= unit*unit && idx < len(suffixes)-1 {
		value /= unit
		idx++
	}
	return fmt.Sprintf("%.1f %cB", value/unit, suffixes[idx])
}
func formatDuration(d time.Duration) string {
days := int(d.Hours() / 24)
hours := int(d.Hours()) % 24
minutes := int(d.Minutes()) % 60
if days > 0 {
return fmt.Sprintf("%dd %dh %dm", days, hours, minutes)
}
if hours > 0 {
return fmt.Sprintf("%dh %dm", hours, minutes)
}
return fmt.Sprintf("%dm", minutes)
}
// truncateString shortens s to at most maxLen characters, appending "..."
// when truncation occurs.
//
// Fixes: (1) maxLen <= 3 previously produced a negative slice bound and
// panicked; (2) byte-based slicing could cut a multi-byte UTF-8 rune in
// half — truncation is now rune-aware. ASCII behavior for maxLen > 3 is
// unchanged.
func truncateString(s string, maxLen int) string {
	if maxLen <= 0 {
		return ""
	}
	runes := []rune(s)
	if len(runes) <= maxLen {
		return s
	}
	if maxLen <= 3 {
		// Not enough room for an ellipsis; hard-cut instead of panicking.
		return string(runes[:maxLen])
	}
	return string(runes[:maxLen-3]) + "..."
}
// getWindowsVersion gets the detailed Windows edition name via PowerShell
// (Win32_OperatingSystem.Caption), falling back to "Windows" on any error
// or after the 2-second timeout.
func getWindowsVersion() string {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	out, err := exec.CommandContext(ctx, "powershell", "-Command",
		"(Get-CimInstance Win32_OperatingSystem).Caption").Output()
	if err != nil {
		return "Windows"
	}
	return strings.TrimSpace(string(out))
}
// getBatteryInfo queries WMI (via PowerShell) for the laptop battery.
// Returns (percent, isCharging, ok); ok is false when no battery is
// present or either query fails. Both PowerShell calls share one
// 2-second context budget.
// NOTE(review): with multiple batteries WMI emits one value per line, so
// Atoi fails and this reports "no battery" — confirm that is acceptable.
func getBatteryInfo() (int, bool, bool) {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
cmd := exec.CommandContext(ctx, "powershell", "-Command",
"(Get-CimInstance Win32_Battery).EstimatedChargeRemaining")
output, err := cmd.Output()
if err != nil {
return 0, false, false
}
percent, err := strconv.Atoi(strings.TrimSpace(string(output)))
if err != nil {
return 0, false, false
}
// Check if charging
cmdStatus := exec.CommandContext(ctx, "powershell", "-Command",
"(Get-CimInstance Win32_Battery).BatteryStatus")
statusOutput, _ := cmdStatus.Output()
status, _ := strconv.Atoi(strings.TrimSpace(string(statusOutput)))
// Win32_Battery.BatteryStatus value 2 means the unit is running on AC.
isCharging := status == 2 // 2 = AC Power
return percent, isCharging, true
}
// main starts the Bubble Tea status UI in the alternate screen buffer and
// exits non-zero if the program cannot run.
func main() {
	p := tea.NewProgram(newModel(), tea.WithAltScreen())
	if _, err := p.Run(); err != nil {
		// Print the error exactly once (the extract showed two
		// consecutive, redundant error lines).
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
}

View File

@@ -1,294 +0,0 @@
package main
import (
"context"
"fmt"
"os/exec"
"sync"
"time"
"github.com/shirou/gopsutil/v3/disk"
"github.com/shirou/gopsutil/v3/host"
"github.com/shirou/gopsutil/v3/net"
)
// MetricsSnapshot is one complete, timestamped reading of every metric the
// collector gathers; the UI renders a snapshot without performing any
// further I/O of its own.
type MetricsSnapshot struct {
CollectedAt time.Time
Host string
Platform string
Uptime string
Procs uint64
Hardware HardwareInfo
HealthScore int // 0-100 system health score
HealthScoreMsg string // Brief explanation
CPU CPUStatus
GPU []GPUStatus
Memory MemoryStatus
Disks []DiskStatus
DiskIO DiskIOStatus
Network []NetworkStatus
Proxy ProxyStatus
Batteries []BatteryStatus
Thermal ThermalStatus
Sensors []SensorReading
Bluetooth []BluetoothDevice
TopProcesses []ProcessInfo
}
// HardwareInfo holds static machine facts; collected rarely and cached.
type HardwareInfo struct {
Model string // MacBook Pro 14-inch, 2021
CPUModel string // Apple M1 Pro / Intel Core i7
TotalRAM string // 16GB
DiskSize string // 512GB
OSVersion string // macOS Sonoma 14.5
RefreshRate string // 120Hz / 60Hz
}
// DiskIOStatus is the aggregate read/write throughput since last sample.
type DiskIOStatus struct {
ReadRate float64 // MB/s
WriteRate float64 // MB/s
}
// ProcessInfo is one row of the top-processes list (CPU% / memory%).
type ProcessInfo struct {
Name string
CPU float64
Memory float64
}
// CPUStatus carries total and per-core utilization plus load averages.
type CPUStatus struct {
Usage float64
PerCore []float64
PerCoreEstimated bool
Load1 float64
Load5 float64
Load15 float64
CoreCount int
LogicalCPU int
PCoreCount int // Performance cores (Apple Silicon)
ECoreCount int // Efficiency cores (Apple Silicon)
}
// GPUStatus describes one GPU; Usage may be -1 when unknown.
type GPUStatus struct {
Name string
Usage float64
MemoryUsed float64
MemoryTotal float64
CoreCount int
Note string
}
// MemoryStatus covers RAM, swap, and platform memory-pressure signals.
type MemoryStatus struct {
Used uint64
Total uint64
UsedPercent float64
SwapUsed uint64
SwapTotal uint64
Cached uint64 // File cache that can be freed if needed
Pressure string // macOS memory pressure: normal/warn/critical
}
// DiskStatus is one mounted volume's capacity and usage.
type DiskStatus struct {
Mount string
Device string
Used uint64
Total uint64
UsedPercent float64
Fstype string
External bool
}
// NetworkStatus is one interface's current throughput and address.
type NetworkStatus struct {
Name string
RxRateMBs float64
TxRateMBs float64
IP string
}
// ProxyStatus reports whether a system network proxy is configured.
type ProxyStatus struct {
Enabled bool
Type string // HTTP, SOCKS, System
Host string
}
// BatteryStatus is one battery's charge, state, and health data.
type BatteryStatus struct {
Percent float64
Status string
TimeLeft string
Health string
CycleCount int
Capacity int // Maximum capacity percentage (e.g., 85 means 85% of original)
}
// ThermalStatus groups temperature, fan, and power readings.
type ThermalStatus struct {
CPUTemp float64
GPUTemp float64
FanSpeed int
FanCount int
SystemPower float64 // System power consumption in Watts
AdapterPower float64 // AC adapter max power in Watts
BatteryPower float64 // Battery charge/discharge power in Watts (positive = discharging)
}
// SensorReading is a single labeled sensor value (typically °C).
type SensorReading struct {
Label string
Value float64
Unit string
Note string
}
// BluetoothDevice is one paired device and its connection state.
type BluetoothDevice struct {
Name string
Connected bool
Battery string
}
// Collector owns all sampling state: previous counters for rate math and
// layered caches (static hardware info, slow Bluetooth, fast GPU/disk).
// It is driven by Collect and is not safe for concurrent use by multiple
// goroutines calling Collect simultaneously.
type Collector struct {
// Static cache.
cachedHW HardwareInfo
lastHWAt time.Time
hasStatic bool
// Slow cache (30s-1m).
lastBTAt time.Time
lastBT []BluetoothDevice
// Fast metrics (1s).
prevNet map[string]net.IOCountersStat
lastNetAt time.Time
lastGPUAt time.Time
cachedGPU []GPUStatus
prevDiskIO disk.IOCountersStat
lastDiskAt time.Time
}
// NewCollector builds a Collector with its per-interface network counter
// map initialized; all other caches start empty and fill lazily.
func NewCollector() *Collector {
	c := &Collector{}
	c.prevNet = map[string]net.IOCountersStat{}
	return c
}
// Collect gathers one MetricsSnapshot. Independent metrics are sampled in
// parallel goroutines; individual errors are merged into a single error
// value while the snapshot still carries whatever data succeeded.
// Hardware info is refreshed at most every 10 minutes, Bluetooth every
// 30 seconds; everything else is sampled fresh on each call.
func (c *Collector) Collect() (MetricsSnapshot, error) {
now := time.Now()
// Host info is cached by gopsutil; fetch once.
hostInfo, _ := host.Info()
var (
wg sync.WaitGroup
errMu sync.Mutex
mergeErr error
cpuStats CPUStatus
memStats MemoryStatus
diskStats []DiskStatus
diskIO DiskIOStatus
netStats []NetworkStatus
proxyStats ProxyStatus
batteryStats []BatteryStatus
thermalStats ThermalStatus
sensorStats []SensorReading
gpuStats []GPUStatus
btStats []BluetoothDevice
topProcs []ProcessInfo
)
// Helper to launch concurrent collection.
// Errors are chained with %v/%w so the first error stays unwrappable.
collect := func(fn func() error) {
wg.Add(1)
go func() {
defer wg.Done()
if err := fn(); err != nil {
errMu.Lock()
if mergeErr == nil {
mergeErr = err
} else {
mergeErr = fmt.Errorf("%v; %w", mergeErr, err)
}
errMu.Unlock()
}
}()
}
// Launch independent collection tasks.
collect(func() (err error) { cpuStats, err = collectCPU(); return })
collect(func() (err error) { memStats, err = collectMemory(); return })
collect(func() (err error) { diskStats, err = collectDisks(); return })
collect(func() (err error) { diskIO = c.collectDiskIO(now); return nil })
collect(func() (err error) { netStats, err = c.collectNetwork(now); return })
collect(func() (err error) { proxyStats = collectProxy(); return nil })
collect(func() (err error) { batteryStats, _ = collectBatteries(); return nil })
collect(func() (err error) { thermalStats = collectThermal(); return nil })
collect(func() (err error) { sensorStats, _ = collectSensors(); return nil })
collect(func() (err error) { gpuStats, err = c.collectGPU(now); return })
collect(func() (err error) {
// Bluetooth is slow; cache for 30s.
if now.Sub(c.lastBTAt) > 30*time.Second || len(c.lastBT) == 0 {
btStats = c.collectBluetooth(now)
c.lastBT = btStats
c.lastBTAt = now
} else {
btStats = c.lastBT
}
return nil
})
collect(func() (err error) { topProcs = collectTopProcesses(); return nil })
// Wait for all to complete.
wg.Wait()
// Dependent tasks (post-collect).
// Cache hardware info as it's expensive and rarely changes.
// Must run after wg.Wait(): it consumes memStats.Total and diskStats.
if !c.hasStatic || now.Sub(c.lastHWAt) > 10*time.Minute {
c.cachedHW = collectHardware(memStats.Total, diskStats)
c.lastHWAt = now
c.hasStatic = true
}
hwInfo := c.cachedHW
score, scoreMsg := calculateHealthScore(cpuStats, memStats, diskStats, diskIO, thermalStats)
return MetricsSnapshot{
CollectedAt: now,
Host: hostInfo.Hostname,
Platform: fmt.Sprintf("%s %s", hostInfo.Platform, hostInfo.PlatformVersion),
Uptime: formatUptime(hostInfo.Uptime),
Procs: hostInfo.Procs,
Hardware: hwInfo,
HealthScore: score,
HealthScoreMsg: scoreMsg,
CPU: cpuStats,
GPU: gpuStats,
Memory: memStats,
Disks: diskStats,
DiskIO: diskIO,
Network: netStats,
Proxy: proxyStats,
Batteries: batteryStats,
Thermal: thermalStats,
Sensors: sensorStats,
Bluetooth: btStats,
TopProcesses: topProcs,
}, mergeErr
}
// runCmd executes name with args under ctx and returns its stdout as a
// string; any start/exit/timeout error is returned unchanged.
func runCmd(ctx context.Context, name string, args ...string) (string, error) {
	out, err := exec.CommandContext(ctx, name, args...).Output()
	if err != nil {
		return "", err
	}
	return string(out), nil
}
// commandExists reports whether name resolves to an executable on PATH.
// Empty names are rejected up front, and a panic inside LookPath is
// treated as "not found" so callers never crash on odd input.
func commandExists(name string) (ok bool) {
	if len(name) == 0 {
		return false
	}
	defer func() {
		// Treat LookPath panics as "missing".
		if recover() != nil {
			ok = false
		}
	}()
	_, lookErr := exec.LookPath(name)
	return lookErr == nil
}

View File

@@ -1,289 +0,0 @@
package main
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/shirou/gopsutil/v3/host"
)
var (
// Cache for heavy system_profiler output.
lastPowerAt time.Time // when cachedPower was last refreshed
cachedPower string // raw SPPowerDataType output reused across calls
powerCacheTTL = 30 * time.Second // max age before re-running system_profiler
)
// collectBatteries returns battery readings for the current platform:
// macOS via `pmset -g batt` (augmented with cached system_profiler
// health/cycles/capacity), Linux via /sys/class/power_supply. Returns an
// error when no battery data is found; any internal panic is recovered
// and converted to an error so the UI stays alive.
func collectBatteries() (batts []BatteryStatus, err error) {
defer func() {
if r := recover(); r != nil {
// Swallow panics to keep UI alive.
err = fmt.Errorf("battery collection failed: %v", r)
}
}()
// macOS: pmset for real-time percentage/status.
if runtime.GOOS == "darwin" && commandExists("pmset") {
if out, err := runCmd(context.Background(), "pmset", "-g", "batt"); err == nil {
// Health/cycles/capacity from cached system_profiler.
health, cycles, capacity := getCachedPowerData()
if batts := parsePMSet(out, health, cycles, capacity); len(batts) > 0 {
return batts, nil
}
}
}
// Linux: /sys/class/power_supply.
matches, _ := filepath.Glob("/sys/class/power_supply/BAT*/capacity")
for _, capFile := range matches {
statusFile := filepath.Join(filepath.Dir(capFile), "status")
capData, err := os.ReadFile(capFile)
if err != nil {
continue
}
statusData, _ := os.ReadFile(statusFile)
percentStr := strings.TrimSpace(string(capData))
percent, _ := strconv.ParseFloat(percentStr, 64)
status := strings.TrimSpace(string(statusData))
if status == "" {
status = "Unknown"
}
batts = append(batts, BatteryStatus{
Percent: percent,
Status: status,
})
}
if len(batts) > 0 {
return batts, nil
}
return nil, errors.New("no battery data found")
}
// parsePMSet converts `pmset -g batt` output into BatteryStatus values.
// The health/cycles/capacity arguments come from the (cached)
// system_profiler report and are attached verbatim to every battery.
func parsePMSet(raw string, health string, cycles int, capacity int) []BatteryStatus {
	var batts []BatteryStatus
	var timeLeft string
	for line := range strings.Lines(raw) {
		// Lines like "1:23 remaining": remember the token before "remaining".
		if strings.Contains(line, "remaining") {
			words := strings.Fields(line)
			for i, w := range words {
				if w == "remaining" && i > 0 {
					timeLeft = words[i-1]
				}
			}
		}
		// Only lines with a percentage describe a battery.
		if !strings.Contains(line, "%") {
			continue
		}
		words := strings.Fields(line)
		percent := 0.0
		status := "Unknown"
		parsed := false
		for i, w := range words {
			if !strings.Contains(w, "%") {
				continue
			}
			// e.g. "95%;" -> strip trailing ";" then "%".
			value := strings.TrimSuffix(strings.TrimSuffix(w, ";"), "%")
			if p, parseErr := strconv.ParseFloat(value, 64); parseErr == nil {
				percent = p
				parsed = true
				// The word after the percentage is the charge status.
				if i+1 < len(words) {
					status = strings.TrimSuffix(words[i+1], ";")
				}
			}
			break
		}
		if !parsed {
			continue
		}
		batts = append(batts, BatteryStatus{
			Percent:    percent,
			Status:     status,
			TimeLeft:   timeLeft,
			Health:     health,
			CycleCount: cycles,
			Capacity:   capacity,
		})
	}
	return batts
}
// getCachedPowerData extracts battery condition, cycle count, and maximum
// capacity (%) from the cached system_profiler power report. Zero values
// mean the report was unavailable or a field was absent.
func getCachedPowerData() (health string, cycles int, capacity int) {
	report := getSystemPowerOutput()
	if report == "" {
		return "", 0, 0
	}
	for line := range strings.Lines(report) {
		lower := strings.ToLower(line)
		_, after, found := strings.Cut(line, ":")
		if !found {
			continue
		}
		value := strings.TrimSpace(after)
		switch {
		case strings.Contains(lower, "cycle count"):
			cycles, _ = strconv.Atoi(value)
		case strings.Contains(lower, "condition"):
			health = value
		case strings.Contains(lower, "maximum capacity"):
			capacity, _ = strconv.Atoi(strings.TrimSpace(strings.TrimSuffix(value, "%")))
		}
	}
	return health, cycles, capacity
}
// getSystemPowerOutput returns the raw `system_profiler SPPowerDataType`
// output, cached for powerCacheTTL because the tool is slow. macOS only;
// other platforms get "". On a failed refresh the previous (possibly
// stale or empty) cached value is returned rather than an error.
func getSystemPowerOutput() string {
if runtime.GOOS != "darwin" {
return ""
}
now := time.Now()
if cachedPower != "" && now.Sub(lastPowerAt) < powerCacheTTL {
return cachedPower
}
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
out, err := runCmd(ctx, "system_profiler", "SPPowerDataType")
if err == nil {
cachedPower = out
lastPowerAt = now
}
return cachedPower
}
// collectThermal gathers macOS temperature/fan/power data. Fan speed
// comes from the cached system_profiler report; battery temperature and
// power figures come from `ioreg -rn AppleSmartBattery`; if no battery
// temperature is found, a sysctl thermal level is mapped to a rough °C
// estimate. Non-darwin platforms get a zero ThermalStatus.
func collectThermal() ThermalStatus {
if runtime.GOOS != "darwin" {
return ThermalStatus{}
}
var thermal ThermalStatus
// Fan info from cached system_profiler.
out := getSystemPowerOutput()
if out != "" {
for line := range strings.Lines(out) {
lower := strings.ToLower(line)
if strings.Contains(lower, "fan") && strings.Contains(lower, "speed") {
if _, after, found := strings.Cut(line, ":"); found {
numStr := strings.TrimSpace(after)
numStr, _, _ = strings.Cut(numStr, " ")
thermal.FanSpeed, _ = strconv.Atoi(numStr)
}
}
}
}
// Power metrics from ioreg (fast, real-time).
ctxPower, cancelPower := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancelPower()
if out, err := runCmd(ctxPower, "ioreg", "-rn", "AppleSmartBattery"); err == nil {
for line := range strings.Lines(out) {
line = strings.TrimSpace(line)
// Battery temperature ("Temperature" = 3055).
// Raw value is hundredths of a degree Celsius.
if _, after, found := strings.Cut(line, "\"Temperature\" = "); found {
valStr := strings.TrimSpace(after)
if tempRaw, err := strconv.Atoi(valStr); err == nil && tempRaw > 0 {
thermal.CPUTemp = float64(tempRaw) / 100.0
}
}
// Adapter power (Watts) from current adapter.
if strings.Contains(line, "\"AdapterDetails\" = {") && !strings.Contains(line, "AppleRaw") {
if _, after, found := strings.Cut(line, "\"Watts\"="); found {
valStr := strings.TrimSpace(after)
valStr, _, _ = strings.Cut(valStr, ",")
valStr, _, _ = strings.Cut(valStr, "}")
valStr = strings.TrimSpace(valStr)
if watts, err := strconv.ParseFloat(valStr, 64); err == nil && watts > 0 {
thermal.AdapterPower = watts
}
}
}
// System power consumption (mW -> W).
if _, after, found := strings.Cut(line, "\"SystemPowerIn\"="); found {
valStr := strings.TrimSpace(after)
valStr, _, _ = strings.Cut(valStr, ",")
valStr, _, _ = strings.Cut(valStr, "}")
valStr = strings.TrimSpace(valStr)
if powerMW, err := strconv.ParseFloat(valStr, 64); err == nil && powerMW > 0 {
thermal.SystemPower = powerMW / 1000.0
}
}
// Battery power (mW -> W, positive = discharging).
if _, after, found := strings.Cut(line, "\"BatteryPower\"="); found {
valStr := strings.TrimSpace(after)
valStr, _, _ = strings.Cut(valStr, ",")
valStr, _, _ = strings.Cut(valStr, "}")
valStr = strings.TrimSpace(valStr)
// Parse as int64 first to handle negative values (charging)
if powerMW, err := strconv.ParseInt(valStr, 10, 64); err == nil {
thermal.BatteryPower = float64(powerMW) / 1000.0
}
}
}
}
// Fallback: thermal level proxy.
// NOTE(review): 45 + level*0.5 is a heuristic estimate, not a reading.
if thermal.CPUTemp == 0 {
ctx2, cancel2 := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancel2()
out2, err := runCmd(ctx2, "sysctl", "-n", "machdep.xcpm.cpu_thermal_level")
if err == nil {
level, _ := strconv.Atoi(strings.TrimSpace(out2))
if level >= 0 {
thermal.CPUTemp = 45 + float64(level)*0.5
}
}
}
return thermal
}
// collectSensors returns temperature readings from gopsutil, dropping
// readings that are non-physical (<= 0 °C or above 150 °C).
func collectSensors() ([]SensorReading, error) {
	temps, err := host.SensorsTemperatures()
	if err != nil {
		return nil, err
	}
	var readings []SensorReading
	for _, t := range temps {
		if t.Temperature > 0 && t.Temperature <= 150 {
			readings = append(readings, SensorReading{
				Label: prettifyLabel(t.SensorKey),
				Value: t.Temperature,
				Unit:  "°C",
			})
		}
	}
	return readings, nil
}
// prettifyLabel cleans a raw sensor key for display: trims surrounding
// whitespace, drops a leading "TC" prefix, and replaces underscores with
// spaces.
func prettifyLabel(key string) string {
	trimmed := strings.TrimPrefix(strings.TrimSpace(key), "TC")
	return strings.ReplaceAll(trimmed, "_", " ")
}

View File

@@ -1,138 +0,0 @@
package main
import (
"context"
"errors"
"runtime"
"strings"
"time"
)
const (
bluetoothCacheTTL = 30 * time.Second // reuse device list this long before re-querying
bluetoothctlTimeout = 1500 * time.Millisecond // budget for one bluetoothctl invocation
)
// collectBluetooth returns the Bluetooth device list, served from the
// collector's cache when fresher than bluetoothCacheTTL. It tries macOS
// system_profiler first, then Linux bluetoothctl; on total failure it
// caches a single placeholder entry so the UI always has something to
// show and the expensive probes are not retried every tick.
func (c *Collector) collectBluetooth(now time.Time) []BluetoothDevice {
if len(c.lastBT) > 0 && !c.lastBTAt.IsZero() && now.Sub(c.lastBTAt) < bluetoothCacheTTL {
return c.lastBT
}
if devs, err := readSystemProfilerBluetooth(); err == nil && len(devs) > 0 {
c.lastBTAt = now
c.lastBT = devs
return devs
}
if devs, err := readBluetoothCTLDevices(); err == nil && len(devs) > 0 {
c.lastBTAt = now
c.lastBT = devs
return devs
}
c.lastBTAt = now
if len(c.lastBT) == 0 {
c.lastBT = []BluetoothDevice{{Name: "No Bluetooth info", Connected: false}}
}
return c.lastBT
}
// readSystemProfilerBluetooth fetches paired-device info on macOS via
// system_profiler; it errors out immediately on other platforms or when
// the tool is missing, and otherwise parses the report.
func readSystemProfilerBluetooth() ([]BluetoothDevice, error) {
	if runtime.GOOS != "darwin" || !commandExists("system_profiler") {
		return nil, errors.New("system_profiler unavailable")
	}
	ctx, cancel := context.WithTimeout(context.Background(), systemProfilerTimeout)
	defer cancel()
	raw, err := runCmd(ctx, "system_profiler", "SPBluetoothDataType")
	if err != nil {
		return nil, err
	}
	return parseSPBluetooth(raw), nil
}
// readBluetoothCTLDevices queries the bluetoothctl CLI (Linux) for the
// current device info and returns the parsed result; it errors out when
// the tool is absent or the invocation times out.
func readBluetoothCTLDevices() ([]BluetoothDevice, error) {
	if !commandExists("bluetoothctl") {
		return nil, errors.New("bluetoothctl unavailable")
	}
	ctx, cancel := context.WithTimeout(context.Background(), bluetoothctlTimeout)
	defer cancel()
	raw, err := runCmd(ctx, "bluetoothctl", "info")
	if err != nil {
		return nil, err
	}
	return parseBluetoothctl(raw), nil
}
// parseSPBluetooth parses `system_profiler SPBluetoothDataType` text.
// The format is indentation-based: top-level "X:" headers reset state,
// deeper "Name:" headers open a device entry, and "Connected:" /
// "Battery Level:" lines fill in the current entry. Returns a single
// "No devices" placeholder when nothing was parsed.
func parseSPBluetooth(raw string) []BluetoothDevice {
var devices []BluetoothDevice
var currentName string
var connected bool
var battery string
for line := range strings.Lines(raw) {
trim := strings.TrimSpace(line)
if len(trim) == 0 {
continue
}
// Indentation (leading spaces on the raw line) distinguishes section
// headers from device headers, so `line` — not `trim` — is checked.
if !strings.HasPrefix(line, " ") && strings.HasSuffix(trim, ":") {
// Reset at top-level sections.
currentName = ""
connected = false
battery = ""
continue
}
if strings.HasPrefix(line, " ") && strings.HasSuffix(trim, ":") {
// A new device header flushes the previous device, if any.
if currentName != "" {
devices = append(devices, BluetoothDevice{Name: currentName, Connected: connected, Battery: battery})
}
currentName = strings.TrimSuffix(trim, ":")
connected = false
battery = ""
continue
}
if strings.Contains(trim, "Connected:") {
connected = strings.Contains(trim, "Yes")
}
if strings.Contains(trim, "Battery Level:") {
battery = strings.TrimSpace(strings.TrimPrefix(trim, "Battery Level:"))
}
}
// Flush the final device.
if currentName != "" {
devices = append(devices, BluetoothDevice{Name: currentName, Connected: connected, Battery: battery})
}
if len(devices) == 0 {
return []BluetoothDevice{{Name: "No devices", Connected: false}}
}
return devices
}
// parseBluetoothctl converts `bluetoothctl info` output into devices.
// A "Device <MAC>" line opens a new entry (named by MAC until a "Name:"
// line overrides it); "Connected:" records the link state. Returns a
// single "No devices" placeholder when nothing was parsed.
func parseBluetoothctl(raw string) []BluetoothDevice {
	var devices []BluetoothDevice
	var current BluetoothDevice
	flush := func() {
		if current.Name != "" {
			devices = append(devices, current)
		}
	}
	for line := range strings.Lines(raw) {
		trim := strings.TrimSpace(line)
		switch {
		case strings.HasPrefix(trim, "Device "):
			flush()
			current = BluetoothDevice{Name: strings.TrimPrefix(trim, "Device "), Connected: false}
		case strings.HasPrefix(trim, "Name:"):
			current.Name = strings.TrimSpace(strings.TrimPrefix(trim, "Name:"))
		case strings.HasPrefix(trim, "Connected:"):
			current.Connected = strings.Contains(trim, "yes")
		}
	}
	flush()
	if len(devices) == 0 {
		return []BluetoothDevice{{Name: "No devices", Connected: false}}
	}
	return devices
}

View File

@@ -1,261 +0,0 @@
package main
import (
"bufio"
"context"
"errors"
"runtime"
"strconv"
"strings"
"time"
"github.com/shirou/gopsutil/v3/cpu"
"github.com/shirou/gopsutil/v3/load"
)
const (
// Interval between the warm-up call and the measured cpu.Percent call.
cpuSampleInterval = 200 * time.Millisecond
)
// collectCPU samples CPU utilization, load averages, and core topology.
// It uses a warm-up + sleep + read pattern for gopsutil's cpu.Percent,
// falls back to `ps` when per-core percentages are unavailable (marking
// PerCore as estimated), and falls back to `uptime` when load averages
// read as all-zero.
func collectCPU() (CPUStatus, error) {
counts, countsErr := cpu.Counts(false)
if countsErr != nil || counts == 0 {
counts = runtime.NumCPU()
}
logical, logicalErr := cpu.Counts(true)
if logicalErr != nil || logical == 0 {
logical = runtime.NumCPU()
}
if logical <= 0 {
logical = 1
}
// Two-call pattern for more reliable CPU usage.
warmUpCPU()
time.Sleep(cpuSampleInterval)
percents, err := cpu.Percent(0, true)
var totalPercent float64
perCoreEstimated := false
if err != nil || len(percents) == 0 {
// Per-core data unavailable: estimate from `ps` and flag it.
fallbackUsage, fallbackPerCore, fallbackErr := fallbackCPUUtilization(logical)
if fallbackErr != nil {
if err != nil {
return CPUStatus{}, err
}
return CPUStatus{}, fallbackErr
}
totalPercent = fallbackUsage
percents = fallbackPerCore
perCoreEstimated = true
} else {
// Overall usage is the mean of the per-core percentages.
for _, v := range percents {
totalPercent += v
}
totalPercent /= float64(len(percents))
}
loadStats, loadErr := load.Avg()
var loadAvg load.AvgStat
if loadStats != nil {
loadAvg = *loadStats
}
if loadErr != nil || isZeroLoad(loadAvg) {
if fallback, err := fallbackLoadAvgFromUptime(); err == nil {
loadAvg = fallback
}
}
// P/E core counts for Apple Silicon.
pCores, eCores := getCoreTopology()
return CPUStatus{
Usage: totalPercent,
PerCore: percents,
PerCoreEstimated: perCoreEstimated,
Load1: loadAvg.Load1,
Load5: loadAvg.Load5,
Load15: loadAvg.Load15,
CoreCount: counts,
LogicalCPU: logical,
PCoreCount: pCores,
ECoreCount: eCores,
}, nil
}
// isZeroLoad reports whether every load average in avg is exactly zero,
// which indicates the platform returned no real load data.
func isZeroLoad(avg load.AvgStat) bool {
	for _, v := range []float64{avg.Load1, avg.Load5, avg.Load15} {
		if v != 0 {
			return false
		}
	}
	return true
}
var (
// Cache for core topology.
lastTopologyAt time.Time // when the sysctl topology query last ran
cachedP, cachedE int // cached performance / efficiency core counts
topologyTTL = 10 * time.Minute // topology is static; refresh rarely
)
// getCoreTopology returns P/E core counts on Apple Silicon.
// It reads the hw.perflevel* sysctls (logical CPU count and level name
// for each perf level) and caches the result for topologyTTL. Returns
// (0, 0) off macOS, on query failure, or when fewer than the expected
// four output lines are produced (e.g. Intel Macs without perf levels).
func getCoreTopology() (pCores, eCores int) {
if runtime.GOOS != "darwin" {
return 0, 0
}
now := time.Now()
// Serve from cache only when a previous query actually found cores.
if cachedP > 0 || cachedE > 0 {
if now.Sub(lastTopologyAt) < topologyTTL {
return cachedP, cachedE
}
}
ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancel()
out, err := runCmd(ctx, "sysctl", "-n",
"hw.perflevel0.logicalcpu",
"hw.perflevel0.name",
"hw.perflevel1.logicalcpu",
"hw.perflevel1.name")
if err != nil {
return 0, 0
}
var lines []string
for line := range strings.Lines(strings.TrimSpace(out)) {
lines = append(lines, line)
}
if len(lines) < 4 {
return 0, 0
}
level0Count, _ := strconv.Atoi(strings.TrimSpace(lines[0]))
level0Name := strings.ToLower(strings.TrimSpace(lines[1]))
level1Count, _ := strconv.Atoi(strings.TrimSpace(lines[2]))
level1Name := strings.ToLower(strings.TrimSpace(lines[3]))
// Each level self-identifies as "Performance" or "Efficiency".
if strings.Contains(level0Name, "performance") {
pCores = level0Count
} else if strings.Contains(level0Name, "efficiency") {
eCores = level0Count
}
if strings.Contains(level1Name, "performance") {
pCores = level1Count
} else if strings.Contains(level1Name, "efficiency") {
eCores = level1Count
}
cachedP, cachedE = pCores, eCores
lastTopologyAt = now
return pCores, eCores
}
// fallbackLoadAvgFromUptime parses the 1/5/15-minute load averages from
// the `uptime` command, used when gopsutil reports no load data. It
// accepts both the BSD ("load averages:") and Linux ("load average:")
// marker spellings and tolerates comma/semicolon separators.
func fallbackLoadAvgFromUptime() (load.AvgStat, error) {
if !commandExists("uptime") {
return load.AvgStat{}, errors.New("uptime command unavailable")
}
ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancel()
out, err := runCmd(ctx, "uptime")
if err != nil {
return load.AvgStat{}, err
}
markers := []string{"load averages:", "load average:"}
idx := -1
for _, marker := range markers {
if pos := strings.LastIndex(out, marker); pos != -1 {
idx = pos + len(marker)
break
}
}
if idx == -1 {
return load.AvgStat{}, errors.New("load averages not found in uptime output")
}
segment := strings.TrimSpace(out[idx:])
fields := strings.Fields(segment)
var values []float64
// Collect the first three parseable numbers after the marker.
for _, field := range fields {
field = strings.Trim(field, ",;")
if field == "" {
continue
}
val, err := strconv.ParseFloat(field, 64)
if err != nil {
continue
}
values = append(values, val)
if len(values) == 3 {
break
}
}
if len(values) < 3 {
return load.AvgStat{}, errors.New("could not parse load averages from uptime output")
}
return load.AvgStat{
Load1: values[0],
Load5: values[1],
Load15: values[2],
}, nil
}
// fallbackCPUUtilization estimates CPU usage from `ps -Aceo pcpu` when
// gopsutil cannot provide percentages. It sums per-process CPU, clamps
// to [0, logical*100], and returns the average plus a per-core slice in
// which every core carries that same average (an estimate, not a real
// per-core reading — callers mark it PerCoreEstimated).
func fallbackCPUUtilization(logical int) (float64, []float64, error) {
if logical <= 0 {
logical = runtime.NumCPU()
}
if logical <= 0 {
logical = 1
}
ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancel()
out, err := runCmd(ctx, "ps", "-Aceo", "pcpu")
if err != nil {
return 0, nil, err
}
scanner := bufio.NewScanner(strings.NewReader(out))
total := 0.0
lineIndex := 0
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
if line == "" {
continue
}
lineIndex++
// Skip the "%CPU" header line.
if lineIndex == 1 && (strings.Contains(strings.ToLower(line), "cpu") || strings.Contains(line, "%")) {
continue
}
val, parseErr := strconv.ParseFloat(line, 64)
if parseErr != nil {
continue
}
total += val
}
if scanErr := scanner.Err(); scanErr != nil {
return 0, nil, scanErr
}
maxTotal := float64(logical * 100)
if total < 0 {
total = 0
} else if total > maxTotal {
total = maxTotal
}
avg := total / float64(logical)
perCore := make([]float64, logical)
for i := range perCore {
perCore[i] = avg
}
return avg, perCore, nil
}
// warmUpCPU primes gopsutil's internal CPU counters so that the next
// cpu.Percent call measures a real interval instead of returning zeros.
func warmUpCPU() {
	_, _ = cpu.Percent(0, true)
}

View File

@@ -1,214 +0,0 @@
package main
import (
"context"
"errors"
"fmt"
"runtime"
"sort"
"strings"
"time"
"github.com/shirou/gopsutil/v3/disk"
)
// skipDiskMounts lists macOS system/virtual mount points that share the
// physical disk and would otherwise appear as duplicate volumes.
var skipDiskMounts = map[string]bool{
"/System/Volumes/VM": true,
"/System/Volumes/Preboot": true,
"/System/Volumes/Update": true,
"/System/Volumes/xarts": true,
"/System/Volumes/Hardware": true,
"/System/Volumes/Data": true,
"/dev": true,
}
// collectDisks returns up to three mounted volumes, largest first.
// Loop devices, macOS system volumes, /private mounts, volumes under
// 1GB, and duplicates (same base device, or same fstype+size which
// indicates a shared APFS pool) are all filtered out; the survivors are
// annotated as internal/external.
func collectDisks() ([]DiskStatus, error) {
partitions, err := disk.Partitions(false)
if err != nil {
return nil, err
}
var (
disks []DiskStatus
seenDevice = make(map[string]bool)
seenVolume = make(map[string]bool)
)
for _, part := range partitions {
if strings.HasPrefix(part.Device, "/dev/loop") {
continue
}
if skipDiskMounts[part.Mountpoint] {
continue
}
if strings.HasPrefix(part.Mountpoint, "/System/Volumes/") {
continue
}
// Skip /private mounts.
if strings.HasPrefix(part.Mountpoint, "/private/") {
continue
}
baseDevice := baseDeviceName(part.Device)
if baseDevice == "" {
baseDevice = part.Device
}
if seenDevice[baseDevice] {
continue
}
usage, err := disk.Usage(part.Mountpoint)
if err != nil || usage.Total == 0 {
continue
}
// Skip <1GB volumes.
if usage.Total < 1<<30 {
continue
}
// Use size-based dedupe key for shared pools.
volKey := fmt.Sprintf("%s:%d", part.Fstype, usage.Total)
if seenVolume[volKey] {
continue
}
disks = append(disks, DiskStatus{
Mount: part.Mountpoint,
Device: part.Device,
Used: usage.Used,
Total: usage.Total,
UsedPercent: usage.UsedPercent,
Fstype: part.Fstype,
})
seenDevice[baseDevice] = true
seenVolume[volKey] = true
}
annotateDiskTypes(disks)
sort.Slice(disks, func(i, j int) bool {
return disks[i].Total > disks[j].Total
})
// Keep only the three largest volumes for display.
if len(disks) > 3 {
disks = disks[:3]
}
return disks, nil
}
var (
// External disk cache.
lastDiskCacheAt time.Time // when diskTypeCache was last cleared
diskTypeCache = make(map[string]bool) // base device name -> is external
diskCacheTTL = 2 * time.Minute // diskutil results reused this long
)
// annotateDiskTypes fills each disk's External flag in place using
// `diskutil info` (macOS only; no-op elsewhere). Results are cached per
// base device for diskCacheTTL; when diskutil fails, a /Volumes/ mount
// prefix is used as a heuristic for "external".
func annotateDiskTypes(disks []DiskStatus) {
if len(disks) == 0 || runtime.GOOS != "darwin" || !commandExists("diskutil") {
return
}
now := time.Now()
// Clear stale cache.
if now.Sub(lastDiskCacheAt) > diskCacheTTL {
diskTypeCache = make(map[string]bool)
lastDiskCacheAt = now
}
for i := range disks {
base := baseDeviceName(disks[i].Device)
if base == "" {
base = disks[i].Device
}
if val, ok := diskTypeCache[base]; ok {
disks[i].External = val
continue
}
external, err := isExternalDisk(base)
if err != nil {
// Heuristic fallback: user-mounted volumes live under /Volumes/.
external = strings.HasPrefix(disks[i].Mount, "/Volumes/")
}
disks[i].External = external
diskTypeCache[base] = external
}
}
// baseDeviceName reduces a macOS device node to its whole-disk name,
// e.g. "/dev/disk0s2" -> "disk0"; names without a "disk" prefix are
// returned unchanged (minus any "/dev/" prefix).
func baseDeviceName(device string) string {
	device = strings.TrimPrefix(device, "/dev/")
	if !strings.HasPrefix(device, "disk") {
		return device
	}
	// Cut at the first slice marker ('s') after the "disk" prefix.
	if i := strings.IndexByte(device[4:], 's'); i >= 0 {
		return device[:4+i]
	}
	return device
}
// isExternalDisk asks `diskutil info` whether a device is external.
// The "Internal:" field is authoritative and stops the scan; a
// "Device Location:" field is used otherwise but can still be overridden
// by a later "Internal:" line. Errors when neither field appears.
func isExternalDisk(device string) (bool, error) {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
out, err := runCmd(ctx, "diskutil", "info", device)
if err != nil {
return false, err
}
var (
found bool
external bool
)
for line := range strings.Lines(out) {
trim := strings.TrimSpace(line)
if strings.HasPrefix(trim, "Internal:") {
found = true
external = strings.Contains(trim, "No")
break
}
if strings.HasPrefix(trim, "Device Location:") {
found = true
external = strings.Contains(trim, "External")
}
}
if !found {
return false, errors.New("diskutil info missing Internal field")
}
return external, nil
}
// collectDiskIO derives aggregate read/write rates (MB/s) from the delta
// between this call's disk byte counters and the previous call's,
// stored on the Collector. The first call only seeds the baseline and
// returns zero rates; negative deltas (counter resets) are clamped to 0.
func (c *Collector) collectDiskIO(now time.Time) DiskIOStatus {
counters, err := disk.IOCounters()
if err != nil || len(counters) == 0 {
return DiskIOStatus{}
}
// Sum byte counters across every device.
var total disk.IOCountersStat
for _, v := range counters {
total.ReadBytes += v.ReadBytes
total.WriteBytes += v.WriteBytes
}
if c.lastDiskAt.IsZero() {
c.prevDiskIO = total
c.lastDiskAt = now
return DiskIOStatus{}
}
elapsed := now.Sub(c.lastDiskAt).Seconds()
if elapsed <= 0 {
elapsed = 1
}
readRate := float64(total.ReadBytes-c.prevDiskIO.ReadBytes) / 1024 / 1024 / elapsed
writeRate := float64(total.WriteBytes-c.prevDiskIO.WriteBytes) / 1024 / 1024 / elapsed
c.prevDiskIO = total
c.lastDiskAt = now
if readRate < 0 {
readRate = 0
}
if writeRate < 0 {
writeRate = 0
}
return DiskIOStatus{ReadRate: readRate, WriteRate: writeRate}
}

View File

@@ -1,184 +0,0 @@
package main
import (
"context"
"encoding/json"
"errors"
"regexp"
"runtime"
"strconv"
"strings"
"time"
)
const (
systemProfilerTimeout = 4 * time.Second // budget for system_profiler invocations
macGPUInfoTTL = 10 * time.Minute // static GPU info rarely changes
powermetricsTimeout = 2 * time.Second // budget for one powermetrics sample
)
// Regex for GPU usage parsing (powermetrics output): the active line is
// preferred; the idle line is the fallback (usage = 100 - idle).
var (
gpuActiveResidencyRe = regexp.MustCompile(`GPU HW active residency:\s+([\d.]+)%`)
gpuIdleResidencyRe = regexp.MustCompile(`GPU idle residency:\s+([\d.]+)%`)
)
// collectGPU returns GPU info. On macOS, static details from
// system_profiler are cached for macGPUInfoTTL and the first GPU's Usage
// is refreshed each call via powermetrics. Elsewhere it queries
// nvidia-smi, returning placeholder entries when no tool is available or
// its output cannot be parsed.
func (c *Collector) collectGPU(now time.Time) ([]GPUStatus, error) {
if runtime.GOOS == "darwin" {
// Static GPU info (cached 10 min).
if len(c.cachedGPU) == 0 || c.lastGPUAt.IsZero() || now.Sub(c.lastGPUAt) >= macGPUInfoTTL {
if gpus, err := readMacGPUInfo(); err == nil && len(gpus) > 0 {
c.cachedGPU = gpus
c.lastGPUAt = now
}
}
// Real-time GPU usage.
if len(c.cachedGPU) > 0 {
usage := getMacGPUUsage()
// Copy the cached slice so the cache itself is never mutated.
result := make([]GPUStatus, len(c.cachedGPU))
copy(result, c.cachedGPU)
// Apply usage to first GPU (Apple Silicon).
if len(result) > 0 {
result[0].Usage = usage
}
return result, nil
}
}
ctx, cancel := context.WithTimeout(context.Background(), 600*time.Millisecond)
defer cancel()
if !commandExists("nvidia-smi") {
return []GPUStatus{{
Name: "No GPU metrics available",
Note: "Install nvidia-smi or use platform-specific metrics",
}}, nil
}
out, err := runCmd(ctx, "nvidia-smi", "--query-gpu=utilization.gpu,memory.used,memory.total,name", "--format=csv,noheader,nounits")
if err != nil {
return nil, err
}
var gpus []GPUStatus
// One CSV line per GPU: util, mem used, mem total, name.
for line := range strings.Lines(strings.TrimSpace(out)) {
fields := strings.Split(line, ",")
if len(fields) < 4 {
continue
}
util, _ := strconv.ParseFloat(strings.TrimSpace(fields[0]), 64)
memUsed, _ := strconv.ParseFloat(strings.TrimSpace(fields[1]), 64)
memTotal, _ := strconv.ParseFloat(strings.TrimSpace(fields[2]), 64)
name := strings.TrimSpace(fields[3])
gpus = append(gpus, GPUStatus{
Name: name,
Usage: util,
MemoryUsed: memUsed,
MemoryTotal: memTotal,
})
}
if len(gpus) == 0 {
return []GPUStatus{{
Name: "GPU read failed",
Note: "Verify nvidia-smi availability",
}}, nil
}
return gpus, nil
}
// readMacGPUInfo reads static GPU facts (name, core count, VRAM/Metal/
// vendor note) from `system_profiler -json SPDisplaysDataType`. Usage is
// set to -1 as a sentinel meaning "to be filled in with real-time data".
// A placeholder entry is returned when nothing could be parsed.
func readMacGPUInfo() ([]GPUStatus, error) {
ctx, cancel := context.WithTimeout(context.Background(), systemProfilerTimeout)
defer cancel()
if !commandExists("system_profiler") {
return nil, errors.New("system_profiler unavailable")
}
out, err := runCmd(ctx, "system_profiler", "-json", "SPDisplaysDataType")
if err != nil {
return nil, err
}
// Only the fields we need from the system_profiler JSON schema.
var data struct {
Displays []struct {
Name string `json:"_name"`
VRAM string `json:"spdisplays_vram"`
Vendor string `json:"spdisplays_vendor"`
Metal string `json:"spdisplays_metal"`
Cores string `json:"sppci_cores"`
} `json:"SPDisplaysDataType"`
}
if err := json.Unmarshal([]byte(out), &data); err != nil {
return nil, err
}
var gpus []GPUStatus
for _, d := range data.Displays {
if d.Name == "" {
continue
}
noteParts := []string{}
if d.VRAM != "" {
noteParts = append(noteParts, "VRAM "+d.VRAM)
}
if d.Metal != "" {
noteParts = append(noteParts, d.Metal)
}
if d.Vendor != "" {
noteParts = append(noteParts, d.Vendor)
}
note := strings.Join(noteParts, " · ")
coreCount, _ := strconv.Atoi(d.Cores)
gpus = append(gpus, GPUStatus{
Name: d.Name,
Usage: -1, // Will be updated with real-time data
CoreCount: coreCount,
Note: note,
})
}
if len(gpus) == 0 {
return []GPUStatus{{
Name: "GPU info unavailable",
Note: "Unable to parse system_profiler output",
}}, nil
}
return gpus, nil
}
// getMacGPUUsage samples GPU utilization (%) via one powermetrics run.
// It prefers the "GPU HW active residency" line and falls back to
// 100 - idle residency. Returns -1 when powermetrics fails (it may
// require root) or neither line can be parsed.
func getMacGPUUsage() float64 {
	ctx, cancel := context.WithTimeout(context.Background(), powermetricsTimeout)
	defer cancel()
	// powermetrics may require root.
	out, err := runCmd(ctx, "powermetrics", "--samplers", "gpu_power", "-i", "500", "-n", "1")
	if err != nil {
		return -1
	}
	// Parse "GPU HW active residency: X.XX%".
	if m := gpuActiveResidencyRe.FindStringSubmatch(out); len(m) >= 2 {
		if active, parseErr := strconv.ParseFloat(m[1], 64); parseErr == nil {
			return active
		}
	}
	// Fallback: parse idle residency and derive active.
	if m := gpuIdleResidencyRe.FindStringSubmatch(out); len(m) >= 2 {
		if idle, parseErr := strconv.ParseFloat(m[1], 64); parseErr == nil {
			return 100.0 - idle
		}
	}
	return -1
}

View File

@@ -1,137 +0,0 @@
package main
import (
"context"
"fmt"
"runtime"
"strings"
"time"
)
// collectHardware assembles static machine facts. On macOS it shells out
// to system_profiler (model, chip/processor), sw_vers (OS version), and
// a mini-detail display query (refresh rate); other platforms get
// placeholder values. totalRAM and the largest collected disk provide
// the RAM and disk-size strings.
func collectHardware(totalRAM uint64, disks []DiskStatus) HardwareInfo {
if runtime.GOOS != "darwin" {
return HardwareInfo{
Model: "Unknown",
CPUModel: runtime.GOARCH,
TotalRAM: humanBytes(totalRAM),
DiskSize: "Unknown",
OSVersion: runtime.GOOS,
RefreshRate: "",
}
}
// Model and CPU from system_profiler.
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
var model, cpuModel, osVersion, refreshRate string
out, err := runCmd(ctx, "system_profiler", "SPHardwareDataType")
if err == nil {
for line := range strings.Lines(out) {
lower := strings.ToLower(strings.TrimSpace(line))
// Prefer "Model Name" over "Model Identifier".
if strings.Contains(lower, "model name:") {
parts := strings.Split(line, ":")
if len(parts) == 2 {
model = strings.TrimSpace(parts[1])
}
}
// "Chip:" appears on Apple Silicon machines.
if strings.Contains(lower, "chip:") {
parts := strings.Split(line, ":")
if len(parts) == 2 {
cpuModel = strings.TrimSpace(parts[1])
}
}
// "Processor Name:" is the Intel-era equivalent; only used if
// no "Chip:" value was found.
if strings.Contains(lower, "processor name:") && cpuModel == "" {
parts := strings.Split(line, ":")
if len(parts) == 2 {
cpuModel = strings.TrimSpace(parts[1])
}
}
}
}
ctx2, cancel2 := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel2()
out2, err := runCmd(ctx2, "sw_vers", "-productVersion")
if err == nil {
osVersion = "macOS " + strings.TrimSpace(out2)
}
// Get refresh rate from display info (use mini detail to keep it fast).
ctx3, cancel3 := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel3()
out3, err := runCmd(ctx3, "system_profiler", "-detailLevel", "mini", "SPDisplaysDataType")
if err == nil {
refreshRate = parseRefreshRate(out3)
}
// Disks arrive sorted largest-first, so index 0 is the main disk.
diskSize := "Unknown"
if len(disks) > 0 {
diskSize = humanBytes(disks[0].Total)
}
return HardwareInfo{
Model: model,
CPUModel: cpuModel,
TotalRAM: humanBytes(totalRAM),
DiskSize: diskSize,
OSVersion: osVersion,
RefreshRate: refreshRate,
}
}
// parseRefreshRate extracts the highest refresh rate from system_profiler display output.
func parseRefreshRate(output string) string {
maxHz := 0
for line := range strings.Lines(output) {
lower := strings.ToLower(line)
// Look for patterns like "@ 60Hz", "@ 60.00Hz", or "Refresh Rate: 120 Hz".
if strings.Contains(lower, "hz") {
fields := strings.Fields(lower)
for i, field := range fields {
if field == "hz" && i > 0 {
if hz := parseInt(fields[i-1]); hz > maxHz && hz < 500 {
maxHz = hz
}
continue
}
if numStr, ok := strings.CutSuffix(field, "hz"); ok {
if numStr == "" && i > 0 {
numStr = fields[i-1]
}
if hz := parseInt(numStr); hz > maxHz && hz < 500 {
maxHz = hz
}
}
}
}
}
if maxHz > 0 {
return fmt.Sprintf("%dHz", maxHz)
}
return ""
}
// parseInt extracts an integer from s after stripping whitespace and any
// non-numeric padding on both ends (digits and '.' are kept so decimal
// strings like "60.00" still yield 60). Returns 0 when nothing parses.
func parseInt(s string) int {
	isPadding := func(r rune) bool {
		return (r < '0' || r > '9') && r != '.'
	}
	trimmed := strings.TrimSpace(s)
	trimmed = strings.TrimRightFunc(strings.TrimLeftFunc(trimmed, isPadding), isPadding)
	if trimmed == "" {
		return 0
	}
	var value int
	if _, err := fmt.Sscanf(trimmed, "%d", &value); err != nil {
		return 0
	}
	return value
}

View File

@@ -1,168 +0,0 @@
package main
import (
"fmt"
"strings"
)
// Health score weights and thresholds. The weights sum to 100 and bound how
// many points each subsystem can subtract from a perfect score; thresholds
// mark where penalties begin ("normal"/"warn") and where they saturate or
// flag an issue ("high"/"crit").
const (
	// Weights (max penalty per subsystem).
	healthCPUWeight     = 30.0
	healthMemWeight     = 25.0
	healthDiskWeight    = 20.0
	healthThermalWeight = 15.0
	healthIOWeight      = 10.0
	// CPU usage (%).
	cpuNormalThreshold = 30.0
	cpuHighThreshold   = 70.0
	// Memory usage (%) and flat pressure penalties.
	memNormalThreshold     = 50.0
	memHighThreshold       = 80.0
	memPressureWarnPenalty = 5.0
	memPressureCritPenalty = 15.0
	// Disk usage (%).
	diskWarnThreshold = 70.0
	diskCritThreshold = 90.0
	// CPU temperature (°C).
	thermalNormalThreshold = 60.0
	thermalHighThreshold   = 85.0
	// Disk IO (MB/s, read + write combined).
	ioNormalThreshold = 50.0
	ioHighThreshold   = 150.0
)
// calculateHealthScore derives a 0-100 health score from the current CPU,
// memory, disk, IO and thermal readings, plus a short human-readable
// summary ("Excellent", "Poor: High CPU, ...").
//
// Each subsystem subtracts up to its weight from 100: below the subsystem's
// "normal" threshold there is no penalty, between "normal" and "high" the
// penalty ramps over half the weight, and above "high" it ramps toward the
// full weight (and the issue is named in the message).
func calculateHealthScore(cpu CPUStatus, mem MemoryStatus, disks []DiskStatus, diskIO DiskIOStatus, thermal ThermalStatus) (int, string) {
	score := 100.0
	issues := []string{}
	// CPU penalty.
	cpuPenalty := 0.0
	if cpu.Usage > cpuNormalThreshold {
		if cpu.Usage > cpuHighThreshold {
			cpuPenalty = healthCPUWeight * (cpu.Usage - cpuNormalThreshold) / cpuHighThreshold
		} else {
			cpuPenalty = (healthCPUWeight / 2) * (cpu.Usage - cpuNormalThreshold) / (cpuHighThreshold - cpuNormalThreshold)
		}
	}
	score -= cpuPenalty
	if cpu.Usage > cpuHighThreshold {
		issues = append(issues, "High CPU")
	}
	// Memory penalty.
	memPenalty := 0.0
	if mem.UsedPercent > memNormalThreshold {
		if mem.UsedPercent > memHighThreshold {
			memPenalty = healthMemWeight * (mem.UsedPercent - memNormalThreshold) / memNormalThreshold
		} else {
			memPenalty = (healthMemWeight / 2) * (mem.UsedPercent - memNormalThreshold) / (memHighThreshold - memNormalThreshold)
		}
	}
	score -= memPenalty
	if mem.UsedPercent > memHighThreshold {
		issues = append(issues, "High Memory")
	}
	// Flat memory-pressure penalty on top of the usage penalty.
	switch mem.Pressure {
	case "warn":
		score -= memPressureWarnPenalty
		issues = append(issues, "Memory Pressure")
	case "critical":
		score -= memPressureCritPenalty
		issues = append(issues, "Critical Memory")
	}
	// Disk penalty (primary disk only).
	diskPenalty := 0.0
	if len(disks) > 0 {
		diskUsage := disks[0].UsedPercent
		if diskUsage > diskWarnThreshold {
			if diskUsage > diskCritThreshold {
				diskPenalty = healthDiskWeight * (diskUsage - diskWarnThreshold) / (100 - diskWarnThreshold)
			} else {
				diskPenalty = (healthDiskWeight / 2) * (diskUsage - diskWarnThreshold) / (diskCritThreshold - diskWarnThreshold)
			}
		}
		score -= diskPenalty
		if diskUsage > diskCritThreshold {
			issues = append(issues, "Disk Almost Full")
		}
	}
	// Thermal penalty (skipped when no temperature reading is available).
	thermalPenalty := 0.0
	if thermal.CPUTemp > 0 {
		if thermal.CPUTemp > thermalNormalThreshold {
			if thermal.CPUTemp > thermalHighThreshold {
				thermalPenalty = healthThermalWeight
				issues = append(issues, "Overheating")
			} else {
				thermalPenalty = healthThermalWeight * (thermal.CPUTemp - thermalNormalThreshold) / (thermalHighThreshold - thermalNormalThreshold)
			}
		}
		score -= thermalPenalty
	}
	// Disk IO penalty (combined read + write rate).
	ioPenalty := 0.0
	totalIO := diskIO.ReadRate + diskIO.WriteRate
	if totalIO > ioNormalThreshold {
		if totalIO > ioHighThreshold {
			ioPenalty = healthIOWeight
			issues = append(issues, "Heavy Disk IO")
		} else {
			ioPenalty = healthIOWeight * (totalIO - ioNormalThreshold) / (ioHighThreshold - ioNormalThreshold)
		}
	}
	score -= ioPenalty
	// Clamp score to [0, 100].
	if score < 0 {
		score = 0
	}
	if score > 100 {
		score = 100
	}
	// Build message: rating word plus any issue names.
	var msg string
	switch {
	case score >= 90:
		msg = "Excellent"
	case score >= 75:
		msg = "Good"
	case score >= 60:
		msg = "Fair"
	case score >= 40:
		msg = "Poor"
	default:
		msg = "Critical"
	}
	if len(issues) > 0 {
		msg = msg + ": " + strings.Join(issues, ", ")
	}
	return int(score), msg
}
// formatUptime renders a seconds count as "Xd Yh Zm", omitting leading
// zero units (e.g. "3h 2m", "45m").
func formatUptime(secs uint64) string {
	days, rem := secs/86400, secs%86400
	hours, mins := rem/3600, rem%3600/60
	switch {
	case days > 0:
		return fmt.Sprintf("%dd %dh %dm", days, hours, mins)
	case hours > 0:
		return fmt.Sprintf("%dh %dm", hours, mins)
	default:
		return fmt.Sprintf("%dm", mins)
	}
}

View File

@@ -1,58 +0,0 @@
package main
import (
"strings"
"testing"
)
// TestCalculateHealthScorePerfect verifies that readings below every
// penalty threshold yield a full score of 100 and the "Excellent" message.
func TestCalculateHealthScorePerfect(t *testing.T) {
	score, msg := calculateHealthScore(
		CPUStatus{Usage: 10},
		MemoryStatus{UsedPercent: 20, Pressure: "normal"},
		[]DiskStatus{{UsedPercent: 30}},
		DiskIOStatus{ReadRate: 5, WriteRate: 5},
		ThermalStatus{CPUTemp: 40},
	)
	if score != 100 {
		t.Fatalf("expected perfect score 100, got %d", score)
	}
	if msg != "Excellent" {
		t.Fatalf("unexpected message %q", msg)
	}
}
// TestCalculateHealthScoreDetectsIssues verifies that readings above every
// critical threshold drive the score well down and that the message names
// the specific problems.
func TestCalculateHealthScoreDetectsIssues(t *testing.T) {
	score, msg := calculateHealthScore(
		CPUStatus{Usage: 95},
		MemoryStatus{UsedPercent: 90, Pressure: "critical"},
		[]DiskStatus{{UsedPercent: 95}},
		DiskIOStatus{ReadRate: 120, WriteRate: 80},
		ThermalStatus{CPUTemp: 90},
	)
	if score >= 40 {
		t.Fatalf("expected heavy penalties bringing score down, got %d", score)
	}
	if msg == "Excellent" {
		t.Fatalf("expected message to include issues, got %q", msg)
	}
	if !strings.Contains(msg, "High CPU") {
		t.Fatalf("message should mention CPU issue: %q", msg)
	}
	if !strings.Contains(msg, "Disk Almost Full") {
		t.Fatalf("message should mention disk issue: %q", msg)
	}
}
// TestFormatUptime covers the three uptime layouts: minutes only,
// hours+minutes, and days+hours+minutes.
func TestFormatUptime(t *testing.T) {
	if got := formatUptime(65); got != "1m" {
		t.Fatalf("expected 1m, got %s", got)
	}
	if got := formatUptime(3600 + 120); got != "1h 2m" {
		t.Fatalf("expected \"1h 2m\", got %s", got)
	}
	if got := formatUptime(86400*2 + 3600*3 + 60*5); got != "2d 3h 5m" {
		t.Fatalf("expected \"2d 3h 5m\", got %s", got)
	}
}

View File

@@ -1,99 +0,0 @@
package main
import (
"context"
"runtime"
"strconv"
"strings"
"time"
"github.com/shirou/gopsutil/v3/mem"
)
// collectMemory gathers RAM and swap usage plus the macOS memory pressure
// level into a MemoryStatus. Only the virtual-memory read is fatal; swap and
// pressure failures degrade to zero/empty values.
func collectMemory() (MemoryStatus, error) {
	vm, err := mem.VirtualMemory()
	if err != nil {
		return MemoryStatus{}, err
	}
	// Swap failure is non-fatal: a zero-valued SwapMemoryStat is acceptable.
	swap, _ := mem.SwapMemory()
	pressure := getMemoryPressure()
	// On macOS, vm.Cached is 0, so we calculate from file-backed pages.
	cached := vm.Cached
	if runtime.GOOS == "darwin" && cached == 0 {
		cached = getFileBackedMemory()
	}
	return MemoryStatus{
		Used:        vm.Used,
		Total:       vm.Total,
		UsedPercent: vm.UsedPercent,
		SwapUsed:    swap.Used,
		SwapTotal:   swap.Total,
		Cached:      cached,
		Pressure:    pressure,
	}, nil
}
// getFileBackedMemory returns the bytes of file-backed (cached) memory on
// macOS by parsing `vm_stat` output, or 0 when the tool fails or the output
// cannot be parsed.
func getFileBackedMemory() uint64 {
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()
	out, err := runCmd(ctx, "vm_stat")
	if err != nil {
		return 0
	}
	// Parse page size from first line: "Mach Virtual Memory Statistics: (page size of 16384 bytes)"
	var pageSize uint64 = 4096 // Default when the header line is absent or malformed.
	firstLine := true
	for line := range strings.Lines(out) {
		if firstLine {
			firstLine = false
			if strings.Contains(line, "page size of") {
				if _, after, found := strings.Cut(line, "page size of "); found {
					if before, _, found := strings.Cut(after, " bytes"); found {
						if size, err := strconv.ParseUint(strings.TrimSpace(before), 10, 64); err == nil {
							pageSize = size
						}
					}
				}
			}
		}
		// Parse "File-backed pages: 388975." (note the trailing period).
		if strings.Contains(line, "File-backed pages:") {
			if _, after, found := strings.Cut(line, ":"); found {
				numStr := strings.TrimSpace(after)
				numStr = strings.TrimSuffix(numStr, ".")
				if pages, err := strconv.ParseUint(numStr, 10, 64); err == nil {
					return pages * pageSize
				}
			}
		}
	}
	return 0
}
// getMemoryPressure reports the macOS memory pressure level by running the
// `memory_pressure` tool: "critical", "warn", "normal", or "" when the level
// cannot be determined (non-macOS, command failure, unrecognized output).
func getMemoryPressure() string {
	if runtime.GOOS != "darwin" {
		return ""
	}
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()
	out, err := runCmd(ctx, "memory_pressure")
	if err != nil {
		return ""
	}
	lowered := strings.ToLower(out)
	// Check in severity order so "critical" wins over "warn"/"normal".
	for _, level := range []string{"critical", "warn", "normal"} {
		if strings.Contains(lowered, level) {
			return level
		}
	}
	return ""
}

View File

@@ -1,142 +0,0 @@
package main
import (
"context"
"os"
"runtime"
"sort"
"strings"
"time"
"github.com/shirou/gopsutil/v3/net"
)
// collectNetwork computes per-interface throughput (MB/s) by diffing the
// current IO counters against the previous sample stored on the Collector.
// The very first call only primes the baseline and returns (nil, nil).
// Noise interfaces (loopback, tunnels, ...) are skipped, and only the three
// busiest interfaces are returned, sorted by combined rx+tx rate.
func (c *Collector) collectNetwork(now time.Time) ([]NetworkStatus, error) {
	stats, err := net.IOCounters(true)
	if err != nil {
		return nil, err
	}
	// Map interface IPs.
	ifAddrs := getInterfaceIPs()
	// First sample: record the baseline, nothing to report yet.
	if c.lastNetAt.IsZero() {
		c.lastNetAt = now
		for _, s := range stats {
			c.prevNet[s.Name] = s
		}
		return nil, nil
	}
	elapsed := now.Sub(c.lastNetAt).Seconds()
	if elapsed <= 0 {
		elapsed = 1 // guard against clock skew / zero interval
	}
	var result []NetworkStatus
	for _, cur := range stats {
		if isNoiseInterface(cur.Name) {
			continue
		}
		prev, ok := c.prevNet[cur.Name]
		if !ok {
			continue // interface appeared since the last sample; no delta yet
		}
		// Bytes delta -> MB/s over the elapsed window.
		rx := float64(cur.BytesRecv-prev.BytesRecv) / 1024.0 / 1024.0 / elapsed
		tx := float64(cur.BytesSent-prev.BytesSent) / 1024.0 / 1024.0 / elapsed
		if rx < 0 {
			rx = 0
		}
		if tx < 0 {
			tx = 0
		}
		result = append(result, NetworkStatus{
			Name:      cur.Name,
			RxRateMBs: rx,
			TxRateMBs: tx,
			IP:        ifAddrs[cur.Name],
		})
	}
	// Refresh the baseline for the next sample.
	c.lastNetAt = now
	for _, s := range stats {
		c.prevNet[s.Name] = s
	}
	// Busiest first; keep at most three.
	sort.Slice(result, func(i, j int) bool {
		return result[i].RxRateMBs+result[i].TxRateMBs > result[j].RxRateMBs+result[j].TxRateMBs
	})
	if len(result) > 3 {
		result = result[:3]
	}
	return result, nil
}
// getInterfaceIPs maps each interface name to its first non-loopback IPv4
// address with the CIDR suffix stripped. Interfaces lacking such an address
// are omitted; errors yield an empty map.
func getInterfaceIPs() map[string]string {
	ips := make(map[string]string)
	ifaces, err := net.Interfaces()
	if err != nil {
		return ips
	}
	for _, iface := range ifaces {
		for _, addr := range iface.Addrs {
			// IPv4 only, skipping loopback addresses.
			looksV4 := strings.Contains(addr.Addr, ".")
			if !looksV4 || strings.HasPrefix(addr.Addr, "127.") {
				continue
			}
			ips[iface.Name] = strings.Split(addr.Addr, "/")[0]
			break
		}
	}
	return ips
}
// isNoiseInterface reports whether the interface name carries a known
// virtual/system prefix (loopback, AWDL, tunnels, bridges, ...) that should
// be excluded from throughput reporting. Matching is case-insensitive.
func isNoiseInterface(name string) bool {
	noisePrefixes := [...]string{"lo", "awdl", "utun", "llw", "bridge", "gif", "stf", "xhc", "anpi", "ap"}
	lowered := strings.ToLower(name)
	for _, prefix := range noisePrefixes {
		if strings.HasPrefix(lowered, prefix) {
			return true
		}
	}
	return false
}
// collectProxy detects an active HTTP/SOCKS proxy: first from the standard
// proxy environment variables, then (on macOS) from the system
// configuration via scutil. Returns a disabled ProxyStatus when neither
// source reports a proxy.
func collectProxy() ProxyStatus {
	// Check environment variables first.
	for _, env := range []string{"https_proxy", "HTTPS_PROXY", "http_proxy", "HTTP_PROXY"} {
		if val := os.Getenv(env); val != "" {
			proxyType := "HTTP"
			if strings.HasPrefix(val, "socks") {
				proxyType = "SOCKS"
			}
			// Extract host: strip the scheme and any user:pass@ credentials.
			host := val
			if strings.Contains(host, "://") {
				host = strings.SplitN(host, "://", 2)[1]
			}
			if idx := strings.Index(host, "@"); idx >= 0 {
				host = host[idx+1:]
			}
			return ProxyStatus{Enabled: true, Type: proxyType, Host: host}
		}
	}
	// macOS: check system proxy via scutil.
	if runtime.GOOS == "darwin" {
		ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
		defer cancel()
		out, err := runCmd(ctx, "scutil", "--proxy")
		if err == nil {
			if strings.Contains(out, "HTTPEnable : 1") || strings.Contains(out, "HTTPSEnable : 1") {
				return ProxyStatus{Enabled: true, Type: "System", Host: "System Proxy"}
			}
			if strings.Contains(out, "SOCKSEnable : 1") {
				return ProxyStatus{Enabled: true, Type: "SOCKS", Host: "System Proxy"}
			}
		}
	}
	return ProxyStatus{Enabled: false}
}

View File

@@ -1,53 +0,0 @@
package main
import (
"context"
"runtime"
"strconv"
"strings"
"time"
)
// collectTopProcesses returns up to five processes ordered by CPU usage,
// parsed from `ps` output (-r sorts by CPU, descending). macOS only;
// returns nil elsewhere or on failure.
func collectTopProcesses() []ProcessInfo {
	if runtime.GOOS != "darwin" {
		return nil
	}
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()
	// Use ps to get top processes by CPU.
	out, err := runCmd(ctx, "ps", "-Aceo", "pcpu,pmem,comm", "-r")
	if err != nil {
		return nil
	}
	var procs []ProcessInfo
	i := 0
	for line := range strings.Lines(strings.TrimSpace(out)) {
		if i == 0 {
			i++ // skip the ps header row
			continue
		}
		if i > 5 {
			break // keep at most five data rows
		}
		i++
		fields := strings.Fields(line)
		if len(fields) < 3 {
			continue
		}
		cpuVal, _ := strconv.ParseFloat(fields[0], 64)
		memVal, _ := strconv.ParseFloat(fields[1], 64)
		name := fields[len(fields)-1]
		// Strip path from command name.
		if idx := strings.LastIndex(name, "/"); idx >= 0 {
			name = name[idx+1:]
		}
		procs = append(procs, ProcessInfo{
			Name:   name,
			CPU:    cpuVal,
			Memory: memVal,
		})
	}
	return procs
}

View File

@@ -1,758 +0,0 @@
package main
import (
"fmt"
"sort"
"strconv"
"strings"
"github.com/charmbracelet/lipgloss"
)
// Shared lipgloss styles forming the dashboard color palette: title/primary
// accents, subtle gray for secondary text, green/yellow/red for severity,
// and a dim line color for card rules.
var (
	titleStyle   = lipgloss.NewStyle().Foreground(lipgloss.Color("#C79FD7")).Bold(true)
	subtleStyle  = lipgloss.NewStyle().Foreground(lipgloss.Color("#737373"))
	warnStyle    = lipgloss.NewStyle().Foreground(lipgloss.Color("#FFD75F"))
	dangerStyle  = lipgloss.NewStyle().Foreground(lipgloss.Color("#FF5F5F")).Bold(true)
	okStyle      = lipgloss.NewStyle().Foreground(lipgloss.Color("#A5D6A7"))
	lineStyle    = lipgloss.NewStyle().Foreground(lipgloss.Color("#404040"))
	primaryStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#BD93F9"))
)
// Default card column width and the glyph shown in each card's header.
const (
	colWidth    = 38
	iconCPU     = "◉"
	iconMemory  = "◫"
	iconGPU     = "◧"
	iconDisk    = "▥"
	iconNetwork = "⇅"
	iconBattery = "◪"
	iconSensors = "◈"
	iconProcs   = "❊"
)
// Mole body frames (facing right). Frames are cycled per animation tick by
// getMoleFrame; each frame is a slice of art rows.
var moleBody = [][]string{
	{
		` /\_/\`,
		` ___/ o o \`,
		`/___ =-= /`,
		`\____)-m-m)`,
	},
	{
		` /\_/\`,
		` ___/ o o \`,
		`/___ =-= /`,
		`\____)mm__)`,
	},
	{
		` /\_/\`,
		` ___/ · · \`,
		`/___ =-= /`,
		`\___)-m__m)`,
	},
	{
		` /\_/\`,
		` ___/ o o \`,
		`/___ =-= /`,
		`\____)-mm-)`,
	},
}
// Mirror mole body frames (facing left), used by getMoleFrame on the return
// leg of the walk cycle. Must stay the same length as moleBody.
var moleBodyMirror = [][]string{
	{
		` /\_/\`,
		` / o o \___`,
		` \ =-= ___\`,
		` (m-m-(____/`,
	},
	{
		` /\_/\`,
		` / o o \___`,
		` \ =-= ___\`,
		` (__mm(____/`,
	},
	{
		` /\_/\`,
		` / · · \___`,
		` \ =-= ___\`,
		` (m__m-(___/`,
	},
	{
		` /\_/\`,
		` / o o \___`,
		` \ =-= ___\`,
		` (-mm-(____/`,
	},
}
// getMoleFrame renders the animated mole.
func getMoleFrame(animFrame int, termWidth int) string {
moleWidth := 15
maxPos := max(termWidth-moleWidth, 0)
cycleLength := maxPos * 2
if cycleLength == 0 {
cycleLength = 1
}
pos := animFrame % cycleLength
movingLeft := pos > maxPos
if movingLeft {
pos = cycleLength - pos
}
// Use mirror frames when moving left
var frames [][]string
if movingLeft {
frames = moleBodyMirror
} else {
frames = moleBody
}
bodyIdx := animFrame % len(frames)
body := frames[bodyIdx]
padding := strings.Repeat(" ", pos)
var lines []string
for _, line := range body {
lines = append(lines, padding+line)
}
return strings.Join(lines, "\n")
}
// cardData is one dashboard card: a header glyph, a title, and the
// pre-rendered (already styled) content rows.
type cardData struct {
	icon  string   // header glyph, e.g. iconCPU
	title string   // card title text
	lines []string // body rows, already styled
}
// renderHeader builds the top banner: title, colored health score, a
// one-line hardware summary, the optional animated mole, and any error
// message. catHidden suppresses the animation entirely.
func renderHeader(m MetricsSnapshot, errMsg string, animFrame int, termWidth int, catHidden bool) string {
	title := titleStyle.Render("Mole Status")
	scoreStyle := getScoreStyle(m.HealthScore)
	scoreText := subtleStyle.Render("Health ") + scoreStyle.Render(fmt.Sprintf("● %d", m.HealthScore))
	// Hardware info for a single line.
	infoParts := []string{}
	if m.Hardware.Model != "" {
		infoParts = append(infoParts, primaryStyle.Render(m.Hardware.Model))
	}
	if m.Hardware.CPUModel != "" {
		cpuInfo := m.Hardware.CPUModel
		// Append GPU core count when available.
		if len(m.GPU) > 0 && m.GPU[0].CoreCount > 0 {
			cpuInfo += fmt.Sprintf(" (%dGPU)", m.GPU[0].CoreCount)
		}
		infoParts = append(infoParts, cpuInfo)
	}
	// RAM/disk sizes joined as "16 GB/512 GB".
	var specs []string
	if m.Hardware.TotalRAM != "" {
		specs = append(specs, m.Hardware.TotalRAM)
	}
	if m.Hardware.DiskSize != "" {
		specs = append(specs, m.Hardware.DiskSize)
	}
	if len(specs) > 0 {
		infoParts = append(infoParts, strings.Join(specs, "/"))
	}
	if m.Hardware.RefreshRate != "" {
		infoParts = append(infoParts, m.Hardware.RefreshRate)
	}
	if m.Hardware.OSVersion != "" {
		infoParts = append(infoParts, m.Hardware.OSVersion)
	}
	headerLine := title + " " + scoreText + " " + strings.Join(infoParts, " · ")
	// Show the mole animation unless hidden.
	var mole string
	if !catHidden {
		mole = getMoleFrame(animFrame, termWidth)
	}
	// Error message (if any) goes below the mole.
	if errMsg != "" {
		if mole == "" {
			return lipgloss.JoinVertical(lipgloss.Left, headerLine, "", dangerStyle.Render("ERROR: "+errMsg), "")
		}
		return lipgloss.JoinVertical(lipgloss.Left, headerLine, "", mole, dangerStyle.Render("ERROR: "+errMsg), "")
	}
	if mole == "" {
		return headerLine
	}
	return headerLine + "\n" + mole
}
// getScoreStyle picks the bold color style for a health score: green shades
// for good scores, yellow/orange for middling, red for poor.
func getScoreStyle(score int) lipgloss.Style {
	var color string
	switch {
	case score >= 90:
		color = "#87FF87"
	case score >= 75:
		color = "#87D787"
	case score >= 60:
		color = "#FFD75F"
	case score >= 40:
		color = "#FFAF5F"
	default:
		color = "#FF6B6B"
	}
	return lipgloss.NewStyle().Foreground(lipgloss.Color(color)).Bold(true)
}
// buildCards assembles the dashboard cards in display order. The sensors
// card is appended only when at least one sensor has real data.
// The second parameter is unused (kept for interface stability).
func buildCards(m MetricsSnapshot, _ int) []cardData {
	cards := []cardData{
		renderCPUCard(m.CPU),
		renderMemoryCard(m.Memory),
		renderDiskCard(m.Disks, m.DiskIO),
		renderBatteryCard(m.Batteries, m.Thermal),
		renderProcessCard(m.TopProcesses),
		renderNetworkCard(m.Network, m.Proxy),
	}
	if hasSensorData(m.Sensors) {
		cards = append(cards, renderSensorsCard(m.Sensors))
	}
	return cards
}
// hasSensorData reports whether at least one reading carries a real value:
// a positive Value with no informational Note attached.
func hasSensorData(sensors []SensorReading) bool {
	for i := range sensors {
		if sensors[i].Note != "" {
			continue
		}
		if sensors[i].Value > 0 {
			return true
		}
	}
	return false
}
// renderCPUCard builds the CPU card: total usage bar, up to three busiest
// cores (or a note when per-core data is estimated), and a load-average
// line annotated with the P/E core split when known.
func renderCPUCard(cpu CPUStatus) cardData {
	var lines []string
	lines = append(lines, fmt.Sprintf("Total %s %5.1f%%", progressBar(cpu.Usage), cpu.Usage))
	if cpu.PerCoreEstimated {
		lines = append(lines, subtleStyle.Render("Per-core data unavailable (using averaged load)"))
	} else if len(cpu.PerCore) > 0 {
		// Sort cores by usage, descending, and show the top three.
		type coreUsage struct {
			idx int
			val float64
		}
		var cores []coreUsage
		for i, v := range cpu.PerCore {
			cores = append(cores, coreUsage{i, v})
		}
		sort.Slice(cores, func(i, j int) bool { return cores[i].val > cores[j].val })
		maxCores := min(len(cores), 3)
		for i := 0; i < maxCores; i++ {
			c := cores[i]
			lines = append(lines, fmt.Sprintf("Core%-2d %s %5.1f%%", c.idx+1, progressBar(c.val), c.val))
		}
	}
	// Load line at the end.
	if cpu.PCoreCount > 0 && cpu.ECoreCount > 0 {
		lines = append(lines, fmt.Sprintf("Load %.2f / %.2f / %.2f (%dP+%dE)",
			cpu.Load1, cpu.Load5, cpu.Load15, cpu.PCoreCount, cpu.ECoreCount))
	} else {
		lines = append(lines, fmt.Sprintf("Load %.2f / %.2f / %.2f (%d cores)",
			cpu.Load1, cpu.Load5, cpu.Load15, cpu.LogicalCPU))
	}
	return cardData{icon: iconCPU, title: "CPU", lines: lines}
}
// renderMemoryCard builds the Memory card. The layout depends on whether
// swap is present: with swap the rows are Used/Free/Swap/Total/Avail;
// without it they are Used/Free/Total/[Cached]/Avail. A colored pressure
// status row is appended when a pressure level is known.
func renderMemoryCard(mem MemoryStatus) cardData {
	// Check if swap is being used (or at least allocated).
	hasSwap := mem.SwapTotal > 0 || mem.SwapUsed > 0
	var lines []string
	// Line 1: Used.
	lines = append(lines, fmt.Sprintf("Used %s %5.1f%%", progressBar(mem.UsedPercent), mem.UsedPercent))
	// Line 2: Free (complement of used).
	freePercent := 100 - mem.UsedPercent
	lines = append(lines, fmt.Sprintf("Free %s %5.1f%%", progressBar(freePercent), freePercent))
	if hasSwap {
		// Swap bar plus "(used/total)" text.
		var swapPercent float64
		if mem.SwapTotal > 0 {
			swapPercent = (float64(mem.SwapUsed) / float64(mem.SwapTotal)) * 100.0
		}
		swapText := fmt.Sprintf("(%s/%s)", humanBytesCompact(mem.SwapUsed), humanBytesCompact(mem.SwapTotal))
		lines = append(lines, fmt.Sprintf("Swap %s %5.1f%% %s", progressBar(swapPercent), swapPercent, swapText))
		lines = append(lines, fmt.Sprintf("Total %s / %s", humanBytes(mem.Used), humanBytes(mem.Total)))
		lines = append(lines, fmt.Sprintf("Avail %s", humanBytes(mem.Total-mem.Used))) // Simplified avail logic for consistency
	} else {
		lines = append(lines, fmt.Sprintf("Total %s / %s", humanBytes(mem.Used), humanBytes(mem.Total)))
		if mem.Cached > 0 {
			lines = append(lines, fmt.Sprintf("Cached %s", humanBytes(mem.Cached)))
		}
		// Avail is approximated as Total - Used here (true "available" memory
		// is more nuanced, but this keeps the card consistent).
		available := mem.Total - mem.Used
		lines = append(lines, fmt.Sprintf("Avail %s", humanBytes(available)))
	}
	// Memory pressure status, colored by severity.
	if mem.Pressure != "" {
		pressureStyle := okStyle
		pressureText := "Status " + mem.Pressure
		switch mem.Pressure {
		case "warn":
			pressureStyle = warnStyle
		case "critical":
			pressureStyle = dangerStyle
		}
		lines = append(lines, pressureStyle.Render(pressureText))
	}
	return cardData{icon: iconMemory, title: "Memory", lines: lines}
}
// renderDiskCard builds the Disk card: one usage row per disk, grouped as
// internal ("INTR") then external ("EXTR"), followed by read/write IO rate
// bars.
func renderDiskCard(disks []DiskStatus, io DiskIOStatus) cardData {
	var lines []string
	if len(disks) == 0 {
		lines = append(lines, subtleStyle.Render("Collecting..."))
	} else {
		internal, external := splitDisks(disks)
		// Emit a row per disk; labels get a numeric suffix only when the
		// group has more than one disk.
		addGroup := func(prefix string, list []DiskStatus) {
			if len(list) == 0 {
				return
			}
			for i, d := range list {
				label := diskLabel(prefix, i, len(list))
				lines = append(lines, formatDiskLine(label, d))
			}
		}
		addGroup("INTR", internal)
		addGroup("EXTR", external)
		if len(lines) == 0 {
			lines = append(lines, subtleStyle.Render("No disks detected"))
		}
	}
	readBar := ioBar(io.ReadRate)
	writeBar := ioBar(io.WriteRate)
	lines = append(lines, fmt.Sprintf("Read %s %.1f MB/s", readBar, io.ReadRate))
	lines = append(lines, fmt.Sprintf("Write %s %.1f MB/s", writeBar, io.WriteRate))
	return cardData{icon: iconDisk, title: "Disk", lines: lines}
}
// splitDisks partitions disks into internal and external groups, preserving
// their original relative order.
func splitDisks(disks []DiskStatus) (internal, external []DiskStatus) {
	for _, disk := range disks {
		switch {
		case disk.External:
			external = append(external, disk)
		default:
			internal = append(internal, disk)
		}
	}
	return internal, external
}
// diskLabel returns the bare prefix when the group holds a single disk,
// otherwise appends a 1-based index ("INTR1", "INTR2", ...).
func diskLabel(prefix string, index int, total int) string {
	if total > 1 {
		return fmt.Sprintf("%s%d", prefix, index+1)
	}
	return prefix
}
// formatDiskLine renders one disk row: padded label, usage bar, percent,
// and "(used/total)" sizes. An empty label falls back to "DISK".
func formatDiskLine(label string, d DiskStatus) string {
	if label == "" {
		label = "DISK"
	}
	bar := progressBar(d.UsedPercent)
	used := humanBytesShort(d.Used)
	total := humanBytesShort(d.Total)
	return fmt.Sprintf("%-6s %s %5.1f%% (%s/%s)", label, bar, d.UsedPercent, used, total)
}
// ioBar draws a five-segment bar for a disk IO rate, one segment per
// 10 MB/s, colored green/yellow/red at the 30/80 MB/s thresholds.
func ioBar(rate float64) string {
	segments := int(rate / 10.0)
	if segments > 5 {
		segments = 5
	}
	if segments < 0 {
		segments = 0
	}
	bar := strings.Repeat("▮", segments) + strings.Repeat("▯", 5-segments)
	switch {
	case rate > 80:
		return dangerStyle.Render(bar)
	case rate > 30:
		return warnStyle.Render(bar)
	default:
		return okStyle.Render(bar)
	}
}
// renderProcessCard builds the Processes card, listing up to three
// processes with a truncated name, a mini CPU bar, and the CPU percentage.
func renderProcessCard(procs []ProcessInfo) cardData {
	var lines []string
	maxProcs := 3
	for i, p := range procs {
		if i >= maxProcs {
			break
		}
		name := shorten(p.Name, 12)
		cpuBar := miniBar(p.CPU)
		lines = append(lines, fmt.Sprintf("%-12s %s %5.1f%%", name, cpuBar, p.CPU))
	}
	if len(lines) == 0 {
		lines = append(lines, subtleStyle.Render("No data"))
	}
	return cardData{icon: iconProcs, title: "Processes", lines: lines}
}
// miniBar draws a five-segment bar for a percentage (one segment per 20%),
// colored by the shared percent thresholds.
func miniBar(percent float64) string {
	segments := int(percent / 20)
	if segments > 5 {
		segments = 5
	}
	if segments < 0 {
		segments = 0
	}
	return colorizePercent(percent, strings.Repeat("▮", segments)+strings.Repeat("▯", 5-segments))
}
// renderNetworkCard builds the Network card: aggregate down/up rate bars
// across all reported interfaces, plus an info row with the active proxy
// type and the primary (en0) IPv4 address when available.
func renderNetworkCard(netStats []NetworkStatus, proxy ProxyStatus) cardData {
	var lines []string
	var totalRx, totalTx float64
	var primaryIP string
	for _, n := range netStats {
		totalRx += n.RxRateMBs
		totalTx += n.TxRateMBs
		// en0 is treated as the primary interface for the IP display.
		if primaryIP == "" && n.IP != "" && n.Name == "en0" {
			primaryIP = n.IP
		}
	}
	if len(netStats) == 0 {
		lines = []string{subtleStyle.Render("Collecting...")}
	} else {
		rxBar := netBar(totalRx)
		txBar := netBar(totalTx)
		lines = append(lines, fmt.Sprintf("Down %s %s", rxBar, formatRate(totalRx)))
		lines = append(lines, fmt.Sprintf("Up %s %s", txBar, formatRate(totalTx)))
		// Show proxy and IP on one line.
		var infoParts []string
		if proxy.Enabled {
			infoParts = append(infoParts, "Proxy "+proxy.Type)
		}
		if primaryIP != "" {
			infoParts = append(infoParts, primaryIP)
		}
		if len(infoParts) > 0 {
			lines = append(lines, strings.Join(infoParts, " · "))
		}
	}
	return cardData{icon: iconNetwork, title: "Network", lines: lines}
}
// netBar draws a five-segment bar for a network rate, one segment per
// 2 MB/s, colored green/yellow/red at the 3/8 MB/s thresholds.
func netBar(rate float64) string {
	segments := int(rate / 2.0)
	if segments > 5 {
		segments = 5
	}
	if segments < 0 {
		segments = 0
	}
	bar := strings.Repeat("▮", segments) + strings.Repeat("▯", 5-segments)
	switch {
	case rate > 8:
		return dangerStyle.Render(bar)
	case rate > 3:
		return warnStyle.Render(bar)
	default:
		return okStyle.Render(bar)
	}
}
// renderBatteryCard builds the Power card from the first battery plus
// thermal data: charge level bar, health/capacity bar, a status row
// (state, time remaining, wattage), and a health row (condition, cycle
// count, CPU temperature, fan speed). Shows "No battery" for desktops.
func renderBatteryCard(batts []BatteryStatus, thermal ThermalStatus) cardData {
	var lines []string
	if len(batts) == 0 {
		lines = append(lines, subtleStyle.Render("No battery"))
	} else {
		b := batts[0]
		statusLower := strings.ToLower(b.Status)
		percentText := fmt.Sprintf("%5.1f%%", b.Percent)
		// Low charge is only alarming when not plugged in.
		if b.Percent < 20 && statusLower != "charging" && statusLower != "charged" {
			percentText = dangerStyle.Render(percentText)
		}
		lines = append(lines, fmt.Sprintf("Level %s %s", batteryProgressBar(b.Percent), percentText))
		// Add capacity (battery health) line if available.
		if b.Capacity > 0 {
			capacityText := fmt.Sprintf("%5d%%", b.Capacity)
			if b.Capacity < 70 {
				capacityText = dangerStyle.Render(capacityText)
			} else if b.Capacity < 85 {
				capacityText = warnStyle.Render(capacityText)
			}
			lines = append(lines, fmt.Sprintf("Health %s %s", batteryProgressBar(float64(b.Capacity)), capacityText))
		}
		// Status row: state (capitalized), time remaining, wattage.
		statusIcon := ""
		statusStyle := subtleStyle
		if statusLower == "charging" || statusLower == "charged" {
			statusIcon = " ⚡"
			statusStyle = okStyle
		} else if b.Percent < 20 {
			statusStyle = dangerStyle
		}
		statusText := b.Status
		if len(statusText) > 0 {
			statusText = strings.ToUpper(statusText[:1]) + strings.ToLower(statusText[1:])
		}
		if b.TimeLeft != "" {
			statusText += " · " + b.TimeLeft
		}
		// Add power info: system/adapter watts while charging, battery
		// draw while discharging.
		if statusLower == "charging" || statusLower == "charged" {
			if thermal.SystemPower > 0 {
				statusText += fmt.Sprintf(" · %.0fW", thermal.SystemPower)
			} else if thermal.AdapterPower > 0 {
				statusText += fmt.Sprintf(" · %.0fW Adapter", thermal.AdapterPower)
			}
		} else if thermal.BatteryPower > 0 {
			// Only show battery power when discharging (positive value).
			statusText += fmt.Sprintf(" · %.0fW", thermal.BatteryPower)
		}
		lines = append(lines, statusStyle.Render(statusText+statusIcon))
		// Health row: condition, cycles, CPU temp (colored), fan RPM.
		healthParts := []string{}
		if b.Health != "" {
			healthParts = append(healthParts, b.Health)
		}
		if b.CycleCount > 0 {
			healthParts = append(healthParts, fmt.Sprintf("%d cycles", b.CycleCount))
		}
		if thermal.CPUTemp > 0 {
			tempText := fmt.Sprintf("%.0f°C", thermal.CPUTemp)
			if thermal.CPUTemp > 80 {
				tempText = dangerStyle.Render(tempText)
			} else if thermal.CPUTemp > 60 {
				tempText = warnStyle.Render(tempText)
			}
			healthParts = append(healthParts, tempText)
		}
		if thermal.FanSpeed > 0 {
			healthParts = append(healthParts, fmt.Sprintf("%d RPM", thermal.FanSpeed))
		}
		if len(healthParts) > 0 {
			lines = append(lines, strings.Join(healthParts, " · "))
		}
	}
	return cardData{icon: iconBattery, title: "Power", lines: lines}
}
// renderSensorsCard builds the Sensors card: one row per reading that has a
// real value (entries with a Note are informational and skipped), with the
// temperature colorized by severity.
func renderSensorsCard(sensors []SensorReading) cardData {
	var lines []string
	for _, s := range sensors {
		if s.Note != "" {
			continue
		}
		lines = append(lines, fmt.Sprintf("%-12s %s", shorten(s.Label, 12), colorizeTemp(s.Value)+s.Unit))
	}
	if len(lines) == 0 {
		lines = append(lines, subtleStyle.Render("No sensors"))
	}
	return cardData{icon: iconSensors, title: "Sensors", lines: lines}
}
// renderCard renders a card as "icon title ╌╌╌" header plus its content
// lines, padded with blank lines up to height so side-by-side cards align.
func renderCard(data cardData, width int, height int) string {
	titleText := data.icon + " " + data.title
	// Rule fills the rest of the row, with a minimum visible length.
	lineLen := max(width-lipgloss.Width(titleText)-2, 4)
	header := titleStyle.Render(titleText) + " " + lineStyle.Render(strings.Repeat("╌", lineLen))
	content := header + "\n" + strings.Join(data.lines, "\n")
	lines := strings.Split(content, "\n")
	for len(lines) < height {
		lines = append(lines, "")
	}
	return strings.Join(lines, "\n")
}
func progressBar(percent float64) string {
total := 16
if percent < 0 {
percent = 0
}
if percent > 100 {
percent = 100
}
filled := int(percent / 100 * float64(total))
var builder strings.Builder
for i := range total {
if i < filled {
builder.WriteString("█")
} else {
builder.WriteString("░")
}
}
return colorizePercent(percent, builder.String())
}
func batteryProgressBar(percent float64) string {
total := 16
if percent < 0 {
percent = 0
}
if percent > 100 {
percent = 100
}
filled := int(percent / 100 * float64(total))
var builder strings.Builder
for i := range total {
if i < filled {
builder.WriteString("█")
} else {
builder.WriteString("░")
}
}
return colorizeBattery(percent, builder.String())
}
// colorizePercent colors s by load severity: red at or above 85%, yellow
// at or above 60%, green otherwise.
func colorizePercent(percent float64, s string) string {
	if percent >= 85 {
		return dangerStyle.Render(s)
	}
	if percent >= 60 {
		return warnStyle.Render(s)
	}
	return okStyle.Render(s)
}
// colorizeBattery colors s by remaining charge: red below 20%, yellow
// below 50%, green otherwise.
func colorizeBattery(percent float64, s string) string {
	if percent < 20 {
		return dangerStyle.Render(s)
	}
	if percent < 50 {
		return warnStyle.Render(s)
	}
	return okStyle.Render(s)
}
// colorizeTemp formats a temperature to one decimal place and colors it:
// red at or above 85, yellow at or above 70, subtle gray otherwise.
func colorizeTemp(t float64) string {
	text := fmt.Sprintf("%.1f", t)
	switch {
	case t >= 85:
		return dangerStyle.Render(text)
	case t >= 70:
		return warnStyle.Render(text)
	default:
		return subtleStyle.Render(text)
	}
}
// formatRate renders a MB/s value with precision scaled to its magnitude;
// anything below 0.01 collapses to "0 MB/s".
func formatRate(mb float64) string {
	switch {
	case mb < 0.01:
		return "0 MB/s"
	case mb < 1:
		return fmt.Sprintf("%.2f MB/s", mb)
	case mb < 10:
		return fmt.Sprintf("%.1f MB/s", mb)
	default:
		return fmt.Sprintf("%.0f MB/s", mb)
	}
}
// humanBytes renders a byte count with one decimal and a spaced unit
// ("1.5 GB"), falling back to "N B" below 1 KiB.
//
// Fix: boundary comparisons now use >= instead of >, so an exact power of
// 1024 promotes to the larger unit (previously exactly 1 GiB rendered as
// "1024.0 MB"), matching sibling helpers humanBytesShort/humanBytesCompact.
func humanBytes(v uint64) string {
	switch {
	case v >= 1<<40:
		return fmt.Sprintf("%.1f TB", float64(v)/(1<<40))
	case v >= 1<<30:
		return fmt.Sprintf("%.1f GB", float64(v)/(1<<30))
	case v >= 1<<20:
		return fmt.Sprintf("%.1f MB", float64(v)/(1<<20))
	case v >= 1<<10:
		return fmt.Sprintf("%.1f KB", float64(v)/(1<<10))
	default:
		return strconv.FormatUint(v, 10) + " B"
	}
}
// humanBytesShort renders a byte count as a rounded integer with a
// single-letter unit ("3G", "512K"); values below 1 KiB are a bare number.
func humanBytesShort(v uint64) string {
	units := []struct {
		limit  uint64
		suffix string
	}{
		{1 << 40, "T"},
		{1 << 30, "G"},
		{1 << 20, "M"},
		{1 << 10, "K"},
	}
	for _, u := range units {
		if v >= u.limit {
			return fmt.Sprintf("%.0f%s", float64(v)/float64(u.limit), u.suffix)
		}
	}
	return strconv.FormatUint(v, 10)
}
// humanBytesCompact renders a byte count with one decimal and a
// single-letter unit ("1.5G"); values below 1 KiB are a bare number.
func humanBytesCompact(v uint64) string {
	units := []struct {
		limit  uint64
		suffix string
	}{
		{1 << 40, "T"},
		{1 << 30, "G"},
		{1 << 20, "M"},
		{1 << 10, "K"},
	}
	for _, u := range units {
		if v >= u.limit {
			return fmt.Sprintf("%.1f%s", float64(v)/float64(u.limit), u.suffix)
		}
	}
	return strconv.FormatUint(v, 10)
}
// shorten truncates s to at most maxLen characters, replacing the tail
// with "…" when truncation occurs.
//
// Fix: operates on runes instead of bytes, so multi-byte UTF-8 characters
// (e.g. CJK process names) are never split mid-sequence — the previous
// byte-based slicing could emit invalid UTF-8. ASCII behavior is unchanged.
func shorten(s string, maxLen int) string {
	runes := []rune(s)
	if len(runes) <= maxLen {
		return s
	}
	return string(runes[:maxLen-1]) + "…"
}
// renderTwoColumns lays out cards in a two-column grid: each pair is
// rendered to a common height and joined horizontally, rows are separated
// by a blank line, and a trailing odd card occupies the left column alone.
func renderTwoColumns(cards []cardData, width int) string {
	if len(cards) == 0 {
		return ""
	}
	// Column width: default, or half the terminal width when wider.
	cw := colWidth
	if width > 0 && width/2-2 > cw {
		cw = width/2 - 2
	}
	var rows []string
	for i := 0; i < len(cards); i += 2 {
		// First pass (height 0) just measures each card's natural height.
		left := renderCard(cards[i], cw, 0)
		right := ""
		if i+1 < len(cards) {
			right = renderCard(cards[i+1], cw, 0)
		}
		// Second pass pads both cards to the taller of the two.
		targetHeight := maxInt(lipgloss.Height(left), lipgloss.Height(right))
		left = renderCard(cards[i], cw, targetHeight)
		if right != "" {
			right = renderCard(cards[i+1], cw, targetHeight)
			rows = append(rows, lipgloss.JoinHorizontal(lipgloss.Top, left, " ", right))
		} else {
			rows = append(rows, left)
		}
	}
	// Insert a blank line between rows.
	var spacedRows []string
	for i, r := range rows {
		if i > 0 {
			spacedRows = append(spacedRows, "")
		}
		spacedRows = append(spacedRows, r)
	}
	return lipgloss.JoinVertical(lipgloss.Left, spacedRows...)
}
// maxInt returns the larger of two ints.
func maxInt(a, b int) int {
	if b > a {
		return b
	}
	return a
}

34
go.mod
View File

@@ -1,38 +1,10 @@
module github.com/tw93/mole
module github.com/tw93/mole/windows
go 1.24.0
toolchain go1.24.6
require (
github.com/cespare/xxhash/v2 v2.3.0
github.com/charmbracelet/bubbletea v1.3.10
github.com/charmbracelet/lipgloss v1.1.0
github.com/shirou/gopsutil/v3 v3.24.5
golang.org/x/sync v0.19.0
)
require (
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
github.com/charmbracelet/x/ansi v0.10.1 // indirect
github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
github.com/charmbracelet/x/term v0.2.1 // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
github.com/muesli/cancelreader v0.2.2 // indirect
github.com/muesli/termenv v0.16.0 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/shoenig/go-m1cpu v0.1.7 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
golang.org/x/sys v0.36.0 // indirect
golang.org/x/text v0.3.8 // indirect
github.com/yusufpapurcu/wmi v1.2.4
golang.org/x/sys v0.36.0
)

58
go.sum
View File

@@ -1,23 +1,13 @@
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4=
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs=
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk=
github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
github.com/charmbracelet/x/ansi v0.10.1 h1:rL3Koar5XvX0pHGfovN03f5cxLbCF2YvLeyz7D2jVDQ=
github.com/charmbracelet/x/ansi v0.10.1/go.mod h1:3RQDQ6lDnROptfpWuUVIUG64bD2g2BgntdxH0Ya5TeE=
github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8=
github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
github.com/charmbracelet/bubbletea v0.25.0 h1:bAfwk7jRz7FKFl9RzlIULPkStffg5k6pNt5dywy4TcM=
github.com/charmbracelet/bubbletea v0.25.0/go.mod h1:EN3QDR1T5ZdWmdfDzYcqOCAps45+QIJbLOBxmVNWNNg=
github.com/charmbracelet/lipgloss v0.9.1 h1:PNyd3jvaJbg4jRHKWXnCj1akQm4rh8dbEzN1p/u1KWg=
github.com/charmbracelet/lipgloss v0.9.1/go.mod h1:1mPmG4cxScwUQALAAnacHaigiiHB9Pmr+v1VEawJl6I=
github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 h1:q2hJAaP1k2wIvVRd/hEHD7lacgqrCPS+k8g1MndzfWY=
github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -27,25 +17,29 @@ github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b h1:1XF24mVaiu7u+CFywTdcDo2ie1pzzhwjt6RHqzpMU34=
github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b/go.mod h1:fQuZ0gauxyBcmsdE3ZT4NasjaRdxmbCS0jRHsrWu3Ho=
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s=
github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8=
github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo=
github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI=
github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk=
github.com/shoenig/go-m1cpu v0.1.7 h1:C76Yd0ObKR82W4vhfjZiCp0HxcSZ8Nqd84v+HZ0qyI0=
@@ -58,22 +52,20 @@ github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFA
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 h1:MDc5xs78ZrZr3HMQugiXOAkSZtfTpbJLDr/lwfgO53E=
golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -1,767 +0,0 @@
#!/bin/bash
# Mole - Installer for manual installs.
# Fetches source/binaries and installs to prefix.
# Supports update and edge installs.
set -euo pipefail
# ANSI color codes used by the log helpers below.
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'
# PID of the background spinner subshell; empty when no spinner is running.
_SPINNER_PID=""
# Start a background spinner on stdout with the given message.
# Non-TTY output degrades to a single static log line with no animation.
start_line_spinner() {
    local msg="$1"
    [[ ! -t 1 ]] && {
        echo -e "${BLUE}|${NC} $msg"
        return
    }
    local chars="|/-\\"
    # NOTE(review): chars is assigned just above, so this fallback never fires.
    [[ -z "$chars" ]] && chars='|/-\\'
    local i=0
    # Background subshell cycles through the spinner glyphs until killed.
    (while true; do
        c="${chars:$((i % ${#chars})):1}"
        printf "\r${BLUE}%s${NC} %s" "$c" "$msg"
        ((i++))
        sleep 0.12
    done) &
    _SPINNER_PID=$!
}
# Stop the spinner if one is running, then clear the current line.
stop_line_spinner() { if [[ -n "$_SPINNER_PID" ]]; then
    kill "$_SPINNER_PID" 2> /dev/null || true
    wait "$_SPINNER_PID" 2> /dev/null || true
    _SPINNER_PID=""
    printf "\r\033[K"
fi; }
# Verbose logging by default; perform_update temporarily silences it.
VERBOSE=1
# Icons duplicated from lib/core/common.sh (install.sh runs standalone).
# Avoid readonly to prevent conflicts when sourcing common.sh later.
ICON_SUCCESS="✓"
ICON_ADMIN="●"
ICON_CONFIRM="◎"
ICON_ERROR="☻"
# Logging helpers. All honor VERBOSE except log_error, which always prints.
log_info() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}$1${NC}"; }
log_success() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${GREEN}${ICON_SUCCESS}${NC} $1"; }
log_warning() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${YELLOW}$1${NC}"; }
# NOTE(review): log_error renders in YELLOW rather than RED — confirm intentional.
log_error() { echo -e "${YELLOW}${ICON_ERROR}${NC} $1"; }
log_admin() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}${ICON_ADMIN}${NC} $1"; }
log_confirm() { [[ ${VERBOSE} -eq 1 ]] && echo -e "${BLUE}${ICON_CONFIRM}${NC} $1"; }
# Install defaults (overridable via --prefix and --config)
INSTALL_DIR="/usr/local/bin"
CONFIG_DIR="$HOME/.config/mole"
# SOURCE_DIR is resolved lazily by resolve_source_dir.
SOURCE_DIR=""
# ACTION is "install" by default; --update switches it to "update".
ACTION="install"
# Resolve source dir (local checkout, env override, or download).
# Return 0 (true) when writing to INSTALL_DIR requires elevated privileges.
needs_sudo() {
    if [[ -e "$INSTALL_DIR" ]]; then
        [[ ! -w "$INSTALL_DIR" ]]
        return
    fi
    # Install dir does not exist yet: check the parent we would create it in.
    local parent_dir
    parent_dir="$(dirname "$INSTALL_DIR")"
    [[ ! -w "$parent_dir" ]]
}
# Run a command with sudo only when needs_sudo says it is required.
maybe_sudo() {
    if needs_sudo; then
        sudo "$@"
    else
        "$@"
    fi
}
# Locate a Mole source tree and set SOURCE_DIR. Tries, in order: an already
# valid SOURCE_DIR, the directory containing this script, CLEAN_SOURCE_DIR,
# then a network fetch (release tarball first, shallow git clone fallback).
resolve_source_dir() {
    if [[ -n "$SOURCE_DIR" && -d "$SOURCE_DIR" && -f "$SOURCE_DIR/mole" ]]; then
        return 0
    fi
    if [[ -n "${BASH_SOURCE[0]:-}" && -f "${BASH_SOURCE[0]}" ]]; then
        local script_dir
        script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
        if [[ -f "$script_dir/mole" ]]; then
            SOURCE_DIR="$script_dir"
            return 0
        fi
    fi
    if [[ -n "${CLEAN_SOURCE_DIR:-}" && -d "$CLEAN_SOURCE_DIR" && -f "$CLEAN_SOURCE_DIR/mole" ]]; then
        SOURCE_DIR="$CLEAN_SOURCE_DIR"
        return 0
    fi
    local tmp
    tmp="$(mktemp -d)"
    # NOTE(review): this replaces any previously installed EXIT trap.
    trap 'stop_line_spinner 2>/dev/null; rm -rf "$tmp"' EXIT
    # Choose a ref: MOLE_VERSION, else latest release tag (API, then git refs),
    # else fall back to the main branch.
    local branch="${MOLE_VERSION:-}"
    if [[ -z "$branch" ]]; then
        branch="$(get_latest_release_tag || true)"
    fi
    if [[ -z "$branch" ]]; then
        branch="$(get_latest_release_tag_from_git || true)"
    fi
    if [[ -z "$branch" ]]; then
        branch="main"
    fi
    if [[ "$branch" != "main" && "$branch" != "dev" ]]; then
        branch="$(normalize_release_tag "$branch")"
    fi
    local url="https://github.com/tw93/mole/archive/refs/heads/main.tar.gz"
    if [[ "$branch" == "dev" ]]; then
        url="https://github.com/tw93/mole/archive/refs/heads/dev.tar.gz"
    elif [[ "$branch" != "main" ]]; then
        url="https://github.com/tw93/mole/archive/refs/tags/${branch}.tar.gz"
    fi
    start_line_spinner "Fetching Mole source (${branch})..."
    if command -v curl > /dev/null 2>&1; then
        if curl -fsSL -o "$tmp/mole.tar.gz" "$url" 2> /dev/null; then
            if tar -xzf "$tmp/mole.tar.gz" -C "$tmp" 2> /dev/null; then
                stop_line_spinner
                local extracted_dir
                extracted_dir=$(find "$tmp" -mindepth 1 -maxdepth 1 -type d | head -n 1)
                if [[ -n "$extracted_dir" && -f "$extracted_dir/mole" ]]; then
                    SOURCE_DIR="$extracted_dir"
                    return 0
                fi
            fi
        else
            stop_line_spinner
            # Only exit early for version tags (not for main/dev branches)
            if [[ "$branch" != "main" && "$branch" != "dev" ]]; then
                log_error "Failed to fetch version ${branch}. Check if tag exists."
                exit 1
            fi
        fi
    fi
    stop_line_spinner
    # Tarball path failed or curl missing: try a shallow git clone instead.
    start_line_spinner "Cloning Mole source..."
    if command -v git > /dev/null 2>&1; then
        local git_args=("--depth=1")
        if [[ "$branch" != "main" ]]; then
            git_args+=("--branch" "$branch")
        fi
        if git clone "${git_args[@]}" https://github.com/tw93/mole.git "$tmp/mole" > /dev/null 2>&1; then
            stop_line_spinner
            SOURCE_DIR="$tmp/mole"
            return 0
        fi
    fi
    stop_line_spinner
    log_error "Failed to fetch source files. Ensure curl or git is available."
    exit 1
}
# Version helpers
# Extract VERSION="x.y.z" from the source tree's mole launcher script.
get_source_version() {
    local source_mole="$SOURCE_DIR/mole"
    if [[ -f "$source_mole" ]]; then
        sed -n 's/^VERSION="\(.*\)"$/\1/p' "$source_mole" | head -n1
    fi
}
# Latest release tag via the GitHub releases API (short timeouts, quiet failure).
get_latest_release_tag() {
    local tag
    if ! command -v curl > /dev/null 2>&1; then
        return 1
    fi
    tag=$(curl -fsSL --connect-timeout 2 --max-time 3 \
        "https://api.github.com/repos/tw93/mole/releases/latest" 2> /dev/null |
        sed -n 's/.*"tag_name":[[:space:]]*"\([^"]*\)".*/\1/p' | head -n1)
    if [[ -z "$tag" ]]; then
        return 1
    fi
    printf '%s\n' "$tag"
}
# Latest V-prefixed tag read directly from remote git refs (API-less fallback).
get_latest_release_tag_from_git() {
    if ! command -v git > /dev/null 2>&1; then
        return 1
    fi
    git ls-remote --tags --refs https://github.com/tw93/mole.git 2> /dev/null |
        awk -F/ '{print $NF}' |
        grep -E '^V[0-9]' |
        sort -V |
        tail -n 1
}
# Normalize "v1.2" / "V1.2" / "1.2" style tags to the canonical "V1.2" form.
normalize_release_tag() {
    local tag="$1"
    while [[ "$tag" =~ ^[vV] ]]; do
        tag="${tag#v}"
        tag="${tag#V}"
    done
    if [[ -n "$tag" ]]; then
        printf 'V%s\n' "$tag"
    fi
}
# Version of the installed launcher: CLI output first, VERSION= line fallback.
get_installed_version() {
    local binary="$INSTALL_DIR/mole"
    if [[ -x "$binary" ]]; then
        local version
        version=$("$binary" --version 2> /dev/null | awk '/Mole version/ {print $NF; exit}')
        if [[ -n "$version" ]]; then
            echo "$version"
        else
            sed -n 's/^VERSION="\(.*\)"$/\1/p' "$binary" | head -n1
        fi
    fi
}
# CLI parsing (supports main/latest and version tokens).
# Parse CLI arguments: at most one positional version token
# (latest | main | dev | x.y.z / Vx.y.z / vx.y.z) plus the options
# --prefix DIR, --config DIR, --update, and --verbose/-v.
parse_args() {
    local -a args=("$@")
    local version_token=""
    local i skip_next=false
    # First pass: extract the version token, skipping option values.
    for i in "${!args[@]}"; do
        local token="${args[$i]}"
        [[ -z "$token" ]] && continue
        # Skip values for options that take arguments
        if [[ "$skip_next" == "true" ]]; then
            skip_next=false
            continue
        fi
        if [[ "$token" == "--prefix" || "$token" == "--config" ]]; then
            skip_next=true
            continue
        fi
        if [[ "$token" == -* ]]; then
            continue
        fi
        if [[ -n "$version_token" ]]; then
            log_error "Unexpected argument: $token"
            exit 1
        fi
        case "$token" in
            latest | main)
                export MOLE_VERSION="main"
                export MOLE_EDGE_INSTALL="true"
                version_token="$token"
                unset 'args[$i]'
                ;;
            dev)
                export MOLE_VERSION="dev"
                export MOLE_EDGE_INSTALL="true"
                version_token="$token"
                unset 'args[$i]'
                ;;
            [0-9]* | V[0-9]* | v[0-9]*)
                export MOLE_VERSION="$token"
                version_token="$token"
                unset 'args[$i]'
                ;;
            *)
                log_error "Unknown option: $token"
                exit 1
                ;;
        esac
    done
    # Re-seat positional parameters without the consumed version token.
    if [[ ${#args[@]} -gt 0 ]]; then
        set -- ${args[@]+"${args[@]}"}
    else
        set --
    fi
    # Second pass: regular option parsing.
    while [[ $# -gt 0 ]]; do
        case $1 in
            --prefix)
                if [[ -z "${2:-}" ]]; then
                    log_error "Missing value for --prefix"
                    exit 1
                fi
                INSTALL_DIR="$2"
                shift 2
                ;;
            --config)
                if [[ -z "${2:-}" ]]; then
                    log_error "Missing value for --config"
                    exit 1
                fi
                CONFIG_DIR="$2"
                shift 2
                ;;
            --update)
                ACTION="update"
                shift 1
                ;;
            --verbose | -v)
                VERBOSE=1
                shift 1
                ;;
            --help | -h)
                # NOTE(review): --help/-h is rejected as unknown — no usage text
                # exists in this script; confirm whether help output is intended.
                log_error "Unknown option: $1"
                exit 1
                ;;
            *)
                log_error "Unknown option: $1"
                exit 1
                ;;
        esac
    done
}
# Environment checks and directory setup
# Validate the environment: macOS only, and resolve conflicts with a
# Homebrew-managed installation before proceeding.
check_requirements() {
    if [[ "$OSTYPE" != "darwin"* ]]; then
        log_error "This tool is designed for macOS only"
        exit 1
    fi
    if command -v brew > /dev/null 2>&1 && brew list mole > /dev/null 2>&1; then
        local mole_path
        mole_path=$(command -v mole 2> /dev/null || true)
        local is_homebrew_binary=false
        # A genuine Homebrew install is a symlink pointing into the Cellar.
        if [[ -n "$mole_path" && -L "$mole_path" ]]; then
            if readlink "$mole_path" | grep -q "Cellar/mole"; then
                is_homebrew_binary=true
            fi
        fi
        if [[ "$is_homebrew_binary" == "true" ]]; then
            if [[ "$ACTION" == "update" ]]; then
                # perform_update handles the Homebrew path itself.
                return 0
            fi
            echo -e "${YELLOW}Mole is installed via Homebrew${NC}"
            echo ""
            echo "Choose one:"
            echo -e " 1. Update via Homebrew: ${GREEN}brew upgrade mole${NC}"
            echo -e " 2. Switch to manual: ${GREEN}brew uninstall --force mole${NC} then re-run this"
            echo ""
            exit 1
        else
            # brew thinks mole is installed but the binary isn't Homebrew's.
            log_warning "Cleaning up stale Homebrew installation..."
            brew uninstall --force mole > /dev/null 2>&1 || true
        fi
    fi
    if [[ ! -d "$(dirname "$INSTALL_DIR")" ]]; then
        log_error "Parent directory $(dirname "$INSTALL_DIR") does not exist"
        exit 1
    fi
}
# Create the install dir (with sudo when needed) and the user config tree.
create_directories() {
    if [[ ! -d "$INSTALL_DIR" ]]; then
        maybe_sudo mkdir -p "$INSTALL_DIR"
    fi
    if ! mkdir -p "$CONFIG_DIR" "$CONFIG_DIR/bin" "$CONFIG_DIR/lib"; then
        log_error "Failed to create config directory: $CONFIG_DIR"
        exit 1
    fi
}
# Binary install helpers
# Build one Go helper (analyze | status) from source into target_path.
# Returns non-zero when the name is unknown, Go is missing, the source
# directory is absent, or the build fails.
build_binary_from_source() {
    local binary_name="$1"
    local target_path="$2"
    local cmd_dir=""
    case "$binary_name" in
        analyze)
            cmd_dir="cmd/analyze"
            ;;
        status)
            cmd_dir="cmd/status"
            ;;
        *)
            return 1
            ;;
    esac
    if ! command -v go > /dev/null 2>&1; then
        return 1
    fi
    if [[ ! -d "$SOURCE_DIR/$cmd_dir" ]]; then
        return 1
    fi
    if [[ -t 1 ]]; then
        start_line_spinner "Building ${binary_name} from source..."
    else
        echo "Building ${binary_name} from source..."
    fi
    # Subshell keeps the cd from leaking into the caller's working directory.
    if (cd "$SOURCE_DIR" && go build -ldflags="-s -w" -o "$target_path" "./$cmd_dir" > /dev/null 2>&1); then
        if [[ -t 1 ]]; then stop_line_spinner; fi
        chmod +x "$target_path"
        log_success "Built ${binary_name} from source"
        return 0
    fi
    if [[ -t 1 ]]; then stop_line_spinner; fi
    log_warning "Failed to build ${binary_name} from source"
    return 1
}
# Install a Go helper binary into CONFIG_DIR/bin. Preference order:
# a prebuilt binary shipped in the source tree, a local source build
# (edge installs), then a GitHub release download with build fallback.
download_binary() {
    local binary_name="$1"
    local target_path="$CONFIG_DIR/bin/${binary_name}-go"
    local arch
    arch=$(uname -m)
    local arch_suffix="amd64"
    if [[ "$arch" == "arm64" ]]; then
        arch_suffix="arm64"
    fi
    if [[ -f "$SOURCE_DIR/bin/${binary_name}-go" ]]; then
        cp "$SOURCE_DIR/bin/${binary_name}-go" "$target_path"
        chmod +x "$target_path"
        log_success "Installed local ${binary_name} binary"
        return 0
    elif [[ -f "$SOURCE_DIR/bin/${binary_name}-darwin-${arch_suffix}" ]]; then
        cp "$SOURCE_DIR/bin/${binary_name}-darwin-${arch_suffix}" "$target_path"
        chmod +x "$target_path"
        log_success "Installed local ${binary_name} binary"
        return 0
    fi
    if [[ "${MOLE_EDGE_INSTALL:-}" == "true" ]]; then
        if build_binary_from_source "$binary_name" "$target_path"; then
            return 0
        fi
    fi
    local version
    version=$(get_source_version)
    if [[ -z "$version" ]]; then
        log_warning "Could not determine version for ${binary_name}, trying local build"
        if build_binary_from_source "$binary_name" "$target_path"; then
            return 0
        fi
        return 1
    fi
    local url="https://github.com/tw93/mole/releases/download/V${version}/${binary_name}-darwin-${arch_suffix}"
    # Skip preflight network checks to avoid false negatives.
    if [[ -t 1 ]]; then
        start_line_spinner "Downloading ${binary_name}..."
    else
        echo "Downloading ${binary_name}..."
    fi
    if curl -fsSL --connect-timeout 10 --max-time 60 -o "$target_path" "$url"; then
        if [[ -t 1 ]]; then stop_line_spinner; fi
        chmod +x "$target_path"
        log_success "Downloaded ${binary_name} binary"
    else
        if [[ -t 1 ]]; then stop_line_spinner; fi
        log_warning "Could not download ${binary_name} binary (v${version}), trying local build"
        if build_binary_from_source "$binary_name" "$target_path"; then
            return 0
        fi
        log_error "Failed to install ${binary_name} binary"
        return 1
    fi
}
# File installation (bin/lib/scripts + go helpers).
# Copy the mole/mo launchers into INSTALL_DIR and the modules/libraries into
# CONFIG_DIR, rewrite SCRIPT_DIR in the installed launcher, then fetch the
# Go helper binaries. Skips copies when source and destination coincide.
install_files() {
    resolve_source_dir
    local source_dir_abs
    local install_dir_abs
    local config_dir_abs
    source_dir_abs="$(cd "$SOURCE_DIR" && pwd)"
    install_dir_abs="$(cd "$INSTALL_DIR" && pwd)"
    config_dir_abs="$(cd "$CONFIG_DIR" && pwd)"
    if [[ -f "$SOURCE_DIR/mole" ]]; then
        if [[ "$source_dir_abs" != "$install_dir_abs" ]]; then
            if needs_sudo; then
                log_admin "Admin access required for /usr/local/bin"
            fi
            # Atomic update: copy to temporary name first, then move
            maybe_sudo cp "$SOURCE_DIR/mole" "$INSTALL_DIR/mole.new"
            maybe_sudo chmod +x "$INSTALL_DIR/mole.new"
            maybe_sudo mv -f "$INSTALL_DIR/mole.new" "$INSTALL_DIR/mole"
            log_success "Installed mole to $INSTALL_DIR"
        fi
    else
        log_error "mole executable not found in ${SOURCE_DIR:-unknown}"
        exit 1
    fi
    # The mo alias gets the same atomic copy treatment.
    if [[ -f "$SOURCE_DIR/mo" ]]; then
        if [[ "$source_dir_abs" == "$install_dir_abs" ]]; then
            log_success "mo alias already present"
        else
            maybe_sudo cp "$SOURCE_DIR/mo" "$INSTALL_DIR/mo.new"
            maybe_sudo chmod +x "$INSTALL_DIR/mo.new"
            maybe_sudo mv -f "$INSTALL_DIR/mo.new" "$INSTALL_DIR/mo"
            log_success "Installed mo alias"
        fi
    fi
    if [[ -d "$SOURCE_DIR/bin" ]]; then
        local source_bin_abs="$(cd "$SOURCE_DIR/bin" && pwd)"
        local config_bin_abs="$(cd "$CONFIG_DIR/bin" && pwd)"
        if [[ "$source_bin_abs" == "$config_bin_abs" ]]; then
            log_success "Modules already synced"
        else
            local -a bin_files=("$SOURCE_DIR/bin"/*)
            if [[ ${#bin_files[@]} -gt 0 ]]; then
                cp -r "${bin_files[@]}" "$CONFIG_DIR/bin/"
                for file in "$CONFIG_DIR/bin/"*; do
                    [[ -e "$file" ]] && chmod +x "$file"
                done
                log_success "Installed modules"
            fi
        fi
    fi
    if [[ -d "$SOURCE_DIR/lib" ]]; then
        local source_lib_abs="$(cd "$SOURCE_DIR/lib" && pwd)"
        local config_lib_abs="$(cd "$CONFIG_DIR/lib" && pwd)"
        if [[ "$source_lib_abs" == "$config_lib_abs" ]]; then
            log_success "Libraries already synced"
        else
            local -a lib_files=("$SOURCE_DIR/lib"/*)
            if [[ ${#lib_files[@]} -gt 0 ]]; then
                cp -r "${lib_files[@]}" "$CONFIG_DIR/lib/"
                log_success "Installed libraries"
            fi
        fi
    fi
    # Keep docs and the installer itself alongside the config tree.
    if [[ "$config_dir_abs" != "$source_dir_abs" ]]; then
        for file in README.md LICENSE install.sh; do
            if [[ -f "$SOURCE_DIR/$file" ]]; then
                cp -f "$SOURCE_DIR/$file" "$CONFIG_DIR/"
            fi
        done
    fi
    if [[ -f "$CONFIG_DIR/install.sh" ]]; then
        chmod +x "$CONFIG_DIR/install.sh"
    fi
    if [[ "$source_dir_abs" != "$install_dir_abs" ]]; then
        # Point the installed launcher's SCRIPT_DIR at the config tree
        # (BSD sed in-place syntax: -i '').
        maybe_sudo sed -i '' "s|SCRIPT_DIR=.*|SCRIPT_DIR=\"$CONFIG_DIR\"|" "$INSTALL_DIR/mole"
    fi
    if ! download_binary "analyze"; then
        exit 1
    fi
    if ! download_binary "status"; then
        exit 1
    fi
}
# Verification and PATH hint
# Sanity-check the install: launcher executable, core library present,
# and the launcher responds to --help.
verify_installation() {
    if [[ -x "$INSTALL_DIR/mole" ]] && [[ -f "$CONFIG_DIR/lib/core/common.sh" ]]; then
        if "$INSTALL_DIR/mole" --help > /dev/null 2>&1; then
            return 0
        else
            log_warning "Mole command installed but may not be working properly"
        fi
    else
        log_error "Installation verification failed"
        exit 1
    fi
}
# Warn when a non-default INSTALL_DIR is not on PATH; the default
# /usr/local/bin is assumed to already be there.
setup_path() {
    if [[ ":$PATH:" == *":$INSTALL_DIR:"* ]]; then
        return
    fi
    if [[ "$INSTALL_DIR" != "/usr/local/bin" ]]; then
        log_warning "$INSTALL_DIR is not in your PATH"
        echo ""
        echo "To use mole from anywhere, add this line to your shell profile:"
        echo "export PATH=\"$INSTALL_DIR:\$PATH\""
        echo ""
        echo "For example, add it to ~/.zshrc or ~/.bash_profile"
    fi
}
# Print the post-install/update summary and usage hints.
# Args: action ("installed"|"updated"), new version, optional previous version.
# Silent unless VERBOSE is on.
print_usage_summary() {
    local action="$1"
    local new_version="$2"
    local previous_version="${3:-}"
    if [[ ${VERBOSE} -ne 1 ]]; then
        return
    fi
    echo ""
    local message="Mole ${action} successfully"
    if [[ "$action" == "updated" && -n "$previous_version" && -n "$new_version" && "$previous_version" != "$new_version" ]]; then
        message+=" (${previous_version} -> ${new_version})"
    elif [[ -n "$new_version" ]]; then
        message+=" (version ${new_version})"
    fi
    log_confirm "$message"
    echo ""
    echo "Usage:"
    # Show bare "mo" commands only when the install dir is on PATH.
    if [[ ":$PATH:" == *":$INSTALL_DIR:"* ]]; then
        echo " mo # Interactive menu"
        echo " mo clean # Deep cleanup"
        echo " mo uninstall # Remove apps + leftovers"
        echo " mo optimize # Check and maintain system"
        echo " mo analyze # Explore disk usage"
        echo " mo status # Monitor system health"
        echo " mo touchid # Configure Touch ID for sudo"
        echo " mo update # Update to latest version"
        echo " mo --help # Show all commands"
    else
        echo " $INSTALL_DIR/mo # Interactive menu"
        echo " $INSTALL_DIR/mo clean # Deep cleanup"
        echo " $INSTALL_DIR/mo uninstall # Remove apps + leftovers"
        echo " $INSTALL_DIR/mo optimize # Check and maintain system"
        echo " $INSTALL_DIR/mo analyze # Explore disk usage"
        echo " $INSTALL_DIR/mo status # Monitor system health"
        echo " $INSTALL_DIR/mo touchid # Configure Touch ID for sudo"
        echo " $INSTALL_DIR/mo update # Update to latest version"
        echo " $INSTALL_DIR/mo --help # Show all commands"
    fi
    echo ""
}
# Main install/update flows
# Fresh install flow: resolve source, validate, install, verify, report.
perform_install() {
    resolve_source_dir
    local source_version
    source_version="$(get_source_version || true)"
    check_requirements
    create_directories
    install_files
    verify_installation
    setup_path
    local installed_version
    installed_version="$(get_installed_version || true)"
    if [[ -z "$installed_version" ]]; then
        installed_version="$source_version"
    fi
    # Edge installs get a suffix to make the version explicit.
    if [[ "${MOLE_EDGE_INSTALL:-}" == "true" ]]; then
        installed_version="${installed_version}-edge"
        echo ""
        local branch_name="${MOLE_VERSION:-main}"
        log_warning "Edge version installed on ${branch_name} branch"
        log_info "This is a testing version; use 'mo update' to switch to stable"
    fi
    print_usage_summary "installed" "$installed_version"
}
# Update flow: delegate Homebrew-managed installs to update_via_homebrew,
# fall back to a fresh install when mole is absent, otherwise reinstall
# quietly when the source version differs from the installed one.
perform_update() {
    check_requirements
    if command -v brew > /dev/null 2>&1 && brew list mole > /dev/null 2>&1; then
        resolve_source_dir 2> /dev/null || true
        local current_version
        current_version=$(get_installed_version || echo "unknown")
        if [[ -f "$SOURCE_DIR/lib/core/common.sh" ]]; then
            # shellcheck disable=SC1090,SC1091
            source "$SOURCE_DIR/lib/core/common.sh"
            # update_via_homebrew is defined in common.sh sourced just above.
            update_via_homebrew "$current_version"
        else
            log_error "Cannot update Homebrew-managed Mole without full installation"
            echo ""
            echo "Please update via Homebrew:"
            echo -e " ${GREEN}brew upgrade mole${NC}"
            exit 1
        fi
        exit 0
    fi
    local installed_version
    installed_version="$(get_installed_version || true)"
    if [[ -z "$installed_version" ]]; then
        log_warning "Mole is not currently installed in $INSTALL_DIR. Running fresh installation."
        perform_install
        return
    fi
    resolve_source_dir
    local target_version
    target_version="$(get_source_version || true)"
    if [[ -z "$target_version" ]]; then
        log_error "Unable to determine the latest Mole version."
        exit 1
    fi
    if [[ "$installed_version" == "$target_version" ]]; then
        echo -e "${GREEN}${ICON_SUCCESS}${NC} Already on latest version ($installed_version)"
        exit 0
    fi
    # Run the install steps quietly; restore verbosity on every exit path.
    local old_verbose=$VERBOSE
    VERBOSE=0
    create_directories || {
        VERBOSE=$old_verbose
        log_error "Failed to create directories"
        exit 1
    }
    install_files || {
        VERBOSE=$old_verbose
        log_error "Failed to install files"
        exit 1
    }
    verify_installation || {
        VERBOSE=$old_verbose
        log_error "Failed to verify installation"
        exit 1
    }
    setup_path
    VERBOSE=$old_verbose
    local updated_version
    updated_version="$(get_installed_version || true)"
    if [[ -z "$updated_version" ]]; then
        updated_version="$target_version"
    fi
    echo -e "${GREEN}${ICON_SUCCESS}${NC} Updated to latest version ($updated_version)"
}
# Entry point: parse CLI arguments, then dispatch on the selected action.
parse_args "$@"
case "$ACTION" in
    update)
        perform_update
        ;;
    *)
        # Default action is a fresh install.
        perform_install
        ;;
esac

View File

@@ -1,595 +0,0 @@
#!/bin/bash
# System Checks Module
# Combines configuration, security, updates, and health checks
set -euo pipefail
# ============================================================================
# Helper Functions
# ============================================================================
# Print macOS login item names, one per line, queried from System Events
# via AppleScript. Silently returns when osascript is unavailable or the
# query yields nothing.
list_login_items() {
    if ! command -v osascript > /dev/null 2>&1; then
        return
    fi
    local raw_items
    raw_items=$(osascript -e 'tell application "System Events" to get the name of every login item' 2> /dev/null || echo "")
    [[ -z "$raw_items" || "$raw_items" == "missing value" ]] && return
    # AppleScript returns a comma-separated list; split it and trim whitespace.
    IFS=',' read -ra login_items_array <<< "$raw_items"
    for entry in "${login_items_array[@]}"; do
        local trimmed
        trimmed=$(echo "$entry" | sed 's/^[[:space:]]*//; s/[[:space:]]*$//')
        [[ -n "$trimmed" ]] && printf "%s\n" "$trimmed"
    done
}
# ============================================================================
# Configuration Checks
# ============================================================================
check_touchid_sudo() {
# Check whitelist
if command -v is_whitelisted > /dev/null && is_whitelisted "check_touchid"; then return; fi
# Check if Touch ID is configured for sudo
local pam_file="/etc/pam.d/sudo"
if [[ -f "$pam_file" ]] && grep -q "pam_tid.so" "$pam_file" 2> /dev/null; then
echo -e " ${GREEN}${NC} Touch ID Biometric authentication enabled"
else
# Check if Touch ID is supported
local is_supported=false
if command -v bioutil > /dev/null 2>&1; then
if bioutil -r 2> /dev/null | grep -q "Touch ID"; then
is_supported=true
fi
elif [[ "$(uname -m)" == "arm64" ]]; then
is_supported=true
fi
if [[ "$is_supported" == "true" ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Touch ID ${YELLOW}Not configured for sudo${NC}"
export TOUCHID_NOT_CONFIGURED=true
fi
fi
}
# Report Rosetta 2 (Intel-binary translation) status on Apple Silicon.
# No output on Intel machines or when the check is whitelisted. Exports
# ROSETTA_NOT_INSTALLED=true when Rosetta is missing on arm64.
check_rosetta() {
    if command -v is_whitelisted > /dev/null && is_whitelisted "check_rosetta"; then return; fi
    # Rosetta is only relevant on arm64 hardware.
    [[ "$(uname -m)" == "arm64" ]] || return 0
    if [[ -f "/Library/Apple/usr/share/rosetta/rosetta" ]]; then
        echo -e " ${GREEN}${NC} Rosetta 2 Intel app translation ready"
    else
        echo -e " ${YELLOW}${ICON_WARNING}${NC} Rosetta 2 ${YELLOW}Intel app support missing${NC}"
        export ROSETTA_NOT_INSTALLED=true
    fi
}
# Verify that a global Git identity (user.name AND user.email) is set.
# Prints a green confirmation when both exist, a yellow warning otherwise.
# Silently does nothing when git is absent or the check is whitelisted.
check_git_config() {
    # Check whitelist
    if command -v is_whitelisted > /dev/null && is_whitelisted "check_git_config"; then return; fi
    # Check basic Git configuration
    command -v git > /dev/null 2>&1 || return 0
    # Declare and assign separately so `local` does not mask the command's
    # exit status (shellcheck SC2155).
    local git_name git_email
    git_name=$(git config --global user.name 2> /dev/null || echo "")
    git_email=$(git config --global user.email 2> /dev/null || echo "")
    if [[ -n "$git_name" && -n "$git_email" ]]; then
        echo -e " ${GREEN}${NC} Git Global identity configured"
    else
        echo -e " ${YELLOW}${ICON_WARNING}${NC} Git ${YELLOW}User identity not set${NC}"
    fi
}
# Run all system-configuration checks under a single section header.
check_all_config() {
echo -e "${BLUE}${ICON_ARROW}${NC} System Configuration"
check_touchid_sudo
check_rosetta
check_git_config
}
# ============================================================================
# Security Checks
# ============================================================================
# Report FileVault full-disk-encryption status via `fdesetup status`.
# Exports FILEVAULT_DISABLED=true (and prints red) when encryption is off.
# Silent when fdesetup is absent or the check is whitelisted.
check_filevault() {
# Check whitelist
if command -v is_whitelisted > /dev/null && is_whitelisted "check_filevault"; then return; fi
# Check FileVault encryption status
if command -v fdesetup > /dev/null 2>&1; then
local fv_status=$(fdesetup status 2> /dev/null || echo "")
if echo "$fv_status" | grep -q "FileVault is On"; then
echo -e " ${GREEN}${NC} FileVault Disk encryption active"
else
echo -e " ${RED}${NC} FileVault ${RED}Disk encryption disabled${NC}"
export FILEVAULT_DISABLED=true
fi
fi
}
# Report application-firewall status. Uses socketfilterfw rather than
# `defaults` because the plist is stale on modern macOS. State 1 = on,
# state 2 = block-all; anything else (including a failed sudo) is treated
# as disabled and exports FIREWALL_DISABLED=true.
# NOTE(review): this invokes sudo, so it may prompt for a password when run
# without cached credentials - confirm that is intended here.
check_firewall() {
# Check whitelist
if command -v is_whitelisted > /dev/null && is_whitelisted "firewall"; then return; fi
# Check firewall status using socketfilterfw (more reliable than defaults on modern macOS)
unset FIREWALL_DISABLED
local firewall_output=$(sudo /usr/libexec/ApplicationFirewall/socketfilterfw --getglobalstate 2> /dev/null || echo "")
if [[ "$firewall_output" == *"State = 1"* ]] || [[ "$firewall_output" == *"State = 2"* ]]; then
echo -e " ${GREEN}${NC} Firewall Network protection enabled"
else
echo -e " ${YELLOW}${ICON_WARNING}${NC} Firewall ${YELLOW}Network protection disabled${NC}"
export FIREWALL_DISABLED=true
fi
}
# Report Gatekeeper (downloaded-app assessment) status via `spctl --status`.
# Exports GATEKEEPER_DISABLED=true when assessments are off; clears the
# flag when enabled. Silent when spctl is absent or whitelisted.
check_gatekeeper() {
# Check whitelist
if command -v is_whitelisted > /dev/null && is_whitelisted "gatekeeper"; then return; fi
# Check Gatekeeper status
if command -v spctl > /dev/null 2>&1; then
local gk_status=$(spctl --status 2> /dev/null || echo "")
if echo "$gk_status" | grep -q "enabled"; then
echo -e " ${GREEN}${NC} Gatekeeper App download protection active"
unset GATEKEEPER_DISABLED
else
echo -e " ${YELLOW}${ICON_WARNING}${NC} Gatekeeper ${YELLOW}App security disabled${NC}"
export GATEKEEPER_DISABLED=true
fi
fi
}
# Report System Integrity Protection state as seen by `csrutil status`.
# Silent when csrutil is absent or the check is whitelisted.
check_sip() {
    if command -v is_whitelisted > /dev/null && is_whitelisted "check_sip"; then return; fi
    # csrutil only exists on macOS; skip quietly elsewhere.
    command -v csrutil > /dev/null 2>&1 || return 0
    local status_text
    status_text=$(csrutil status 2> /dev/null || echo "")
    # Substring match on "enabled" anywhere in the output.
    case "$status_text" in
        *enabled*)
            echo -e " ${GREEN}${NC} SIP System integrity protected"
            ;;
        *)
            echo -e " ${YELLOW}${ICON_WARNING}${NC} SIP ${YELLOW}System protection disabled${NC}"
            ;;
    esac
}
# Run all security-posture checks under a single section header.
check_all_security() {
echo -e "${BLUE}${ICON_ARROW}${NC} Security Status"
check_filevault
check_firewall
check_gatekeeper
check_sip
}
# ============================================================================
# Software Update Checks
# ============================================================================
# Cache configuration
# Shared on-disk cache for update/version probes; entries expire after
# CACHE_TTL seconds (see is_cache_valid).
CACHE_DIR="${HOME}/.cache/mole"
CACHE_TTL=600 # 10 minutes in seconds
# Ensure cache directory exists
ensure_user_dir "$CACHE_DIR"
# Remove a single cache file; missing files and permission errors are
# silently ignored so callers never fail on cleanup.
clear_cache_file() {
    local target="$1"
    rm -f "$target" 2> /dev/null || true
}
# Invalidate the cached Homebrew update listing.
reset_brew_cache() {
clear_cache_file "$CACHE_DIR/brew_updates"
}
# Invalidate the cached softwareupdate listing and its in-memory copy.
reset_softwareupdate_cache() {
clear_cache_file "$CACHE_DIR/softwareupdate_list"
SOFTWARE_UPDATE_LIST=""
}
# Invalidate the cached "latest Mole release" version string.
reset_mole_cache() {
clear_cache_file "$CACHE_DIR/mole_version"
}
# Check if cache is still valid.
# $1 = cache file path, $2 = TTL in seconds (defaults to CACHE_TTL).
# Returns 0 when the file exists and is younger than the TTL, 1 otherwise.
is_cache_valid() {
    local path="$1"
    local max_age="${2:-$CACHE_TTL}"
    [[ -f "$path" ]] || return 1
    local age
    age=$(($(get_epoch_seconds) - $(get_file_mtime "$path")))
    [[ $age -lt $max_age ]]
}
# Cache software update list to avoid calling softwareupdate twice
SOFTWARE_UPDATE_LIST=""
# Fast pending-update probe. Instead of the slow `softwareupdate -l`, this
# reads the update count that macOS's own background scan stored in the
# SoftwareUpdate preferences. Prints "Updates Available" when the count is
# greater than zero, otherwise an empty line.
get_software_updates() {
    local pending_updates
    pending_updates=$(defaults read /Library/Preferences/com.apple.SoftwareUpdate LastRecommendedUpdatesAvailable 2> /dev/null || echo "0")
    # Guard against non-numeric output (corrupt plist, unexpected format):
    # `[[ x -gt 0 ]]` on a garbage string would otherwise error out.
    [[ "$pending_updates" =~ ^[0-9]+$ ]] || pending_updates=0
    if [[ "$pending_updates" -gt 0 ]]; then
        echo "Updates Available"
    else
        echo ""
    fi
}
# Intentional stub: App Store updates cannot be distinguished from macOS
# updates without the slow `softwareupdate -l` scan, so this only exports
# a zero count for downstream consumers.
check_appstore_updates() {
# Skipped for speed optimization - consolidated into check_macos_update
# We can't easily distinguish app store vs macos updates without the slow softwareupdate -l call
export APPSTORE_UPDATE_COUNT=0
}
# Report whether a macOS update is pending and export
# MACOS_UPDATE_AVAILABLE ("true"/"false"). Uses the fast preferences probe
# first, then double-checks with `softwareupdate -l --no-scan` (bounded by
# a 10s timeout) to weed out stale positives. Biased toward false
# positives: the flag is only cleared when softwareupdate returns a list
# with no "*" update entries.
check_macos_update() {
# Check whitelist
if command -v is_whitelisted > /dev/null && is_whitelisted "check_macos_updates"; then return; fi
# Fast check using system preferences
local updates_available="false"
if [[ $(get_software_updates) == "Updates Available" ]]; then
updates_available="true"
# Verify with softwareupdate using --no-scan to avoid triggering a fresh scan
# which can timeout. We prioritize avoiding false negatives (missing actual updates)
# over false positives, so we only clear the update flag when softwareupdate
# explicitly reports "No new software available"
local sw_output=""
local sw_status=0
local spinner_started=false
# Spinner only when stdout is a TTY (interactive run).
if [[ -t 1 ]]; then
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking macOS updates..."
spinner_started=true
fi
local softwareupdate_timeout=10
if sw_output=$(run_with_timeout "$softwareupdate_timeout" softwareupdate -l --no-scan 2> /dev/null); then
:
else
# Preserve the non-zero status (timeout or command failure) for the
# decision below; a failed verify leaves the pending flag set.
sw_status=$?
fi
if [[ "$spinner_started" == "true" ]]; then
stop_inline_spinner
fi
# Debug logging for troubleshooting
if [[ -n "${MO_DEBUG:-}" ]]; then
echo "[DEBUG] softwareupdate exit status: $sw_status, output lines: $(echo "$sw_output" | wc -l | tr -d ' ')" >&2
fi
# Prefer avoiding false negatives: if the system indicates updates are pending,
# only clear the flag when softwareupdate returns a list without any update entries.
if [[ $sw_status -eq 0 && -n "$sw_output" ]]; then
if ! echo "$sw_output" | grep -qE '^[[:space:]]*\*'; then
updates_available="false"
fi
fi
fi
export MACOS_UPDATE_AVAILABLE="$updates_available"
if [[ "$updates_available" == "true" ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} macOS ${YELLOW}Update available${NC}"
else
echo -e " ${GREEN}${NC} macOS System up to date"
fi
}
# Compare the installed Mole version against the latest GitHub release and
# export MOLE_UPDATE_AVAILABLE ("true"/"false"). The remote lookup is
# cached in $CACHE_DIR/mole_version for CACHE_TTL seconds; curl runs in
# the background so Ctrl+C can interrupt the wait.
check_mole_update() {
if command -v is_whitelisted > /dev/null && is_whitelisted "check_mole_update"; then return; fi
# Check if Mole has updates
# Auto-detect version from mole main script
local current_version
if [[ -f "${SCRIPT_DIR:-/usr/local/bin}/mole" ]]; then
current_version=$(grep '^VERSION=' "${SCRIPT_DIR:-/usr/local/bin}/mole" 2> /dev/null | head -1 | sed 's/VERSION="\(.*\)"/\1/' || echo "unknown")
else
# Fall back to the VERSION variable exported by the running script, if any.
current_version="${VERSION:-unknown}"
fi
local latest_version=""
local cache_file="$CACHE_DIR/mole_version"
export MOLE_UPDATE_AVAILABLE="false"
# Check cache first
if is_cache_valid "$cache_file"; then
latest_version=$(cat "$cache_file" 2> /dev/null || echo "")
else
# Show spinner while checking
if [[ -t 1 ]]; then
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking Mole version..."
fi
# Try to get latest version from GitHub
if command -v curl > /dev/null 2>&1; then
# Run in background to allow Ctrl+C to interrupt
local temp_version
temp_version=$(mktemp_file "mole_version_check")
# Extract the tag_name value from the GitHub releases API response,
# dropping any leading "v".
curl -fsSL --connect-timeout 3 --max-time 5 https://api.github.com/repos/tw93/mole/releases/latest 2> /dev/null | grep '"tag_name"' | sed -E 's/.*"v?([^"]+)".*/\1/' > "$temp_version" &
local curl_pid=$!
# Wait for curl to complete (allows Ctrl+C to interrupt)
if wait "$curl_pid" 2> /dev/null; then
latest_version=$(cat "$temp_version" 2> /dev/null || echo "")
# Save to cache
if [[ -n "$latest_version" ]]; then
ensure_user_file "$cache_file"
echo "$latest_version" > "$cache_file" 2> /dev/null || true
fi
fi
rm -f "$temp_version" 2> /dev/null || true
fi
# Stop spinner
if [[ -t 1 ]]; then
stop_inline_spinner
fi
fi
# Normalize version strings (remove leading 'v' or 'V')
current_version="${current_version#v}"
current_version="${current_version#V}"
latest_version="${latest_version#v}"
latest_version="${latest_version#V}"
if [[ -n "$latest_version" && "$current_version" != "$latest_version" ]]; then
# Compare versions
# sort -V orders version strings numerically; if the current version sorts
# first, the remote one is newer.
if [[ "$(printf '%s\n' "$current_version" "$latest_version" | sort -V | head -1)" == "$current_version" ]]; then
export MOLE_UPDATE_AVAILABLE="true"
echo -e " ${YELLOW}${ICON_WARNING}${NC} Mole ${YELLOW}${latest_version} available${NC} (running ${current_version})"
else
echo -e " ${GREEN}${NC} Mole Latest version ${current_version}"
fi
else
echo -e " ${GREEN}${NC} Mole Latest version ${current_version}"
fi
}
# Run all update checks under a single section header. The initial
# get_software_updates call warms any lazy state so the individual checks
# below don't stall between lines.
check_all_updates() {
# Reset spinner flag for softwareupdate
unset SOFTWAREUPDATE_SPINNER_SHOWN
# Preload software update data to avoid delays between subsequent checks
# Only redirect stdout, keep stderr for spinner display
get_software_updates > /dev/null
echo -e "${BLUE}${ICON_ARROW}${NC} System Updates"
check_appstore_updates
check_macos_update
check_mole_update
}
# Extract non-macOS update labels from the softwareupdate listing.
# NOTE(review): get_software_updates was rewritten to print only
# "Updates Available" or an empty string, so the "* Label:" lines this awk
# filter expects never appear anymore - as written this function always
# produces no output. Confirm whether it is dead code or should be fed by
# a real `softwareupdate -l` listing.
get_appstore_update_labels() {
get_software_updates | awk '
/^\*/ {
label=$0
sub(/^[[:space:]]*\* Label: */, "", label)
sub(/,.*/, "", label)
lower=tolower(label)
if (index(lower, "macos") == 0) {
print label
}
}
'
}
# Extract macOS update labels from the softwareupdate listing.
# NOTE(review): same dead-code concern as get_appstore_update_labels -
# get_software_updates no longer emits "* Label:" lines, so this filter
# can never match. Verify before relying on its output.
get_macos_update_labels() {
get_software_updates | awk '
/^\*/ {
label=$0
sub(/^[[:space:]]*\* Label: */, "", label)
sub(/,.*/, "", label)
lower=tolower(label)
if (index(lower, "macos") != 0) {
print label
}
}
'
}
# ============================================================================
# System Health Checks
# ============================================================================
# Report free space on the root volume and export DISK_FREE_GB (integer GB).
# Thresholds: below 20 GB is critical (red), below 50 GB is low (yellow).
check_disk_space() {
    # Use 1K blocks (`df -k`) instead of `df -H` so the value never carries
    # a unit suffix. The previous parsing stripped only "G", so "1.2T" free
    # was read as 1 GB (false critical) and "900M" broke the numeric
    # comparison entirely.
    local free_kb
    free_kb=$(command df -k / | awk 'NR==2 {print $4}')
    [[ "$free_kb" =~ ^[0-9]+$ ]] || free_kb=0
    local free_num=$((free_kb / 1024 / 1024))
    local free_gb
    free_gb=$(awk "BEGIN {printf \"%.1f\", $free_kb / 1024 / 1024}")
    export DISK_FREE_GB=$free_num
    if [[ $free_num -lt 20 ]]; then
        echo -e " ${RED}${NC} Disk Space ${RED}${free_gb}GB free${NC} (Critical)"
    elif [[ $free_num -lt 50 ]]; then
        echo -e " ${YELLOW}${ICON_WARNING}${NC} Disk Space ${YELLOW}${free_gb}GB free${NC} (Low)"
    else
        echo -e " ${GREEN}${NC} Disk Space ${free_gb}GB free"
    fi
}
# Estimate memory pressure from vm_stat and report used percentage.
# "Used" counts everything except free, inactive, and speculative pages;
# over 90% is critical, over 80% is high.
check_memory_usage() {
local mem_total
mem_total=$(sysctl -n hw.memsize 2> /dev/null || echo "0")
if [[ -z "$mem_total" || "$mem_total" -le 0 ]]; then
echo -e " ${GRAY}-${NC} Memory Unable to determine"
return
fi
local vm_output
vm_output=$(vm_stat 2> /dev/null || echo "")
local page_size
# vm_stat's header reports the real page size (4096 on Intel, 16384 on
# Apple Silicon); fall back to 4096 only if the header is missing.
page_size=$(echo "$vm_output" | awk '/page size of/ {print $8}')
[[ -z "$page_size" ]] && page_size=4096
local free_pages inactive_pages spec_pages
# vm_stat values end with a trailing "."; strip it before arithmetic.
free_pages=$(echo "$vm_output" | awk '/Pages free/ {gsub(/\./,"",$3); print $3}')
inactive_pages=$(echo "$vm_output" | awk '/Pages inactive/ {gsub(/\./,"",$3); print $3}')
spec_pages=$(echo "$vm_output" | awk '/Pages speculative/ {gsub(/\./,"",$3); print $3}')
free_pages=${free_pages:-0}
inactive_pages=${inactive_pages:-0}
spec_pages=${spec_pages:-0}
# Estimate used percent: (total - free - inactive - speculative) / total
local total_pages=$((mem_total / page_size))
local free_total=$((free_pages + inactive_pages + spec_pages))
local used_pages=$((total_pages - free_total))
if ((used_pages < 0)); then
used_pages=0
fi
local used_percent
used_percent=$(awk "BEGIN {printf \"%.0f\", ($used_pages / $total_pages) * 100}")
# Clamp to [0, 100] in case of rounding or inconsistent vm_stat snapshots.
((used_percent > 100)) && used_percent=100
((used_percent < 0)) && used_percent=0
if [[ $used_percent -gt 90 ]]; then
echo -e " ${RED}${NC} Memory ${RED}${used_percent}% used${NC} (Critical)"
elif [[ $used_percent -gt 80 ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Memory ${YELLOW}${used_percent}% used${NC} (High)"
else
echo -e " ${GREEN}${NC} Memory ${used_percent}% used"
fi
}
# Count login items and show a compact preview of the first few names.
# More than 15 items draws a warning. Items are only enumerated when stdin
# is a TTY (osascript may hang or prompt in non-interactive contexts), so
# non-interactive runs always report "None".
check_login_items() {
# Check whitelist
if command -v is_whitelisted > /dev/null && is_whitelisted "check_login_items"; then return; fi
local login_items_count=0
local -a login_items_list=()
if [[ -t 0 ]]; then
# Show spinner while getting login items
if [[ -t 1 ]]; then
MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking login items..."
fi
while IFS= read -r login_item; do
[[ -n "$login_item" ]] && login_items_list+=("$login_item")
done < <(list_login_items || true)
login_items_count=${#login_items_list[@]}
# Stop spinner before output
if [[ -t 1 ]]; then
stop_inline_spinner
fi
fi
if [[ $login_items_count -gt 15 ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Login Items ${YELLOW}${login_items_count} apps${NC}"
elif [[ $login_items_count -gt 0 ]]; then
echo -e " ${GREEN}${NC} Login Items ${login_items_count} apps"
else
echo -e " ${GREEN}${NC} Login Items None"
return
fi
# Show items in a single line (compact)
# Preview at most 3 names; append "+N" for the remainder.
local preview_limit=3
((preview_limit > login_items_count)) && preview_limit=$login_items_count
local items_display=""
for ((i = 0; i < preview_limit; i++)); do
if [[ $i -eq 0 ]]; then
items_display="${login_items_list[$i]}"
else
items_display="${items_display}, ${login_items_list[$i]}"
fi
done
if ((login_items_count > preview_limit)); then
local remaining=$((login_items_count - preview_limit))
items_display="${items_display} +${remaining}"
fi
echo -e " ${GRAY}${items_display}${NC}"
}
# Estimate cleanable cache size (user Caches + Logs), export CACHE_SIZE_GB,
# and flag anything over 5 GB as cleanable.
check_cache_size() {
    local cache_size_kb=0
    # Check common cache locations
    local -a cache_paths=(
        "$HOME/Library/Caches"
        "$HOME/Library/Logs"
    )
    # Show spinner while calculating cache size
    if [[ -t 1 ]]; then
        MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning cache..."
    fi
    local cache_path size_output
    for cache_path in "${cache_paths[@]}"; do
        if [[ -d "$cache_path" ]]; then
            size_output=$(get_path_size_kb "$cache_path")
            [[ "$size_output" =~ ^[0-9]+$ ]] || size_output=0
            cache_size_kb=$((cache_size_kb + size_output))
        fi
    done
    # Use awk instead of bc: awk handles the float math everywhere else in
    # this file and is always available, and bc printed sub-1GB values as
    # ".4" (no leading zero), which broke the integer comparison below.
    # int()/10 truncates to one decimal, matching bc's scale=1 behavior.
    local cache_size_gb
    cache_size_gb=$(awk "BEGIN {printf \"%.1f\", int($cache_size_kb * 10 / 1048576) / 10}")
    export CACHE_SIZE_GB=$cache_size_gb
    # Stop spinner before output
    if [[ -t 1 ]]; then
        stop_inline_spinner
    fi
    # Integer part only, for the threshold comparison.
    local cache_size_int=${cache_size_gb%.*}
    # The old >10 and >5 branches printed the identical message, so a
    # single >5 check preserves behavior.
    if [[ $cache_size_int -gt 5 ]]; then
        echo -e " ${YELLOW}${ICON_WARNING}${NC} Cache Size ${YELLOW}${cache_size_gb}GB${NC} cleanable"
    else
        echo -e " ${GREEN}${NC} Cache Size ${cache_size_gb}GB"
    fi
}
# Report swap usage parsed from `sysctl vm.swapusage`; more than 2 GB in
# use is flagged as high. Silent when sysctl is unavailable.
check_swap_usage() {
# Check swap usage
if command -v sysctl > /dev/null 2>&1; then
local swap_info=$(sysctl vm.swapusage 2> /dev/null || echo "")
if [[ -n "$swap_info" ]]; then
# Pull the first "used = <n>G|M" token, e.g. "1.50G" or "512.00M".
local swap_used=$(echo "$swap_info" | grep -o "used = [0-9.]*[GM]" | awk 'NR==1{print $3}')
swap_used=${swap_used:-0M}
local swap_num="${swap_used//[GM]/}"
if [[ "$swap_used" == *"G"* ]]; then
# Integer GB (fraction truncated) for the threshold comparison.
local swap_gb=${swap_num%.*}
if [[ $swap_gb -gt 2 ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Swap Usage ${YELLOW}${swap_used}${NC} (High)"
else
echo -e " ${GREEN}${NC} Swap Usage ${swap_used}"
fi
else
# Megabyte-scale swap is always fine.
echo -e " ${GREEN}${NC} Swap Usage ${swap_used}"
fi
fi
fi
}
# Intentional no-op stub: only honors the whitelist, performs no check.
# NOTE(review): if Homebrew health reporting is still wanted, this body
# needs to be filled in; it currently produces no output at all.
check_brew_health() {
# Check whitelist
if command -v is_whitelisted > /dev/null && is_whitelisted "check_brew_health"; then return; fi
}
# Run all system-health checks under a single section header.
check_system_health() {
echo -e "${BLUE}${ICON_ARROW}${NC} System Health"
check_disk_space
check_memory_usage
check_swap_usage
check_login_items
check_cache_size
# Time Machine check is optional; skip by default to avoid noise on systems without backups
}

View File

@@ -1,184 +0,0 @@
#!/bin/bash
# System Health Check - JSON Generator
# Extracted from tasks.sh
set -euo pipefail
# Ensure dependencies are loaded (only if running standalone)
# file_ops.sh is expected to provide helpers used below (get_epoch_seconds
# at minimum) - presumably also the MOLE_FILE_OPS_LOADED guard variable.
if [[ -z "${MOLE_FILE_OPS_LOADED:-}" ]]; then
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
source "$SCRIPT_DIR/lib/core/file_ops.sh"
fi
# Get memory info in GB
# Prints "<used_gb> <total_gb>" with two decimals. "Used" = active + wired
# + compressor pages, converted using the page size vm_stat reports.
get_memory_info() {
    local total_bytes used_gb total_gb
    # Total memory
    total_bytes=$(sysctl -n hw.memsize 2> /dev/null || echo "0")
    total_gb=$(LC_ALL=C awk "BEGIN {printf \"%.2f\", $total_bytes / (1024*1024*1024)}" 2> /dev/null || echo "0")
    [[ -z "$total_gb" ]] && total_gb="0"
    # Used memory from vm_stat
    local vm_output active wired compressed page_size
    vm_output=$(vm_stat 2> /dev/null || echo "")
    # Read the real page size from vm_stat's header line instead of
    # hard-coding 4096: Apple Silicon uses 16384-byte pages, so the old
    # fixed value under-counted used memory by 4x there. (The sibling
    # check_memory_usage already parses it this way.)
    page_size=$(echo "$vm_output" | LC_ALL=C awk '/page size of/ {print $8}')
    [[ "$page_size" =~ ^[0-9]+$ ]] || page_size=4096
    # vm_stat page counts end with a trailing "."; strip it for arithmetic.
    active=$(echo "$vm_output" | LC_ALL=C awk '/Pages active:/ {print $NF}' | tr -d '.\n' 2> /dev/null)
    wired=$(echo "$vm_output" | LC_ALL=C awk '/Pages wired down:/ {print $NF}' | tr -d '.\n' 2> /dev/null)
    compressed=$(echo "$vm_output" | LC_ALL=C awk '/Pages occupied by compressor:/ {print $NF}' | tr -d '.\n' 2> /dev/null)
    active=${active:-0}
    wired=${wired:-0}
    compressed=${compressed:-0}
    local used_bytes=$(((active + wired + compressed) * page_size))
    used_gb=$(LC_ALL=C awk "BEGIN {printf \"%.2f\", $used_bytes / (1024*1024*1024)}" 2> /dev/null || echo "0")
    [[ -z "$used_gb" ]] && used_gb="0"
    echo "$used_gb $total_gb"
}
# Get disk info
# Prints "<used_gb> <total_gb> <used_percent>" for the volume holding $HOME
# (two decimals for GB, one for the percentage).
get_disk_info() {
    local home="${HOME:-/}"
    local total_kb used_kb
    # awk's END block sees only the last df line (handles wrapped device
    # names) and yields blocks-total and blocks-used in one pass.
    read -r total_kb used_kb <<< "$(command df -k "$home" 2> /dev/null | LC_ALL=C awk 'END {print $2, $3}')"
    total_kb=${total_kb:-0}
    used_kb=${used_kb:-0}
    # Avoid division by zero in the percentage below.
    [[ "$total_kb" == "0" ]] && total_kb=1
    local total_gb used_gb used_percent
    total_gb=$(LC_ALL=C awk "BEGIN {printf \"%.2f\", $total_kb / (1024*1024)}" 2> /dev/null || echo "0")
    used_gb=$(LC_ALL=C awk "BEGIN {printf \"%.2f\", $used_kb / (1024*1024)}" 2> /dev/null || echo "0")
    used_percent=$(LC_ALL=C awk "BEGIN {printf \"%.1f\", ($used_kb / $total_kb) * 100}" 2> /dev/null || echo "0")
    total_gb=${total_gb:-0}
    used_gb=${used_gb:-0}
    used_percent=${used_percent:-0}
    echo "$used_gb $total_gb $used_percent"
}
# Get uptime in days
# Prints uptime with one decimal, derived from kern.boottime; prints "0"
# when the boot time cannot be parsed.
get_uptime_days() {
local boot_output boot_time uptime_days
boot_output=$(sysctl -n kern.boottime 2> /dev/null || echo "")
# kern.boottime looks like "{ sec = 1700000000, usec = 0 } ..."; grab the
# epoch seconds between "sec = " and ", usec".
boot_time=$(echo "$boot_output" | awk -F 'sec = |, usec' '{print $2}' 2> /dev/null || echo "")
if [[ -n "$boot_time" && "$boot_time" =~ ^[0-9]+$ ]]; then
local now
now=$(get_epoch_seconds)
local uptime_sec=$((now - boot_time))
uptime_days=$(LC_ALL=C awk "BEGIN {printf \"%.1f\", $uptime_sec / 86400}" 2> /dev/null || echo "0")
else
uptime_days="0"
fi
[[ -z "$uptime_days" || "$uptime_days" == "" ]] && uptime_days="0"
echo "$uptime_days"
}
# JSON escape helper
# Escapes backslashes, double quotes, and literal TAB characters for safe
# embedding in a JSON string value. NOTE(review): newlines are NOT escaped
# as \n - they are converted to spaces (with one trailing space trimmed),
# so multi-line input is flattened. The first character inside the third
# sed pattern is assumed to be a literal TAB; confirm against the original
# file, since a plain space there would make the \t substitution wrong.
json_escape() {
# Escape backslash, double quote, tab, and newline
local escaped
escaped=$(echo -n "$1" | sed 's/\\/\\\\/g; s/"/\\"/g; s/	/\\t/g' | tr '\n' ' ')
echo -n "${escaped% }"
}
# Generate JSON output
# Emits a single JSON document on stdout: memory/disk/uptime metrics plus
# a static catalog of optimization actions ("action|name|description|safe"
# records). Strings pass through json_escape; numeric fields are emitted
# unquoted, so the helpers above must produce bare numbers.
generate_health_json() {
# System info
read -r mem_used mem_total <<< "$(get_memory_info)"
read -r disk_used disk_total disk_percent <<< "$(get_disk_info)"
local uptime=$(get_uptime_days)
# Ensure all values are valid numbers (fallback to 0)
mem_used=${mem_used:-0}
mem_total=${mem_total:-0}
disk_used=${disk_used:-0}
disk_total=${disk_total:-0}
disk_percent=${disk_percent:-0}
uptime=${uptime:-0}
# Start JSON
cat << EOF
{
"memory_used_gb": $mem_used,
"memory_total_gb": $mem_total,
"disk_used_gb": $disk_used,
"disk_total_gb": $disk_total,
"disk_used_percent": $disk_percent,
"uptime_days": $uptime,
"optimizations": [
EOF
# Collect all optimization items
# Each record is pipe-delimited: action|display name|description|safe flag.
local -a items=()
# Core optimizations (safe and valuable)
items+=('system_maintenance|DNS & Spotlight Check|Refresh DNS cache & verify Spotlight status|true')
items+=('cache_refresh|Finder Cache Refresh|Refresh QuickLook thumbnails & icon services cache|true')
items+=('saved_state_cleanup|App State Cleanup|Remove old saved application states (30+ days)|true')
items+=('fix_broken_configs|Broken Config Repair|Fix corrupted preferences files|true')
items+=('network_optimization|Network Cache Refresh|Optimize DNS cache & restart mDNSResponder|true')
# Advanced optimizations (high value, auto-run with safety checks)
items+=('sqlite_vacuum|Database Optimization|Compress SQLite databases for Mail, Safari & Messages (skips if apps are running)|true')
items+=('launch_services_rebuild|LaunchServices Repair|Repair "Open with" menu & file associations|true')
items+=('font_cache_rebuild|Font Cache Rebuild|Rebuild font database to fix rendering issues|true')
items+=('dock_refresh|Dock Refresh|Fix broken icons and visual glitches in the Dock|true')
# System performance optimizations (new)
items+=('memory_pressure_relief|Memory Optimization|Release inactive memory to improve system responsiveness|true')
items+=('network_stack_optimize|Network Stack Refresh|Flush routing table and ARP cache to resolve network issues|true')
items+=('disk_permissions_repair|Permission Repair|Fix user directory permission issues|true')
items+=('bluetooth_reset|Bluetooth Refresh|Restart Bluetooth module to fix connectivity (skips if in use)|true')
items+=('spotlight_index_optimize|Spotlight Optimization|Rebuild index if search is slow (smart detection)|true')
# Removed high-risk optimizations:
# - startup_items_cleanup: Risk of deleting legitimate app helpers
# - system_services_refresh: Risk of data loss when killing system services
# - dyld_cache_update: Low benefit, time-consuming, auto-managed by macOS
# Output items as JSON
local first=true
for item in "${items[@]}"; do
IFS='|' read -r action name desc safe <<< "$item"
# Escape strings
action=$(json_escape "$action")
name=$(json_escape "$name")
desc=$(json_escape "$desc")
# Comma-separate array elements: print "," before every item but the first.
[[ "$first" == "true" ]] && first=false || echo ","
cat << EOF
{
"category": "system",
"name": "$name",
"description": "$desc",
"action": "$action",
"safe": $safe
}
EOF
done
# Close JSON
cat << 'EOF'
]
}
EOF
}
# Main execution (for testing)
# Emit the JSON only when this file is executed directly, not when sourced.
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
generate_health_json
fi

View File

@@ -1,235 +0,0 @@
#!/bin/bash
# User GUI Applications Cleanup Module (desktop apps, media, utilities).
set -euo pipefail
# Xcode and iOS tooling.
# Clears simulator caches, device logs, and build products; DerivedData and
# Archives are skipped while Xcode is running to avoid corrupting builds.
# NOTE(review): Archives contain user-created app archives - confirm that
# deleting them when Xcode is closed is intended.
clean_xcode_tools() {
# Skip DerivedData/Archives while Xcode is running.
local xcode_running=false
if pgrep -x "Xcode" > /dev/null 2>&1; then
xcode_running=true
fi
safe_clean ~/Library/Developer/CoreSimulator/Caches/* "Simulator cache"
safe_clean ~/Library/Developer/CoreSimulator/Devices/*/data/tmp/* "Simulator temp files"
safe_clean ~/Library/Caches/com.apple.dt.Xcode/* "Xcode cache"
safe_clean ~/Library/Developer/Xcode/iOS\ Device\ Logs/* "iOS device logs"
safe_clean ~/Library/Developer/Xcode/watchOS\ Device\ Logs/* "watchOS device logs"
safe_clean ~/Library/Developer/Xcode/Products/* "Xcode build products"
if [[ "$xcode_running" == "false" ]]; then
safe_clean ~/Library/Developer/Xcode/DerivedData/* "Xcode derived data"
safe_clean ~/Library/Developer/Xcode/Archives/* "Xcode archives"
else
echo -e " ${YELLOW}${ICON_WARNING}${NC} Xcode is running, skipping DerivedData and Archives cleanup"
fi
}
# Code editors.
# Each safe_clean call receives shell-expanded glob paths plus a display
# label; missing paths are handled by safe_clean itself.
clean_code_editors() {
safe_clean ~/Library/Application\ Support/Code/logs/* "VS Code logs"
safe_clean ~/Library/Application\ Support/Code/Cache/* "VS Code cache"
safe_clean ~/Library/Application\ Support/Code/CachedExtensions/* "VS Code extension cache"
safe_clean ~/Library/Application\ Support/Code/CachedData/* "VS Code data cache"
safe_clean ~/Library/Caches/com.sublimetext.*/* "Sublime Text cache"
}
# Communication apps.
clean_communication_apps() {
safe_clean ~/Library/Application\ Support/discord/Cache/* "Discord cache"
safe_clean ~/Library/Application\ Support/legcord/Cache/* "Legcord cache"
safe_clean ~/Library/Application\ Support/Slack/Cache/* "Slack cache"
safe_clean ~/Library/Caches/us.zoom.xos/* "Zoom cache"
safe_clean ~/Library/Caches/com.tencent.xinWeChat/* "WeChat cache"
safe_clean ~/Library/Caches/ru.keepcoder.Telegram/* "Telegram cache"
safe_clean ~/Library/Caches/com.microsoft.teams2/* "Microsoft Teams cache"
safe_clean ~/Library/Caches/net.whatsapp.WhatsApp/* "WhatsApp cache"
safe_clean ~/Library/Caches/com.skype.skype/* "Skype cache"
safe_clean ~/Library/Caches/com.tencent.meeting/* "Tencent Meeting cache"
safe_clean ~/Library/Caches/com.tencent.WeWorkMac/* "WeCom cache"
safe_clean ~/Library/Caches/com.feishu.*/* "Feishu cache"
}
# DingTalk.
# Split out from communication apps because it also carries log dirs and a
# bundled security component.
clean_dingtalk() {
safe_clean ~/Library/Caches/dd.work.exclusive4aliding/* "DingTalk iDingTalk cache"
safe_clean ~/Library/Caches/com.alibaba.AliLang.osx/* "AliLang security component"
safe_clean ~/Library/Application\ Support/iDingTalk/log/* "DingTalk logs"
safe_clean ~/Library/Application\ Support/iDingTalk/holmeslogs/* "DingTalk holmes logs"
}
# AI assistants.
clean_ai_apps() {
safe_clean ~/Library/Caches/com.openai.chat/* "ChatGPT cache"
safe_clean ~/Library/Caches/com.anthropic.claudefordesktop/* "Claude desktop cache"
safe_clean ~/Library/Logs/Claude/* "Claude logs"
}
# Design and creative tools.
clean_design_tools() {
safe_clean ~/Library/Caches/com.bohemiancoding.sketch3/* "Sketch cache"
safe_clean ~/Library/Application\ Support/com.bohemiancoding.sketch3/cache/* "Sketch app cache"
safe_clean ~/Library/Caches/Adobe/* "Adobe cache"
safe_clean ~/Library/Caches/com.adobe.*/* "Adobe app caches"
safe_clean ~/Library/Caches/com.figma.Desktop/* "Figma cache"
# Raycast cache is protected (clipboard history, images).
}
# Video editing tools.
clean_video_tools() {
safe_clean ~/Library/Caches/net.telestream.screenflow10/* "ScreenFlow cache"
safe_clean ~/Library/Caches/com.apple.FinalCut/* "Final Cut Pro cache"
safe_clean ~/Library/Caches/com.blackmagic-design.DaVinciResolve/* "DaVinci Resolve cache"
safe_clean ~/Library/Caches/com.adobe.PremierePro.*/* "Premiere Pro cache"
}
# 3D and CAD tools.
clean_3d_tools() {
safe_clean ~/Library/Caches/org.blenderfoundation.blender/* "Blender cache"
safe_clean ~/Library/Caches/com.maxon.cinema4d/* "Cinema 4D cache"
safe_clean ~/Library/Caches/com.autodesk.*/* "Autodesk cache"
safe_clean ~/Library/Caches/com.sketchup.*/* "SketchUp cache"
}
# Productivity apps.
clean_productivity_apps() {
safe_clean ~/Library/Caches/com.tw93.MiaoYan/* "MiaoYan cache"
safe_clean ~/Library/Caches/com.klee.desktop/* "Klee cache"
safe_clean ~/Library/Caches/klee_desktop/* "Klee desktop cache"
safe_clean ~/Library/Caches/com.orabrowser.app/* "Ora browser cache"
safe_clean ~/Library/Caches/com.filo.client/* "Filo cache"
safe_clean ~/Library/Caches/com.flomoapp.mac/* "Flomo cache"
safe_clean ~/Library/Application\ Support/Quark/Cache/videoCache/* "Quark video cache"
}
# Music/media players (protect Spotify offline music).
# Spotify's cache is skipped when offline music is detected, either via the
# offline.bnk database / *.file entries in PersistentCache, or when the
# cache exceeds ~500 MB (512000 KB) as a size heuristic.
clean_media_players() {
local spotify_cache="$HOME/Library/Caches/com.spotify.client"
local spotify_data="$HOME/Library/Application Support/Spotify"
local has_offline_music=false
# Heuristics: offline DB or large cache.
if [[ -f "$spotify_data/PersistentCache/Storage/offline.bnk" ]] ||
[[ -d "$spotify_data/PersistentCache/Storage" && -n "$(find "$spotify_data/PersistentCache/Storage" -type f -name "*.file" 2> /dev/null | head -1)" ]]; then
has_offline_music=true
elif [[ -d "$spotify_cache" ]]; then
local cache_size_kb
cache_size_kb=$(get_path_size_kb "$spotify_cache")
if [[ $cache_size_kb -ge 512000 ]]; then
has_offline_music=true
fi
fi
if [[ "$has_offline_music" == "true" ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Spotify cache protected · offline music detected"
note_activity
else
safe_clean ~/Library/Caches/com.spotify.client/* "Spotify cache"
fi
safe_clean ~/Library/Caches/com.apple.Music "Apple Music cache"
safe_clean ~/Library/Caches/com.apple.podcasts "Apple Podcasts cache"
safe_clean ~/Library/Caches/com.apple.TV/* "Apple TV cache"
safe_clean ~/Library/Caches/tv.plex.player.desktop "Plex cache"
safe_clean ~/Library/Caches/com.netease.163music "NetEase Music cache"
safe_clean ~/Library/Caches/com.tencent.QQMusic/* "QQ Music cache"
safe_clean ~/Library/Caches/com.kugou.mac/* "Kugou Music cache"
safe_clean ~/Library/Caches/com.kuwo.mac/* "Kuwo Music cache"
}
# Video players.
clean_video_players() {
safe_clean ~/Library/Caches/com.colliderli.iina "IINA cache"
safe_clean ~/Library/Caches/org.videolan.vlc "VLC cache"
safe_clean ~/Library/Caches/io.mpv "MPV cache"
safe_clean ~/Library/Caches/com.iqiyi.player "iQIYI cache"
safe_clean ~/Library/Caches/com.tencent.tenvideo "Tencent Video cache"
safe_clean ~/Library/Caches/tv.danmaku.bili/* "Bilibili cache"
safe_clean ~/Library/Caches/com.douyu.*/* "Douyu cache"
safe_clean ~/Library/Caches/com.huya.*/* "Huya cache"
}
# Download managers.
clean_download_managers() {
safe_clean ~/Library/Caches/net.xmac.aria2gui "Aria2 cache"
safe_clean ~/Library/Caches/org.m0k.transmission "Transmission cache"
safe_clean ~/Library/Caches/com.qbittorrent.qBittorrent "qBittorrent cache"
safe_clean ~/Library/Caches/com.downie.Downie-* "Downie cache"
safe_clean ~/Library/Caches/com.folx.*/* "Folx cache"
safe_clean ~/Library/Caches/com.charlessoft.pacifist/* "Pacifist cache"
}
# Gaming platforms.
clean_gaming_platforms() {
safe_clean ~/Library/Caches/com.valvesoftware.steam/* "Steam cache"
safe_clean ~/Library/Application\ Support/Steam/htmlcache/* "Steam web cache"
safe_clean ~/Library/Caches/com.epicgames.EpicGamesLauncher/* "Epic Games cache"
safe_clean ~/Library/Caches/com.blizzard.Battle.net/* "Battle.net cache"
safe_clean ~/Library/Application\ Support/Battle.net/Cache/* "Battle.net app cache"
safe_clean ~/Library/Caches/com.ea.*/* "EA Origin cache"
safe_clean ~/Library/Caches/com.gog.galaxy/* "GOG Galaxy cache"
safe_clean ~/Library/Caches/com.riotgames.*/* "Riot Games cache"
}
# Translation/dictionary apps.
clean_translation_apps() {
safe_clean ~/Library/Caches/com.youdao.YoudaoDict "Youdao Dictionary cache"
safe_clean ~/Library/Caches/com.eudic.* "Eudict cache"
safe_clean ~/Library/Caches/com.bob-build.Bob "Bob Translation cache"
}
# Screenshot/recording tools.
clean_screenshot_tools() {
safe_clean ~/Library/Caches/com.cleanshot.* "CleanShot cache"
safe_clean ~/Library/Caches/com.reincubate.camo "Camo cache"
safe_clean ~/Library/Caches/com.xnipapp.xnip "Xnip cache"
}
# Email clients.
clean_email_clients() {
safe_clean ~/Library/Caches/com.readdle.smartemail-Mac "Spark cache"
safe_clean ~/Library/Caches/com.airmail.* "Airmail cache"
}
# Task management apps.
clean_task_apps() {
safe_clean ~/Library/Caches/com.todoist.mac.Todoist "Todoist cache"
safe_clean ~/Library/Caches/com.any.do.* "Any.do cache"
}
# Shell/terminal utilities.
# These target dotfiles in $HOME rather than Library caches.
clean_shell_utils() {
safe_clean ~/.zcompdump* "Zsh completion cache"
safe_clean ~/.lesshst "less history"
safe_clean ~/.viminfo.tmp "Vim temporary files"
safe_clean ~/.wget-hsts "wget HSTS cache"
}
# Input methods and system utilities.
clean_system_utils() {
safe_clean ~/Library/Caches/com.runjuu.Input-Source-Pro/* "Input Source Pro cache"
safe_clean ~/Library/Caches/macos-wakatime.WakaTime/* "WakaTime cache"
}
# Note-taking apps.
clean_note_apps() {
safe_clean ~/Library/Caches/notion.id/* "Notion cache"
safe_clean ~/Library/Caches/md.obsidian/* "Obsidian cache"
safe_clean ~/Library/Caches/com.logseq.*/* "Logseq cache"
safe_clean ~/Library/Caches/com.bear-writer.*/* "Bear cache"
safe_clean ~/Library/Caches/com.evernote.*/* "Evernote cache"
safe_clean ~/Library/Caches/com.yinxiang.*/* "Yinxiang Note cache"
}
# Launchers and automation tools.
clean_launcher_apps() {
safe_clean ~/Library/Caches/com.runningwithcrayons.Alfred/* "Alfred cache"
safe_clean ~/Library/Caches/cx.c3.theunarchiver/* "The Unarchiver cache"
}
# Remote desktop tools.
clean_remote_desktop() {
safe_clean ~/Library/Caches/com.teamviewer.*/* "TeamViewer cache"
safe_clean ~/Library/Caches/com.anydesk.*/* "AnyDesk cache"
safe_clean ~/Library/Caches/com.todesk.*/* "ToDesk cache"
safe_clean ~/Library/Caches/com.sunlogin.*/* "Sunlogin cache"
}
# Main entry for GUI app cleanup.
# Runs every category cleaner in sequence; output order defines the report
# order shown to the user.
clean_user_gui_applications() {
stop_section_spinner
clean_xcode_tools
clean_code_editors
clean_communication_apps
clean_dingtalk
clean_ai_apps
clean_design_tools
clean_video_tools
clean_3d_tools
clean_productivity_apps
clean_media_players
clean_video_players
clean_download_managers
clean_gaming_platforms
clean_translation_apps
clean_screenshot_tools
clean_email_clients
clean_task_apps
clean_shell_utils
clean_system_utils
clean_note_apps
clean_launcher_apps
clean_remote_desktop
}

View File

@@ -1,313 +0,0 @@
#!/bin/bash
# Application Data Cleanup Module
set -euo pipefail
# Delete .DS_Store Finder metadata files under a directory tree.
# Args: $1=target_dir, $2=label
# Reads: DRY_RUN, MOLE_MAX_DS_STORE_FILES.
# Mutates global counters: files_cleaned, total_size_cleaned, total_items.
clean_ds_store_tree() {
local target="$1"
local label="$2"
[[ -d "$target" ]] || return 0
local file_count=0
local total_bytes=0
local spinner_active="false"
# Spinner only on interactive terminals.
if [[ -t 1 ]]; then
MOLE_SPINNER_PREFIX="  "
start_inline_spinner "Cleaning Finder metadata..."
spinner_active="true"
fi
# Prune protected or expensive subtrees from the find walk.
local -a exclude_paths=(
-path "*/Library/Application Support/MobileSync" -prune -o
-path "*/Library/Developer" -prune -o
-path "*/.Trash" -prune -o
-path "*/node_modules" -prune -o
-path "*/.git" -prune -o
-path "*/Library/Caches" -prune -o
)
local -a find_cmd=("command" "find" "$target")
# Cap depth when scanning the whole home directory to keep it fast.
if [[ "$target" == "$HOME" ]]; then
find_cmd+=("-maxdepth" "5")
fi
find_cmd+=("${exclude_paths[@]}" "-type" "f" "-name" ".DS_Store" "-print0")
# NUL-delimited stream handles paths containing spaces/newlines.
while IFS= read -r -d '' ds_file; do
local size
size=$(get_file_size "$ds_file")
total_bytes=$((total_bytes + size))
((file_count++))
if [[ "$DRY_RUN" != "true" ]]; then
rm -f "$ds_file" 2> /dev/null || true
fi
# Hard cap so a pathological tree cannot stall the run.
if [[ $file_count -ge $MOLE_MAX_DS_STORE_FILES ]]; then
break
fi
done < <("${find_cmd[@]}" 2> /dev/null || true)
if [[ "$spinner_active" == "true" ]]; then
stop_section_spinner
fi
# Report and update global accounting only when something was found.
if [[ $file_count -gt 0 ]]; then
local size_human
size_human=$(bytes_to_human "$total_bytes")
if [[ "$DRY_RUN" == "true" ]]; then
echo -e "  ${YELLOW}${ICON_DRY_RUN}${NC} $label ${YELLOW}($file_count files, $size_human dry)${NC}"
else
echo -e "  ${GREEN}${ICON_SUCCESS}${NC} $label ${GREEN}($file_count files, $size_human)${NC}"
fi
# Round bytes up to whole KB for the global size counter.
local size_kb=$(((total_bytes + 1023) / 1024))
((files_cleaned += file_count))
((total_size_cleaned += size_kb))
((total_items++))
note_activity
fi
}
# Build a sorted, de-duplicated list of bundle IDs that must NOT be treated as
# orphaned: installed apps, currently running apps, and LaunchAgent names.
# Env: ORPHAN_AGE_THRESHOLD, DRY_RUN
# Usage: scan_installed_apps "output_file"
scan_installed_apps() {
local installed_bundles="$1"
# Cache installed app scan briefly to speed repeated runs.
local cache_file="$HOME/.cache/mole/installed_apps_cache"
local cache_age_seconds=300 # 5 minutes
if [[ -f "$cache_file" ]]; then
local cache_mtime=$(get_file_mtime "$cache_file")
local current_time
current_time=$(get_epoch_seconds)
local age=$((current_time - cache_mtime))
if [[ $age -lt $cache_age_seconds ]]; then
debug_log "Using cached app list (age: ${age}s)"
# Only trust a readable, non-empty cache; otherwise fall through and rebuild.
if [[ -r "$cache_file" ]] && [[ -s "$cache_file" ]]; then
if cat "$cache_file" > "$installed_bundles" 2> /dev/null; then
return 0
else
debug_log "Warning: Failed to read cache, rebuilding"
fi
else
debug_log "Warning: Cache file empty or unreadable, rebuilding"
fi
fi
fi
debug_log "Scanning installed applications (cache expired or missing)"
local -a app_dirs=(
"/Applications"
"/System/Applications"
"$HOME/Applications"
# Homebrew Cask locations
"/opt/homebrew/Caskroom"
"/usr/local/Caskroom"
# Setapp applications
"$HOME/Library/Application Support/Setapp/Applications"
)
# Temp dir avoids write contention across parallel scans.
local scan_tmp_dir=$(create_temp_dir)
local pids=()
local dir_idx=0
# One background subshell per app directory; each writes its own result file.
for app_dir in "${app_dirs[@]}"; do
[[ -d "$app_dir" ]] || continue
(
local -a app_paths=()
while IFS= read -r app_path; do
[[ -n "$app_path" ]] && app_paths+=("$app_path")
done < <(find "$app_dir" -name '*.app' -maxdepth 3 -type d 2> /dev/null)
local count=0
for app_path in "${app_paths[@]:-}"; do
local plist_path="$app_path/Contents/Info.plist"
[[ ! -f "$plist_path" ]] && continue
# Extract the bundle identifier from the app's Info.plist.
local bundle_id=$(/usr/libexec/PlistBuddy -c "Print :CFBundleIdentifier" "$plist_path" 2> /dev/null || echo "")
if [[ -n "$bundle_id" ]]; then
echo "$bundle_id"
((count++))
fi
done
) > "$scan_tmp_dir/apps_${dir_idx}.txt" &
pids+=($!)
((dir_idx++))
done
# Collect running apps and LaunchAgents to avoid false orphan cleanup.
(
local running_apps=$(run_with_timeout 5 osascript -e 'tell application "System Events" to get bundle identifier of every application process' 2> /dev/null || echo "")
echo "$running_apps" | tr ',' '\n' | sed -e 's/^ *//;s/ *$//' -e '/^$/d' > "$scan_tmp_dir/running.txt"
# Fallback: lsappinfo is more reliable than osascript
if command -v lsappinfo > /dev/null 2>&1; then
run_with_timeout 3 lsappinfo list 2> /dev/null | grep -o '"CFBundleIdentifier"="[^"]*"' | cut -d'"' -f4 >> "$scan_tmp_dir/running.txt" 2> /dev/null || true
fi
) &
pids+=($!)
(
run_with_timeout 5 find ~/Library/LaunchAgents /Library/LaunchAgents \
-name "*.plist" -type f 2> /dev/null |
xargs -I {} basename {} .plist > "$scan_tmp_dir/agents.txt" 2> /dev/null || true
) &
pids+=($!)
debug_log "Waiting for ${#pids[@]} background processes: ${pids[*]}"
for pid in "${pids[@]}"; do
wait "$pid" 2> /dev/null || true
done
debug_log "All background processes completed"
# Merge all partial results, de-duplicate, then refresh the cache copy.
cat "$scan_tmp_dir"/*.txt >> "$installed_bundles" 2> /dev/null || true
safe_remove "$scan_tmp_dir" true
sort -u "$installed_bundles" -o "$installed_bundles"
ensure_user_dir "$(dirname "$cache_file")"
cp "$installed_bundles" "$cache_file" 2> /dev/null || true
local app_count=$(wc -l < "$installed_bundles" 2> /dev/null | tr -d ' ')
debug_log "Scanned $app_count unique applications"
}
# Sensitive data patterns that should never be treated as orphaned.
# These patterns protect security-critical application data (password
# managers, keychains, SSH/GPG material). Compared against the lowercased
# bundle id in is_bundle_orphaned(), hence both case variants listed.
readonly ORPHAN_NEVER_DELETE_PATTERNS=(
"*1password*" "*1Password*"
"*keychain*" "*Keychain*"
"*bitwarden*" "*Bitwarden*"
"*lastpass*" "*LastPass*"
"*keepass*" "*KeePass*"
"*dashlane*" "*Dashlane*"
"*enpass*" "*Enpass*"
"*ssh*" "*gpg*" "*gnupg*"
"com.apple.keychain*"
)
# Cache file for mdfind results (Bash 3.2 compatible, no associative arrays).
# Lazily created in is_bundle_orphaned(); holds FOUND:/NOTFOUND: lines.
ORPHAN_MDFIND_CACHE_FILE=""
# Decide whether a bundle's on-disk data is orphaned (returns 0) or must be
# kept (returns 1). Checks are ordered cheapest-first; mdfind is the last resort.
# Usage: is_bundle_orphaned "bundle_id" "directory_path" "installed_bundles_file"
is_bundle_orphaned() {
local bundle_id="$1"
local directory_path="$2"
local installed_bundles="$3"
# 1. Fast path: check protection list (in-memory, instant)
if should_protect_data "$bundle_id"; then
return 1
fi
# 2. Fast path: check sensitive data patterns (in-memory, instant)
local bundle_lower
bundle_lower=$(echo "$bundle_id" | LC_ALL=C tr '[:upper:]' '[:lower:]')
for pattern in "${ORPHAN_NEVER_DELETE_PATTERNS[@]}"; do
# shellcheck disable=SC2053
if [[ "$bundle_lower" == $pattern ]]; then
return 1
fi
done
# 3. Fast path: check installed bundles file (file read, fast)
if grep -Fxq "$bundle_id" "$installed_bundles" 2> /dev/null; then
return 1
fi
# 4. Fast path: hardcoded system components
case "$bundle_id" in
loginwindow | dock | systempreferences | systemsettings | settings | controlcenter | finder | safari)
return 1
;;
esac
# 5. Fast path: 60-day modification check (stat call, fast)
# Recently-modified data is assumed to belong to an active app.
if [[ -e "$directory_path" ]]; then
local last_modified_epoch=$(get_file_mtime "$directory_path")
local current_epoch
current_epoch=$(get_epoch_seconds)
local days_since_modified=$(((current_epoch - last_modified_epoch) / 86400))
if [[ $days_since_modified -lt ${ORPHAN_AGE_THRESHOLD:-60} ]]; then
return 1
fi
fi
# 6. Slow path: mdfind fallback with file-based caching (Bash 3.2 compatible)
# This catches apps installed in non-standard locations.
# Only queried for plausible bundle ids (charset check, length >= 5).
if [[ -n "$bundle_id" ]] && [[ "$bundle_id" =~ ^[a-zA-Z0-9._-]+$ ]] && [[ ${#bundle_id} -ge 5 ]]; then
# Initialize cache file if needed
if [[ -z "$ORPHAN_MDFIND_CACHE_FILE" ]]; then
ORPHAN_MDFIND_CACHE_FILE=$(mktemp "${TMPDIR:-/tmp}/mole_mdfind_cache.XXXXXX")
register_temp_file "$ORPHAN_MDFIND_CACHE_FILE"
fi
# Check cache first (grep is fast for small files)
if grep -Fxq "FOUND:$bundle_id" "$ORPHAN_MDFIND_CACHE_FILE" 2> /dev/null; then
return 1
fi
if grep -Fxq "NOTFOUND:$bundle_id" "$ORPHAN_MDFIND_CACHE_FILE" 2> /dev/null; then
# Already checked, not found - continue to return 0
:
else
# Query mdfind with strict timeout (2 seconds max)
local app_exists
app_exists=$(run_with_timeout 2 mdfind "kMDItemCFBundleIdentifier == '$bundle_id'" 2> /dev/null | head -1 || echo "")
if [[ -n "$app_exists" ]]; then
echo "FOUND:$bundle_id" >> "$ORPHAN_MDFIND_CACHE_FILE"
return 1
else
echo "NOTFOUND:$bundle_id" >> "$ORPHAN_MDFIND_CACHE_FILE"
fi
fi
fi
# All checks passed - this is an orphan
return 0
}
# Orphaned app data sweep: remove caches/logs/saved state left behind by
# apps that are no longer installed (per is_bundle_orphaned).
# Fixes: (1) glob patterns were expanded via unquoted `for match in $item_path`,
# which word-splits on spaces, so the "Saved Application State" entries were
# never matched; compgen -G expands the pattern without word splitting.
# (2) The summary line was missing `echo -e`, unlike every other colored line.
clean_orphaned_app_data() {
    # Bail out early when TCC permission to Library folders is missing.
    if ! ls "$HOME/Library/Caches" > /dev/null 2>&1; then
        stop_section_spinner
        echo -e "  ${YELLOW}${ICON_WARNING}${NC} Skipped: No permission to access Library folders"
        return 0
    fi
    start_section_spinner "Scanning installed apps..."
    local installed_bundles=$(create_temp_file)
    scan_installed_apps "$installed_bundles"
    stop_section_spinner
    local app_count=$(wc -l < "$installed_bundles" 2> /dev/null | tr -d ' ')
    echo -e "  ${GREEN}${ICON_SUCCESS}${NC} Found $app_count active/installed apps"
    local orphaned_count=0
    local total_orphaned_kb=0
    start_section_spinner "Scanning orphaned app resources..."
    # CRITICAL: NEVER add LaunchAgents or LaunchDaemons (breaks login items/startup apps).
    # Format: base_path|label|colon-separated glob patterns.
    local -a resource_types=(
        "$HOME/Library/Caches|Caches|com.*:org.*:net.*:io.*"
        "$HOME/Library/Logs|Logs|com.*:org.*:net.*:io.*"
        "$HOME/Library/Saved Application State|States|*.savedState"
        "$HOME/Library/WebKit|WebKit|com.*:org.*:net.*:io.*"
        "$HOME/Library/HTTPStorages|HTTP|com.*:org.*:net.*:io.*"
        "$HOME/Library/Cookies|Cookies|*.binarycookies"
    )
    orphaned_count=0
    for resource_type in "${resource_types[@]}"; do
        IFS='|' read -r base_path label patterns <<< "$resource_type"
        if [[ ! -d "$base_path" ]]; then
            continue
        fi
        # Skip bases we cannot read (no TCC grant for this folder).
        if ! ls "$base_path" > /dev/null 2>&1; then
            continue
        fi
        local -a file_patterns=()
        IFS=':' read -ra pattern_arr <<< "$patterns"
        for pat in "${pattern_arr[@]}"; do
            file_patterns+=("$base_path/$pat")
        done
        for item_path in "${file_patterns[@]}"; do
            local iteration_count=0
            # compgen -G expands the glob even when the base path contains
            # spaces (e.g. "Saved Application State"); prints nothing when
            # there is no match.
            while IFS= read -r match; do
                [[ -e "$match" ]] || continue
                ((iteration_count++))
                # Hard cap per pattern to bound the sweep.
                if [[ $iteration_count -gt $MOLE_MAX_ORPHAN_ITERATIONS ]]; then
                    break
                fi
                local bundle_id=$(basename "$match")
                bundle_id="${bundle_id%.savedState}"
                bundle_id="${bundle_id%.binarycookies}"
                if is_bundle_orphaned "$bundle_id" "$match" "$installed_bundles"; then
                    local size_kb
                    size_kb=$(get_path_size_kb "$match")
                    # Nothing to reclaim from empty entries.
                    if [[ -z "$size_kb" || "$size_kb" == "0" ]]; then
                        continue
                    fi
                    safe_clean "$match" "Orphaned $label: $bundle_id"
                    ((orphaned_count++))
                    ((total_orphaned_kb += size_kb))
                fi
            done < <(compgen -G "$item_path" 2> /dev/null || true)
        done
    done
    stop_section_spinner
    if [[ $orphaned_count -gt 0 ]]; then
        local orphaned_mb=$(echo "$total_orphaned_kb" | awk '{printf "%.1f", $1/1024}')
        # Bug fix: was plain `echo`, so color escapes would not be interpreted.
        echo -e "  ${GREEN}${ICON_SUCCESS}${NC} Cleaned $orphaned_count items (~${orphaned_mb}MB)"
        note_activity
    fi
    rm -f "$installed_bundles"
}

View File

@@ -1,117 +0,0 @@
#!/bin/bash
# Clean Homebrew caches and remove orphaned dependencies.
# Env: DRY_RUN
# Skips entirely if cleaned within 7 days; skips `brew cleanup` (but still runs
# `brew autoremove`) when the cache is under 50MB; runs both commands in
# parallel with a 120s timeout each.
# Fix: brew_pid/brew_tmp_file were declared without values, so referencing them
# on the skip-cleanup path aborted under `set -u`; initialize them to "".
clean_homebrew() {
    command -v brew > /dev/null 2>&1 || return 0
    if [[ "${DRY_RUN:-false}" == "true" ]]; then
        echo -e "  ${YELLOW}${ICON_DRY_RUN}${NC} Homebrew · would cleanup and autoremove"
        return 0
    fi
    # Skip if cleaned recently to avoid repeated heavy operations.
    local brew_cache_file="${HOME}/.cache/mole/brew_last_cleanup"
    local cache_valid_days=7
    local should_skip=false
    if [[ -f "$brew_cache_file" ]]; then
        local last_cleanup
        last_cleanup=$(cat "$brew_cache_file" 2> /dev/null || echo "0")
        local current_time
        current_time=$(get_epoch_seconds)
        local time_diff=$((current_time - last_cleanup))
        local days_diff=$((time_diff / 86400))
        if [[ $days_diff -lt $cache_valid_days ]]; then
            should_skip=true
            echo -e "  ${GREEN}${ICON_SUCCESS}${NC} Homebrew · cleaned ${days_diff}d ago, skipped"
        fi
    fi
    [[ "$should_skip" == "true" ]] && return 0
    # Skip cleanup if cache is small (<50MB); still run autoremove.
    local skip_cleanup=false
    local brew_cache_size=0
    if [[ -d ~/Library/Caches/Homebrew ]]; then
        brew_cache_size=$(run_with_timeout 3 du -sk ~/Library/Caches/Homebrew 2> /dev/null | awk '{print $1}')
        local du_exit=$?
        if [[ $du_exit -eq 0 && -n "$brew_cache_size" && "$brew_cache_size" -lt 51200 ]]; then
            skip_cleanup=true
        fi
    fi
    # Spinner reflects whether cleanup is skipped.
    if [[ -t 1 ]]; then
        if [[ "$skip_cleanup" == "true" ]]; then
            MOLE_SPINNER_PREFIX="  " start_inline_spinner "Homebrew autoremove (cleanup skipped)..."
        else
            MOLE_SPINNER_PREFIX="  " start_inline_spinner "Homebrew cleanup and autoremove..."
        fi
    fi
    # Run cleanup/autoremove in parallel with a timeout guard per command.
    local timeout_seconds=120
    # Initialize to "" so the skip-cleanup path never reads an unset variable
    # under `set -u` (previously: `local brew_pid` with no value).
    local brew_tmp_file="" autoremove_tmp_file=""
    local brew_pid="" autoremove_pid=""
    local brew_exit=0
    local autoremove_exit=0
    if [[ "$skip_cleanup" == "false" ]]; then
        brew_tmp_file=$(create_temp_file)
        run_with_timeout "$timeout_seconds" brew cleanup > "$brew_tmp_file" 2>&1 &
        brew_pid=$!
    fi
    autoremove_tmp_file=$(create_temp_file)
    run_with_timeout "$timeout_seconds" brew autoremove > "$autoremove_tmp_file" 2>&1 &
    autoremove_pid=$!
    if [[ -n "${brew_pid:-}" ]]; then
        wait "$brew_pid" 2> /dev/null || brew_exit=$?
    fi
    wait "$autoremove_pid" 2> /dev/null || autoremove_exit=$?
    local brew_success=false
    if [[ "$skip_cleanup" == "false" && $brew_exit -eq 0 ]]; then
        brew_success=true
    fi
    local autoremove_success=false
    if [[ $autoremove_exit -eq 0 ]]; then
        autoremove_success=true
    fi
    if [[ -t 1 ]]; then stop_inline_spinner; fi
    # Summarize cleanup results.
    if [[ "$skip_cleanup" == "true" ]]; then
        # Cleanup was skipped due to small cache size.
        local size_mb=$((brew_cache_size / 1024))
        echo -e "  ${GREEN}${ICON_SUCCESS}${NC} Homebrew cleanup · cache ${size_mb}MB, skipped"
    elif [[ "$brew_success" == "true" && -f "$brew_tmp_file" ]]; then
        local brew_output
        brew_output=$(cat "$brew_tmp_file" 2> /dev/null || echo "")
        local removed_count freed_space
        removed_count=$(printf '%s\n' "$brew_output" | grep -c "Removing:" 2> /dev/null || true)
        freed_space=$(printf '%s\n' "$brew_output" | grep -o "[0-9.]*[KMGT]B freed" 2> /dev/null | tail -1 || true)
        if [[ $removed_count -gt 0 ]] || [[ -n "$freed_space" ]]; then
            if [[ -n "$freed_space" ]]; then
                echo -e "  ${GREEN}${ICON_SUCCESS}${NC} Homebrew cleanup ${GREEN}($freed_space)${NC}"
            else
                echo -e "  ${GREEN}${ICON_SUCCESS}${NC} Homebrew cleanup (${removed_count} items)"
            fi
        fi
    elif [[ $brew_exit -eq 124 ]]; then
        # 124 is the conventional timed-out exit status.
        echo -e "  ${YELLOW}${ICON_WARNING}${NC} Homebrew cleanup timed out · run ${GRAY}brew cleanup${NC} manually"
    fi
    # Only surface autoremove output when packages were removed.
    if [[ "$autoremove_success" == "true" && -f "$autoremove_tmp_file" ]]; then
        local autoremove_output
        autoremove_output=$(cat "$autoremove_tmp_file" 2> /dev/null || echo "")
        local removed_packages
        removed_packages=$(printf '%s\n' "$autoremove_output" | grep -c "^Uninstalling" 2> /dev/null || true)
        if [[ $removed_packages -gt 0 ]]; then
            echo -e "  ${GREEN}${ICON_SUCCESS}${NC} Removed orphaned dependencies (${removed_packages} packages)"
        fi
    elif [[ $autoremove_exit -eq 124 ]]; then
        echo -e "  ${YELLOW}${ICON_WARNING}${NC} Autoremove timed out · run ${GRAY}brew autoremove${NC} manually"
    fi
    # Update cache timestamp when any work succeeded or was intentionally
    # skipped; prevents repeated cache-size checks within the 7-day window.
    if [[ "$skip_cleanup" == "true" ]] || [[ "$brew_success" == "true" ]] || [[ "$autoremove_success" == "true" ]]; then
        ensure_user_file "$brew_cache_file"
        get_epoch_seconds > "$brew_cache_file"
    fi
}

View File

@@ -1,217 +0,0 @@
#!/bin/bash
# Cache Cleanup Module
set -euo pipefail
# Preflight TCC prompts once to avoid mid-run interruptions.
# Interactive-only: probes protected Library folders so macOS shows all
# permission dialogs up front, then records a flag file to never prompt again.
check_tcc_permissions() {
[[ -t 1 ]] || return 0
local permission_flag="$HOME/.cache/mole/permissions_granted"
[[ -f "$permission_flag" ]] && return 0
# Folders whose first access triggers a macOS TCC dialog.
local -a tcc_dirs=(
"$HOME/Library/Caches"
"$HOME/Library/Logs"
"$HOME/Library/Application Support"
"$HOME/Library/Containers"
"$HOME/.cache"
)
# Quick permission probe (avoid deep scans).
local needs_permission_check=false
if ! ls "$HOME/Library/Caches" > /dev/null 2>&1; then
needs_permission_check=true
fi
if [[ "$needs_permission_check" == "true" ]]; then
echo ""
echo -e "${BLUE}First-time setup${NC}"
echo -e "${GRAY}macOS will request permissions to access Library folders.${NC}"
echo -e "${GRAY}You may see ${GREEN}${#tcc_dirs[@]} permission dialogs${NC}${GRAY} - please approve them all.${NC}"
echo ""
echo -ne "${PURPLE}${ICON_ARROW}${NC} Press ${GREEN}Enter${NC} to continue: "
read -r
MOLE_SPINNER_PREFIX="" start_inline_spinner "Requesting permissions..."
# Touch each directory to trigger prompts without deep scanning.
for dir in "${tcc_dirs[@]}"; do
[[ -d "$dir" ]] && command find "$dir" -maxdepth 1 -type d > /dev/null 2>&1
done
stop_inline_spinner
echo ""
fi
# Mark as granted to avoid repeat prompts.
ensure_user_file "$permission_flag"
return 0
}
# Clean a browser's Service Worker cache while protecting critical web editors.
# Args: $1=browser_name, $2=cache_path
# Reads: PROTECTED_SW_DOMAINS, DRY_RUN, INLINE_SPINNER_PID.
clean_service_worker_cache() {
local browser_name="$1"
local cache_path="$2"
[[ ! -d "$cache_path" ]] && return 0
local cleaned_size=0
local protected_count=0
# Iterate dirs exactly two levels deep (BSD find numeric -depth; macOS only).
while IFS= read -r cache_dir; do
[[ ! -d "$cache_dir" ]] && continue
# Extract a best-effort domain name from cache folder.
local domain=$(basename "$cache_dir" | grep -oE '[a-zA-Z0-9][-a-zA-Z0-9]*\.[a-zA-Z]{2,}' | head -1 || echo "")
# NOTE(review): run_with_timeout is handed a shell function
# (get_path_size_kb) — confirm it can invoke functions, not only
# external commands; otherwise size is always empty here.
local size=$(run_with_timeout 5 get_path_size_kb "$cache_dir")
local is_protected=false
for protected_domain in "${PROTECTED_SW_DOMAINS[@]}"; do
if [[ "$domain" == *"$protected_domain"* ]]; then
is_protected=true
protected_count=$((protected_count + 1))
break
fi
done
if [[ "$is_protected" == "false" ]]; then
if [[ "$DRY_RUN" != "true" ]]; then
safe_remove "$cache_dir" true || true
fi
cleaned_size=$((cleaned_size + size))
fi
done < <(run_with_timeout 10 sh -c "find '$cache_path' -type d -depth 2 2> /dev/null || true")
# Report only when something was (or would be) cleaned; pause the spinner
# so the line prints cleanly, then resume it.
if [[ $cleaned_size -gt 0 ]]; then
local spinner_was_running=false
if [[ -t 1 && -n "${INLINE_SPINNER_PID:-}" ]]; then
stop_inline_spinner
spinner_was_running=true
fi
local cleaned_mb=$((cleaned_size / 1024))
if [[ "$DRY_RUN" != "true" ]]; then
if [[ $protected_count -gt 0 ]]; then
echo -e "  ${GREEN}${ICON_SUCCESS}${NC} $browser_name Service Worker (${cleaned_mb}MB, ${protected_count} protected)"
else
echo -e "  ${GREEN}${ICON_SUCCESS}${NC} $browser_name Service Worker (${cleaned_mb}MB)"
fi
else
echo -e "  ${YELLOW}${ICON_DRY_RUN}${NC} $browser_name Service Worker (would clean ${cleaned_mb}MB, ${protected_count} protected)"
fi
note_activity
if [[ "$spinner_was_running" == "true" ]]; then
MOLE_SPINNER_PREFIX="  " start_inline_spinner "Scanning browser Service Worker caches..."
fi
fi
}
# Next.js (.next/cache) and Python (__pycache__) project caches, with tight
# scan bounds and timeouts so a huge home directory cannot stall the run.
clean_project_caches() {
stop_inline_spinner 2> /dev/null || true
# Fast pre-check before scanning the whole home dir.
local has_dev_projects=false
local -a common_dev_dirs=(
"$HOME/Code"
"$HOME/Projects"
"$HOME/workspace"
"$HOME/github"
"$HOME/dev"
"$HOME/work"
"$HOME/src"
"$HOME/repos"
"$HOME/Development"
"$HOME/www"
"$HOME/golang"
"$HOME/go"
"$HOME/rust"
"$HOME/python"
"$HOME/ruby"
"$HOME/java"
"$HOME/dotnet"
"$HOME/node"
)
for dir in "${common_dev_dirs[@]}"; do
if [[ -d "$dir" ]]; then
has_dev_projects=true
break
fi
done
# Fallback: look for project markers near $HOME.
if [[ "$has_dev_projects" == "false" ]]; then
local -a project_markers=(
"node_modules"
".git"
"target"
"go.mod"
"Cargo.toml"
"package.json"
"pom.xml"
"build.gradle"
)
local spinner_active=false
if [[ -t 1 ]]; then
MOLE_SPINNER_PREFIX="  "
start_inline_spinner "Detecting dev projects..."
spinner_active=true
fi
for marker in "${project_markers[@]}"; do
if run_with_timeout 3 sh -c "find '$HOME' -maxdepth 2 -name '$marker' -not -path '*/Library/*' -not -path '*/.Trash/*' 2>/dev/null | head -1" | grep -q .; then
has_dev_projects=true
break
fi
done
if [[ "$spinner_active" == "true" ]]; then
stop_inline_spinner 2> /dev/null || true
fi
# No dev projects found anywhere: nothing to do.
[[ "$has_dev_projects" == "false" ]] && return 0
fi
if [[ -t 1 ]]; then
MOLE_SPINNER_PREFIX="  "
start_inline_spinner "Searching project caches..."
fi
local nextjs_tmp_file
nextjs_tmp_file=$(create_temp_file)
local pycache_tmp_file
pycache_tmp_file=$(create_temp_file)
local find_timeout=10
# Parallel scans (Next.js and __pycache__); each writes to its own temp file.
(
command find "$HOME" -P -mount -type d -name ".next" -maxdepth 3 \
-not -path "*/Library/*" \
-not -path "*/.Trash/*" \
-not -path "*/node_modules/*" \
-not -path "*/.*" \
2> /dev/null || true
) > "$nextjs_tmp_file" 2>&1 &
local next_pid=$!
(
command find "$HOME" -P -mount -type d -name "__pycache__" -maxdepth 3 \
-not -path "*/Library/*" \
-not -path "*/.Trash/*" \
-not -path "*/node_modules/*" \
-not -path "*/.*" \
2> /dev/null || true
) > "$pycache_tmp_file" 2>&1 &
local py_pid=$!
# Poll both scans until done or find_timeout elapses (awk handles the
# fractional-second arithmetic/comparison).
local elapsed=0
local check_interval=0.2 # Check every 200ms instead of 1s for smoother experience
while [[ $(echo "$elapsed < $find_timeout" | awk '{print ($1 < $2)}') -eq 1 ]]; do
if ! kill -0 $next_pid 2> /dev/null && ! kill -0 $py_pid 2> /dev/null; then
break
fi
sleep $check_interval
elapsed=$(echo "$elapsed + $check_interval" | awk '{print $1 + $2}')
done
# Kill stuck scans after timeout: TERM first, 2s grace, then KILL.
for pid in $next_pid $py_pid; do
if kill -0 "$pid" 2> /dev/null; then
kill -TERM "$pid" 2> /dev/null || true
local grace_period=0
while [[ $grace_period -lt 20 ]]; do
if ! kill -0 "$pid" 2> /dev/null; then
break
fi
sleep 0.1
((grace_period++))
done
if kill -0 "$pid" 2> /dev/null; then
kill -KILL "$pid" 2> /dev/null || true
fi
wait "$pid" 2> /dev/null || true
else
wait "$pid" 2> /dev/null || true
fi
done
if [[ -t 1 ]]; then
stop_inline_spinner
fi
# Clean only the cache subdirectory of each .next build dir.
while IFS= read -r next_dir; do
[[ -d "$next_dir/cache" ]] && safe_clean "$next_dir/cache"/* "Next.js build cache" || true
done < "$nextjs_tmp_file"
while IFS= read -r pycache; do
[[ -d "$pycache" ]] && safe_clean "$pycache"/* "Python bytecode cache" || true
done < "$pycache_tmp_file"
}

View File

@@ -1,296 +0,0 @@
#!/bin/bash
# Developer Tools Cleanup Module
set -euo pipefail
# Run a cleanup command and report its outcome (respects DRY_RUN).
# Args: $1=description, remaining args = command (and arguments) to execute.
# Prints a success line only if the command succeeds; always returns 0.
clean_tool_cache() {
    local description="$1"
    shift
    if [[ "$DRY_RUN" == "true" ]]; then
        echo -e "  ${YELLOW}${ICON_DRY_RUN}${NC} $description · would clean"
        return 0
    fi
    if "$@" > /dev/null 2>&1; then
        echo -e "  ${GREEN}${ICON_SUCCESS}${NC} $description"
    fi
    return 0
}
# npm/pnpm/yarn/bun caches.
clean_dev_npm() {
if command -v npm > /dev/null 2>&1; then
clean_tool_cache "npm cache" npm cache clean --force
note_activity
fi
# Clean pnpm store cache
local pnpm_default_store=~/Library/pnpm/store
# Check if pnpm is actually usable (not just Corepack shim);
# COREPACK_ENABLE_DOWNLOAD_PROMPT=0 prevents an interactive download prompt.
if command -v pnpm > /dev/null 2>&1 && COREPACK_ENABLE_DOWNLOAD_PROMPT=0 pnpm --version > /dev/null 2>&1; then
COREPACK_ENABLE_DOWNLOAD_PROMPT=0 clean_tool_cache "pnpm cache" pnpm store prune
local pnpm_store_path
start_section_spinner "Checking store path..."
pnpm_store_path=$(COREPACK_ENABLE_DOWNLOAD_PROMPT=0 run_with_timeout 2 pnpm store path 2> /dev/null) || pnpm_store_path=""
stop_section_spinner
# If the configured store moved away from the default, the default dir is orphaned.
if [[ -n "$pnpm_store_path" && "$pnpm_store_path" != "$pnpm_default_store" ]]; then
safe_clean "$pnpm_default_store"/* "Orphaned pnpm store"
fi
else
# pnpm not installed or not usable, just clean the default store directory
safe_clean "$pnpm_default_store"/* "pnpm store"
fi
note_activity
safe_clean ~/.tnpm/_cacache/* "tnpm cache directory"
safe_clean ~/.tnpm/_logs/* "tnpm logs"
safe_clean ~/.yarn/cache/* "Yarn cache"
safe_clean ~/.bun/install/cache/* "Bun cache"
}
# Python/pip ecosystem caches, including ML tooling (HF/torch/TF/W&B) and
# conda package caches.
clean_dev_python() {
if command -v pip3 > /dev/null 2>&1; then
clean_tool_cache "pip cache" bash -c 'pip3 cache purge >/dev/null 2>&1 || true'
note_activity
fi
safe_clean ~/.pyenv/cache/* "pyenv cache"
safe_clean ~/.cache/poetry/* "Poetry cache"
safe_clean ~/.cache/uv/* "uv cache"
safe_clean ~/.cache/ruff/* "Ruff cache"
safe_clean ~/.cache/mypy/* "MyPy cache"
safe_clean ~/.pytest_cache/* "Pytest cache"
safe_clean ~/.jupyter/runtime/* "Jupyter runtime cache"
safe_clean ~/.cache/huggingface/* "Hugging Face cache"
safe_clean ~/.cache/torch/* "PyTorch cache"
safe_clean ~/.cache/tensorflow/* "TensorFlow cache"
safe_clean ~/.conda/pkgs/* "Conda packages cache"
safe_clean ~/anaconda3/pkgs/* "Anaconda packages cache"
safe_clean ~/.cache/wandb/* "Weights & Biases cache"
}
# Go build and module caches (only when the go toolchain is present).
clean_dev_go() {
    command -v go > /dev/null 2>&1 || return 0
    clean_tool_cache "Go cache" bash -c 'go clean -modcache >/dev/null 2>&1 || true; go clean -cache >/dev/null 2>&1 || true'
    note_activity
}
# Rust toolchain caches: cargo registry, cargo git checkouts, rustup downloads.
clean_dev_rust() {
    safe_clean "$HOME"/.cargo/registry/cache/* "Rust cargo cache"
    safe_clean "$HOME"/.cargo/git/* "Cargo git cache"
    safe_clean "$HOME"/.rustup/downloads/* "Rust downloads cache"
}
# Docker caches (guarded by daemon check).
# Only runs `docker builder prune` when the daemon answers within 3s;
# the BuildX cache directory is cleaned regardless.
clean_dev_docker() {
if command -v docker > /dev/null 2>&1; then
if [[ "$DRY_RUN" != "true" ]]; then
start_section_spinner "Checking Docker daemon..."
local docker_running=false
if run_with_timeout 3 docker info > /dev/null 2>&1; then
docker_running=true
fi
stop_section_spinner
if [[ "$docker_running" == "true" ]]; then
clean_tool_cache "Docker build cache" docker builder prune -af
else
debug_log "Docker daemon not running, skipping Docker cache cleanup"
fi
else
note_activity
echo -e "  ${YELLOW}${ICON_DRY_RUN}${NC} Docker build cache · would clean"
fi
fi
safe_clean ~/.docker/buildx/cache/* "Docker BuildX cache"
}
# Nix garbage collection: delete store paths unused for more than 30 days.
clean_dev_nix() {
    command -v nix-collect-garbage > /dev/null 2>&1 || return 0
    # clean_tool_cache prints the "· would clean" line itself when
    # DRY_RUN=true, so no separate dry-run branch is needed here.
    clean_tool_cache "Nix garbage collection" nix-collect-garbage --delete-older-than 30d
    note_activity
}
# Cloud/container CLI caches and logs: kubectl, podman storage temp,
# AWS CLI, gcloud, Azure CLI.
clean_dev_cloud() {
safe_clean ~/.kube/cache/* "Kubernetes cache"
safe_clean ~/.local/share/containers/storage/tmp/* "Container storage temp"
safe_clean ~/.aws/cli/cache/* "AWS CLI cache"
safe_clean ~/.config/gcloud/logs/* "Google Cloud logs"
safe_clean ~/.azure/logs/* "Azure CLI logs"
}
# Frontend build-tool caches: TypeScript, Electron, node-gyp, Turbo, Vite,
# Webpack, Parcel, ESLint, Prettier.
clean_dev_frontend() {
safe_clean ~/.cache/typescript/* "TypeScript cache"
safe_clean ~/.cache/electron/* "Electron cache"
safe_clean ~/.cache/node-gyp/* "node-gyp cache"
safe_clean ~/.node-gyp/* "node-gyp build cache"
safe_clean ~/.turbo/cache/* "Turbo cache"
safe_clean ~/.vite/cache/* "Vite cache"
safe_clean ~/.cache/vite/* "Vite global cache"
safe_clean ~/.cache/webpack/* "Webpack cache"
safe_clean ~/.parcel-cache/* "Parcel cache"
safe_clean ~/.cache/eslint/* "ESLint cache"
safe_clean ~/.cache/prettier/* "Prettier cache"
}
# Mobile dev caches (can be large).
# Detect multiple installed Android NDK versions and suggest manual cleanup;
# never deletes automatically since the active version is unknown here.
check_android_ndk() {
    local ndk_dir="$HOME/Library/Android/sdk/ndk"
    [[ -d "$ndk_dir" ]] || return 0
    local count
    count=$(find "$ndk_dir" -mindepth 1 -maxdepth 1 -type d 2> /dev/null | wc -l | tr -d ' ')
    [[ "$count" -gt 1 ]] || return 0
    note_activity
    echo -e "  Found ${GREEN}${count}${NC} Android NDK versions"
    echo -e "  You can delete unused versions manually: ${ndk_dir}"
}
# Mobile development caches: Xcode simulators/DeviceSupport, Android Studio,
# CocoaPods, Flutter, Swift Package Manager.
clean_dev_mobile() {
check_android_ndk
if command -v xcrun > /dev/null 2>&1; then
debug_log "Checking for unavailable Xcode simulators"
if [[ "$DRY_RUN" == "true" ]]; then
clean_tool_cache "Xcode unavailable simulators" xcrun simctl delete unavailable
else
start_section_spinner "Checking unavailable simulators..."
# Only report when the delete actually succeeded.
if xcrun simctl delete unavailable > /dev/null 2>&1; then
stop_section_spinner
echo -e "  ${GREEN}${ICON_SUCCESS}${NC} Xcode unavailable simulators"
else
stop_section_spinner
fi
fi
note_activity
fi
# DeviceSupport caches/logs (preserve core support files).
safe_clean ~/Library/Developer/Xcode/iOS\ DeviceSupport/*/Symbols/System/Library/Caches/* "iOS device symbol cache"
safe_clean ~/Library/Developer/Xcode/iOS\ DeviceSupport/*.log "iOS device support logs"
safe_clean ~/Library/Developer/Xcode/watchOS\ DeviceSupport/*/Symbols/System/Library/Caches/* "watchOS device symbol cache"
safe_clean ~/Library/Developer/Xcode/tvOS\ DeviceSupport/*/Symbols/System/Library/Caches/* "tvOS device symbol cache"
# Simulator runtime caches.
safe_clean ~/Library/Developer/CoreSimulator/Profiles/Runtimes/*/Contents/Resources/RuntimeRoot/System/Library/Caches/* "Simulator runtime cache"
safe_clean ~/Library/Caches/Google/AndroidStudio*/* "Android Studio cache"
safe_clean ~/Library/Caches/CocoaPods/* "CocoaPods cache"
safe_clean ~/.cache/flutter/* "Flutter cache"
safe_clean ~/.android/build-cache/* "Android build cache"
safe_clean ~/.android/cache/* "Android SDK cache"
safe_clean ~/Library/Developer/Xcode/UserData/IB\ Support/* "Xcode Interface Builder cache"
safe_clean ~/.cache/swift-package-manager/* "Swift package manager cache"
}
# JVM ecosystem caches: Gradle caches/daemon logs, SBT, and Ivy artifacts.
clean_dev_jvm() {
    safe_clean "$HOME"/.gradle/caches/* "Gradle caches"
    safe_clean "$HOME"/.gradle/daemon/* "Gradle daemon logs"
    safe_clean "$HOME"/.sbt/* "SBT cache"
    safe_clean "$HOME"/.ivy2/cache/* "Ivy cache"
}
# Other language tool caches: Ruby, PHP, .NET, Dart, Bazel, Zig, Deno.
clean_dev_other_langs() {
safe_clean ~/.bundle/cache/* "Ruby Bundler cache"
safe_clean ~/.composer/cache/* "PHP Composer cache"
safe_clean ~/.nuget/packages/* "NuGet packages cache"
safe_clean ~/.pub-cache/* "Dart Pub cache"
safe_clean ~/.cache/bazel/* "Bazel cache"
safe_clean ~/.cache/zig/* "Zig cache"
safe_clean ~/Library/Caches/deno/* "Deno cache"
}
# CI/CD and DevOps caches: Terraform, Grafana, Prometheus WAL, Jenkins,
# GitLab Runner, GitHub Actions, CircleCI, SonarQube.
clean_dev_cicd() {
safe_clean ~/.cache/terraform/* "Terraform cache"
safe_clean ~/.grafana/cache/* "Grafana cache"
safe_clean ~/.prometheus/data/wal/* "Prometheus WAL cache"
safe_clean ~/.jenkins/workspace/*/target/* "Jenkins workspace cache"
safe_clean ~/.cache/gitlab-runner/* "GitLab Runner cache"
safe_clean ~/.github/cache/* "GitHub Actions cache"
safe_clean ~/.circleci/cache/* "CircleCI cache"
safe_clean ~/.sonar/* "SonarQube cache"
}
# Database GUI tool caches: Sequel Ace/Pro, Redis clients, Navicat, DBeaver.
clean_dev_database() {
safe_clean ~/Library/Caches/com.sequel-ace.sequel-ace/* "Sequel Ace cache"
safe_clean ~/Library/Caches/com.eggerapps.Sequel-Pro/* "Sequel Pro cache"
safe_clean ~/Library/Caches/redis-desktop-manager/* "Redis Desktop Manager cache"
safe_clean ~/Library/Caches/com.navicat.* "Navicat cache"
safe_clean ~/Library/Caches/com.dbeaver.* "DBeaver cache"
safe_clean ~/Library/Caches/com.redis.RedisInsight "Redis Insight cache"
}
# API client and debugging-proxy caches: Postman, Insomnia, TablePlus, Paw,
# Charles Proxy, Proxyman.
clean_dev_api_tools() {
safe_clean ~/Library/Caches/com.postmanlabs.mac/* "Postman cache"
safe_clean ~/Library/Caches/com.konghq.insomnia/* "Insomnia cache"
safe_clean ~/Library/Caches/com.tinyapp.TablePlus/* "TablePlus cache"
safe_clean ~/Library/Caches/com.getpaw.Paw/* "Paw API cache"
safe_clean ~/Library/Caches/com.charlesproxy.charles/* "Charles Proxy cache"
safe_clean ~/Library/Caches/com.proxyman.NSProxy/* "Proxyman cache"
}
# Misc dev tool caches: Unity, MongoDB Compass, Figma, GitHub Desktop,
# plus crash-reporting SDK data (Sentry, KSCrash, Crashlytics).
clean_dev_misc() {
safe_clean ~/Library/Caches/com.unity3d.*/* "Unity cache"
safe_clean ~/Library/Caches/com.mongodb.compass/* "MongoDB Compass cache"
safe_clean ~/Library/Caches/com.figma.Desktop/* "Figma cache"
safe_clean ~/Library/Caches/com.github.GitHubDesktop/* "GitHub Desktop cache"
safe_clean ~/Library/Caches/SentryCrash/* "Sentry crash reports"
safe_clean ~/Library/Caches/KSCrash/* "KSCrash reports"
safe_clean ~/Library/Caches/com.crashlytics.data/* "Crashlytics data"
}
# Shell and VCS leftovers: stale git locks/backups, shell history backups,
# Oh My Zsh and pre-commit caches.
clean_dev_shell() {
safe_clean ~/.gitconfig.lock "Git config lock"
safe_clean ~/.gitconfig.bak* "Git config backup"
safe_clean ~/.oh-my-zsh/cache/* "Oh My Zsh cache"
safe_clean ~/.config/fish/fish_history.bak* "Fish shell backup"
safe_clean ~/.bash_history.bak* "Bash history backup"
safe_clean ~/.zsh_history.bak* "Zsh history backup"
safe_clean ~/.cache/pre-commit/* "pre-commit cache"
}
# Network tool caches: curl and wget, in both XDG (~/.cache) and macOS
# (~/Library/Caches) locations.
clean_dev_network() {
    safe_clean "$HOME"/.cache/curl/* "curl cache"
    safe_clean "$HOME"/.cache/wget/* "wget cache"
    safe_clean "$HOME"/Library/Caches/curl/* "macOS curl cache"
    safe_clean "$HOME"/Library/Caches/wget/* "macOS wget cache"
}
# Orphaned SQLite temp files (-shm/-wal). Disabled due to low ROI.
# Kept as a no-op so the call site in clean_developer_tools keeps working.
clean_sqlite_temp_files() {
return 0
}
# Main developer tools cleanup sequence: runs every clean_dev_* helper in
# order, then handles Homebrew caches, stale locks, and brew cleanup/autoremove.
clean_developer_tools() {
stop_section_spinner
clean_sqlite_temp_files
clean_dev_npm
clean_dev_python
clean_dev_go
clean_dev_rust
clean_dev_docker
clean_dev_cloud
clean_dev_nix
clean_dev_shell
clean_dev_frontend
clean_project_caches
clean_dev_mobile
clean_dev_jvm
clean_dev_other_langs
clean_dev_cicd
clean_dev_database
clean_dev_api_tools
clean_dev_network
clean_dev_misc
safe_clean ~/Library/Caches/Homebrew/* "Homebrew cache"
# Clean Homebrew locks without repeated sudo prompts.
# Both Apple Silicon (/opt/homebrew) and Intel (/usr/local) prefixes.
local brew_lock_dirs=(
"/opt/homebrew/var/homebrew/locks"
"/usr/local/var/homebrew/locks"
)
for lock_dir in "${brew_lock_dirs[@]}"; do
if [[ -d "$lock_dir" && -w "$lock_dir" ]]; then
safe_clean "$lock_dir"/* "Homebrew lock files"
elif [[ -d "$lock_dir" ]]; then
# Not writable: only log when there is actually something inside.
if find "$lock_dir" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then
debug_log "Skipping read-only Homebrew locks in $lock_dir"
fi
fi
done
clean_homebrew
}

View File

@@ -1,925 +0,0 @@
#!/bin/bash
# Project Purge Module (mo purge).
# Removes heavy project build artifacts and dependencies.
set -euo pipefail
# Resolve this module's directory and the sibling core/ directory so the
# shared helpers can be sourced when this file is run standalone.
PROJECT_LIB_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CORE_LIB_DIR="$(cd "$PROJECT_LIB_DIR/../core" && pwd)"
# Only source common.sh when its functions are not already in scope
# (ensure_user_dir acts as the sentinel for "common.sh already loaded").
if ! command -v ensure_user_dir > /dev/null 2>&1; then
    # shellcheck disable=SC1090
    source "$CORE_LIB_DIR/common.sh"
fi
# Targets to look for (heavy build artifacts).
# Each name is matched as a directory basename during scanning; "bin" and
# "vendor" are additionally guarded (see is_protected_purge_artifact).
readonly PURGE_TARGETS=(
    "node_modules"
    "target"         # Rust, Maven
    "build"          # Gradle, various
    "dist"           # JS builds
    "venv"           # Python
    ".venv"          # Python
    ".pytest_cache"  # Python (pytest)
    ".mypy_cache"    # Python (mypy)
    ".tox"           # Python (tox virtualenvs)
    ".nox"           # Python (nox virtualenvs)
    ".ruff_cache"    # Python (ruff)
    ".gradle"        # Gradle local
    "__pycache__"    # Python
    ".next"          # Next.js
    ".nuxt"          # Nuxt.js
    ".output"        # Nuxt.js
    "vendor"         # PHP Composer
    "bin"            # .NET build output (guarded; see is_protected_purge_artifact)
    "obj"            # C# / Unity
    ".turbo"         # Turborepo cache
    ".parcel-cache"  # Parcel bundler
    ".dart_tool"     # Flutter/Dart build cache
    ".zig-cache"     # Zig
    "zig-out"        # Zig
    ".angular"       # Angular
    ".svelte-kit"    # SvelteKit
    ".astro"         # Astro
    "coverage"       # Code coverage reports
)
# Minimum age in days before considering for cleanup.
readonly MIN_AGE_DAYS=7
# Scan depth defaults (relative to search root).
readonly PURGE_MIN_DEPTH_DEFAULT=2
readonly PURGE_MAX_DEPTH_DEFAULT=8
# Search paths (default, can be overridden via config file).
readonly DEFAULT_PURGE_SEARCH_PATHS=(
    "$HOME/www"
    "$HOME/dev"
    "$HOME/Projects"
    "$HOME/GitHub"
    "$HOME/Code"
    "$HOME/Workspace"
    "$HOME/Repos"
    "$HOME/Development"
)
# Config file for custom purge paths.
readonly PURGE_CONFIG_FILE="$HOME/.config/mole/purge_paths"
# Resolved search paths.
# Populated by load_purge_config (config file or auto-discovery).
PURGE_SEARCH_PATHS=()
# Project indicators for container detection.
# Presence of any of these marks a directory as a project root.
readonly PROJECT_INDICATORS=(
    "package.json"
    "Cargo.toml"
    "go.mod"
    "pyproject.toml"
    "requirements.txt"
    "pom.xml"
    "build.gradle"
    "Gemfile"
    "composer.json"
    "pubspec.yaml"
    "Makefile"
    "build.zig"
    "build.zig.zon"
    ".git"
)
# Check if a directory contains projects (directly or in subdirectories).
# Args:
#   $1 - directory to inspect
#   $2 - find search depth (default 2)
# Returns 0 when any PROJECT_INDICATORS file is found within the depth,
# 1 for hidden/system directories or when nothing matches.
is_project_container() {
    local dir="$1"
    local max_depth="${2:-2}"
    # Exclude hidden and well-known system folders outright.
    case "$(basename "$dir")" in
        .* | Library | Applications | Movies | Music | Pictures | Public)
            return 1
            ;;
    esac
    # Build a single find expression matching any known project indicator.
    local -a expr=()
    local indicator
    for indicator in "${PROJECT_INDICATORS[@]}"; do
        if [[ ${#expr[@]} -gt 0 ]]; then
            expr+=("-o")
        fi
        expr+=("-name" "$indicator")
    done
    if find "$dir" -maxdepth "$max_depth" "(" "${expr[@]}" ")" -print -quit 2> /dev/null | grep -q .; then
        return 0
    fi
    return 1
}
# Discover project directories in $HOME.
# Emits (stdout) a sorted, de-duplicated list of container directories:
# every existing DEFAULT_PURGE_SEARCH_PATHS entry, plus any depth-1 child
# of $HOME that is_project_container accepts. Emits nothing when none found.
discover_project_dirs() {
    local -a discovered=()
    for path in "${DEFAULT_PURGE_SEARCH_PATHS[@]}"; do
        if [[ -d "$path" ]]; then
            discovered+=("$path")
        fi
    done
    # Scan $HOME for other containers (depth 1).
    local dir
    for dir in "$HOME"/*/; do
        [[ ! -d "$dir" ]] && continue
        dir="${dir%/}" # Remove trailing slash
        local already_found=false
        for existing in "${DEFAULT_PURGE_SEARCH_PATHS[@]}"; do
            if [[ "$dir" == "$existing" ]]; then
                already_found=true
                break
            fi
        done
        [[ "$already_found" == "true" ]] && continue
        if is_project_container "$dir" 2; then
            discovered+=("$dir")
        fi
    done
    # Guard the expansion: expanding an empty array with "${arr[@]}" under
    # `set -u` aborts the script as "unbound variable" on bash 3.2 (the
    # macOS default shell), and an unguarded printf would emit a stray
    # blank line even on newer bash.
    if [[ ${#discovered[@]} -gt 0 ]]; then
        printf '%s\n' "${discovered[@]}" | sort -u
    fi
}
# Save discovered paths to config.
save_discovered_paths() {
local -a paths=("$@")
ensure_user_dir "$(dirname "$PURGE_CONFIG_FILE")"
cat > "$PURGE_CONFIG_FILE" << 'EOF'
# Mole Purge Paths - Auto-discovered project directories
# Edit this file to customize, or run: mo purge --paths
# Add one path per line (supports ~ for home directory)
EOF
printf '\n' >> "$PURGE_CONFIG_FILE"
for path in "${paths[@]}"; do
# Convert $HOME to ~ for portability
path="${path/#$HOME/~}"
echo "$path" >> "$PURGE_CONFIG_FILE"
done
}
# Load purge paths from config or auto-discover
# Populates the global PURGE_SEARCH_PATHS array:
#   1. From PURGE_CONFIG_FILE if present (blank/comment lines skipped,
#      whitespace trimmed, leading ~ expanded to $HOME).
#   2. Otherwise via discover_project_dirs, persisting the result.
#   3. Falls back to DEFAULT_PURGE_SEARCH_PATHS when discovery finds nothing.
# Progress messages go to stderr, only on a TTY and when
# _PURGE_DISCOVERY_SILENT is unset.
load_purge_config() {
    PURGE_SEARCH_PATHS=()
    if [[ -f "$PURGE_CONFIG_FILE" ]]; then
        while IFS= read -r line; do
            # Trim leading then trailing whitespace via parameter expansion.
            line="${line#"${line%%[![:space:]]*}"}"
            line="${line%"${line##*[![:space:]]}"}"
            [[ -z "$line" || "$line" =~ ^# ]] && continue
            # Expand a leading ~ to the home directory.
            line="${line/#\~/$HOME}"
            PURGE_SEARCH_PATHS+=("$line")
        done < "$PURGE_CONFIG_FILE"
    fi
    if [[ ${#PURGE_SEARCH_PATHS[@]} -eq 0 ]]; then
        if [[ -t 1 ]] && [[ -z "${_PURGE_DISCOVERY_SILENT:-}" ]]; then
            echo -e "${GRAY}First run: discovering project directories...${NC}" >&2
        fi
        local -a discovered=()
        while IFS= read -r path; do
            [[ -n "$path" ]] && discovered+=("$path")
        done < <(discover_project_dirs)
        if [[ ${#discovered[@]} -gt 0 ]]; then
            PURGE_SEARCH_PATHS=("${discovered[@]}")
            save_discovered_paths "${discovered[@]}"
            if [[ -t 1 ]] && [[ -z "${_PURGE_DISCOVERY_SILENT:-}" ]]; then
                echo -e "${GRAY}Found ${#discovered[@]} project directories, saved to config${NC}" >&2
            fi
        else
            PURGE_SEARCH_PATHS=("${DEFAULT_PURGE_SEARCH_PATHS[@]}")
        fi
    fi
}
# Initialize paths on script load so PURGE_SEARCH_PATHS is always populated
# before any scan/clean function runs.
load_purge_config
# Args: $1 - candidate path, $2 - search root
# Safe cleanup requires the path be inside a project directory: it must be
# absolute and nested at least one level below the search root (never a
# direct child of the root itself).
is_safe_project_artifact() {
    local candidate="$1"
    local root="$2"
    # Reject anything that is not an absolute path.
    [[ "$candidate" == /* ]] || return 1
    local rel="${candidate#"$root"/}"
    # A remaining '/' means the relative path has depth >= 1.
    if [[ "$rel" == */* ]]; then
        return 0
    fi
    return 1
}
# Detect if directory is a Rails project root:
# config/application.rb and Gemfile must exist, plus either bin/rails
# or config/environment.rb.
is_rails_project_root() {
    local dir="$1"
    [[ -f "$dir/config/application.rb" && -f "$dir/Gemfile" ]] || return 1
    [[ -f "$dir/bin/rails" || -f "$dir/config/environment.rb" ]]
}
# Detect if directory is a Go project root (module root with go.mod).
is_go_project_root() {
    [[ -f "$1/go.mod" ]]
}
# Detect if directory is a PHP Composer project root (has composer.json).
is_php_project_root() {
    [[ -f "$1/composer.json" ]]
}
# Decide whether a "bin" directory is a .NET build-output directory.
# Requires all three: basename is "bin", the parent contains a
# *.csproj/*.fsproj/*.vbproj file, and bin/ holds Debug/ or Release/.
is_dotnet_bin_dir() {
    local candidate="$1"
    [[ "$(basename "$candidate")" == "bin" ]] || return 1
    local project_dir
    project_dir="$(dirname "$candidate")"
    # A .NET project file must sit next to bin/.
    find "$project_dir" -maxdepth 1 \( -name "*.csproj" -o -name "*.fsproj" -o -name "*.vbproj" \) 2> /dev/null | grep -q . || return 1
    # Build-configuration folders confirm this is compiler output.
    [[ -d "$candidate/Debug" || -d "$candidate/Release" ]]
}
# Check if a vendor directory should be protected from purge.
# Expects a path whose basename is "vendor" (returns 1 otherwise).
# Strategy: only PHP Composer vendor is cleanable (regenerable via
# 'composer install'); Rails, Go, and unknown vendor dirs are protected.
# Returns 0 = protected, 1 = safe to clean.
is_protected_vendor_dir() {
    local candidate="$1"
    [[ "$(basename "$candidate")" == "vendor" ]] || return 1
    local project_dir
    project_dir=$(dirname "$candidate")
    if is_php_project_root "$project_dir"; then
        # Composer vendor: not protected, can be cleaned.
        return 1
    elif is_rails_project_root "$project_dir" || is_go_project_root "$project_dir"; then
        # Rails importmap vendor / Go vendored deps: keep.
        return 0
    fi
    # Unknown vendor flavour: keep (conservative default).
    return 0
}
# Check if an artifact should be protected from purge.
# Only "bin" and "vendor" need guarding; every other target name is
# always purgeable. Returns 0 = protected, 1 = safe to clean.
is_protected_purge_artifact() {
    local candidate="$1"
    case "$(basename "$candidate")" in
        bin)
            # bin/ is purgeable only when it is clearly .NET build output.
            ! is_dotnet_bin_dir "$candidate"
            ;;
        vendor)
            is_protected_vendor_dir "$candidate"
            ;;
        *)
            return 1
            ;;
    esac
}
# Scan purge targets using fd (fast) or pruned find.
# Args:
#   $1 - search root directory
#   $2 - output file (receives one candidate path per line)
# Candidates are filtered through is_safe_project_artifact, then
# filter_nested_artifacts and filter_protected_artifacts.
# Fixes over the previous revision:
#   - removed the dead `prune_args` array (built but never used; the find
#     branch only ever consumed `find_expr`)
#   - replaced `((i++))`, whose first evaluation yields 0 and thus exit
#     status 1, aborting the whole find branch under `set -e`
scan_purge_targets() {
    local search_path="$1"
    local output_file="$2"
    local min_depth="$PURGE_MIN_DEPTH_DEFAULT"
    local max_depth="$PURGE_MAX_DEPTH_DEFAULT"
    # Defensive validation: fall back to defaults on non-numeric values.
    if [[ ! "$min_depth" =~ ^[0-9]+$ ]]; then
        min_depth="$PURGE_MIN_DEPTH_DEFAULT"
    fi
    if [[ ! "$max_depth" =~ ^[0-9]+$ ]]; then
        max_depth="$PURGE_MAX_DEPTH_DEFAULT"
    fi
    if [[ "$max_depth" -lt "$min_depth" ]]; then
        max_depth="$min_depth"
    fi
    if [[ ! -d "$search_path" ]]; then
        return
    fi
    local target dir item
    if command -v fd > /dev/null 2>&1; then
        # Escape regex special characters in target names for fd patterns
        local escaped_targets=()
        for target in "${PURGE_TARGETS[@]}"; do
            escaped_targets+=("$(printf '%s' "$target" | sed -e 's/[][(){}.^$*+?|\\]/\\&/g')")
        done
        local pattern="($(
            IFS='|'
            echo "${escaped_targets[*]}"
        ))"
        local fd_args=(
            "--absolute-path"
            "--hidden"
            "--no-ignore"
            "--type" "d"
            "--min-depth" "$min_depth"
            "--max-depth" "$max_depth"
            "--threads" "4"
            "--exclude" ".git"
            "--exclude" "Library"
            "--exclude" ".Trash"
            "--exclude" "Applications"
        )
        fd "${fd_args[@]}" "$pattern" "$search_path" 2> /dev/null | while IFS= read -r item; do
            if is_safe_project_artifact "$item" "$search_path"; then
                echo "$item"
            fi
        done | filter_nested_artifacts | filter_protected_artifacts > "$output_file"
    else
        # Pruned find avoids descending into heavy directories: each prune
        # dir is skipped entirely, and matched targets are printed without
        # descending into them.
        local prune_dirs=(".git" "Library" ".Trash" "Applications")
        local find_expr=()
        for dir in "${prune_dirs[@]}"; do
            find_expr+=("-name" "$dir" "-prune" "-o")
        done
        local i=0
        for target in "${PURGE_TARGETS[@]}"; do
            find_expr+=("-name" "$target" "-print" "-prune")
            if [[ $i -lt $((${#PURGE_TARGETS[@]} - 1)) ]]; then
                find_expr+=("-o")
            fi
            i=$((i + 1))
        done
        command find "$search_path" -mindepth "$min_depth" -maxdepth "$max_depth" -type d \
            \( "${find_expr[@]}" \) 2> /dev/null | while IFS= read -r item; do
            if is_safe_project_artifact "$item" "$search_path"; then
                echo "$item"
            fi
        done | filter_nested_artifacts | filter_protected_artifacts > "$output_file"
    fi
}
# Filter out nested artifacts (e.g. node_modules inside node_modules).
# Reads candidate paths on stdin; a path is dropped when any ancestor
# component is itself a purge target, so only the outermost artifact of
# a chain survives.
filter_nested_artifacts() {
    local candidate parent target nested
    while IFS= read -r candidate; do
        parent=$(dirname "$candidate")
        nested=false
        for target in "${PURGE_TARGETS[@]}"; do
            case "$parent" in
                */"$target"/* | */"$target")
                    nested=true
                    break
                    ;;
            esac
        done
        if [[ "$nested" == "false" ]]; then
            printf '%s\n' "$candidate"
        fi
    done
}
# Drop protected artifacts from a stream of candidate paths (stdin),
# passing through only those is_protected_purge_artifact rejects.
filter_protected_artifacts() {
    local candidate
    while IFS= read -r candidate; do
        if ! is_protected_purge_artifact "$candidate"; then
            printf '%s\n' "$candidate"
        fi
    done
}
# Args: $1 - path
# Safety check: returns 0 when the path was modified within the last
# MIN_AGE_DAYS days (too fresh to clean), 1 when it is old enough or
# does not exist.
is_recently_modified() {
    local target="$1"
    # A missing path has nothing recent to protect.
    [[ -e "$target" ]] || return 1
    local modified_at now_epoch
    modified_at=$(get_file_mtime "$target")
    now_epoch=$(get_epoch_seconds)
    # True (0) while the age in whole days is below the threshold.
    (((now_epoch - modified_at) / 86400 < MIN_AGE_DAYS))
}
# Args: $1 - path
# Get directory size in KB on stdout; prints "0" for non-directories
# or when du fails.
get_dir_size_kb() {
    local target="$1"
    if [[ ! -d "$target" ]]; then
        echo "0"
        return 0
    fi
    du -sk "$target" 2> /dev/null | awk '{print $1}' || echo "0"
}
# Purge category selector.
# Interactive checkbox menu drawn with raw ANSI escapes on a raw-mode TTY.
# Args: one display label per positional parameter.
# Inputs (globals): PURGE_CATEGORY_SIZES (comma-separated KB sizes),
#   PURGE_RECENT_CATEGORIES (comma-separated true/false flags; "true"
#   items start unselected).
# Output (global): PURGE_SELECTION_RESULT - comma-separated indices of
#   the selected items.
# Returns: 0 on Enter (confirm), 1 on empty input, Q, lone ESC, or Ctrl-C.
# Terminal state (stty settings, cursor visibility) is restored via
# restore_terminal on every exit path, including signals.
select_purge_categories() {
    local -a categories=("$@")
    local total_items=${#categories[@]}
    # CR + erase-line: every printf starts by wiping the current row.
    local clear_line=$'\r\033[2K'
    if [[ $total_items -eq 0 ]]; then
        return 1
    fi
    # Calculate items per page based on terminal height.
    # Prefers `stty size` from /dev/tty, falls back to tput, then 24.
    # Result is clamped to [3, 50] after reserving 6 rows for chrome.
    _get_items_per_page() {
        local term_height=24
        if [[ -t 0 ]] || [[ -t 2 ]]; then
            term_height=$(stty size < /dev/tty 2> /dev/null | awk '{print $1}')
        fi
        if [[ -z "$term_height" || $term_height -le 0 ]]; then
            if command -v tput > /dev/null 2>&1; then
                term_height=$(tput lines 2> /dev/null || echo "24")
            else
                term_height=24
            fi
        fi
        local reserved=6
        local available=$((term_height - reserved))
        if [[ $available -lt 3 ]]; then
            echo 3
        elif [[ $available -gt 50 ]]; then
            echo 50
        else
            echo "$available"
        fi
    }
    local items_per_page=$(_get_items_per_page)
    local cursor_pos=0
    local top_index=0
    # Initialize selection (all selected by default, except recent ones)
    local -a selected=()
    IFS=',' read -r -a recent_flags <<< "${PURGE_RECENT_CATEGORIES:-}"
    for ((i = 0; i < total_items; i++)); do
        # Default unselected if category has recent items
        if [[ ${recent_flags[i]:-false} == "true" ]]; then
            selected[i]=false
        else
            selected[i]=true
        fi
    done
    # Snapshot current stty settings so they can be restored verbatim.
    local original_stty=""
    if [[ -t 0 ]] && command -v stty > /dev/null 2>&1; then
        original_stty=$(stty -g 2> /dev/null || echo "")
    fi
    # Terminal control functions
    restore_terminal() {
        trap - EXIT INT TERM
        show_cursor
        if [[ -n "${original_stty:-}" ]]; then
            stty "${original_stty}" 2> /dev/null || stty sane 2> /dev/null || true
        fi
    }
    # shellcheck disable=SC2329
    handle_interrupt() {
        restore_terminal
        exit 130
    }
    # Redraw the whole menu in place (cursor homed, rows erased per line).
    draw_menu() {
        # Recalculate items_per_page dynamically to handle window resize
        items_per_page=$(_get_items_per_page)
        # Clamp pagination state to avoid cursor drifting out of view
        local max_top_index=0
        if [[ $total_items -gt $items_per_page ]]; then
            max_top_index=$((total_items - items_per_page))
        fi
        if [[ $top_index -gt $max_top_index ]]; then
            top_index=$max_top_index
        fi
        if [[ $top_index -lt 0 ]]; then
            top_index=0
        fi
        local visible_count=$((total_items - top_index))
        [[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
        if [[ $cursor_pos -gt $((visible_count - 1)) ]]; then
            cursor_pos=$((visible_count - 1))
        fi
        if [[ $cursor_pos -lt 0 ]]; then
            cursor_pos=0
        fi
        printf "\033[H"
        # Calculate total size of selected items for header
        local selected_size=0
        local selected_count=0
        IFS=',' read -r -a sizes <<< "${PURGE_CATEGORY_SIZES:-}"
        for ((i = 0; i < total_items; i++)); do
            if [[ ${selected[i]} == true ]]; then
                selected_size=$((selected_size + ${sizes[i]:-0}))
                ((selected_count++))
            fi
        done
        # KB -> GB with one decimal, via bc for the fractional division.
        local selected_gb
        selected_gb=$(printf "%.1f" "$(echo "scale=2; $selected_size/1024/1024" | bc)")
        # Show position indicator if scrolling is needed
        local scroll_indicator=""
        if [[ $total_items -gt $items_per_page ]]; then
            local current_pos=$((top_index + cursor_pos + 1))
            scroll_indicator=" ${GRAY}[${current_pos}/${total_items}]${NC}"
        fi
        printf "%s\n" "$clear_line"
        printf "%s${PURPLE_BOLD}Select Categories to Clean${NC}%s ${GRAY}- ${selected_gb}GB ($selected_count selected)${NC}\n" "$clear_line" "$scroll_indicator"
        printf "%s\n" "$clear_line"
        IFS=',' read -r -a recent_flags <<< "${PURGE_RECENT_CATEGORIES:-}"
        # Calculate visible range
        local end_index=$((top_index + visible_count))
        # Draw only visible items
        for ((i = top_index; i < end_index; i++)); do
            local checkbox="$ICON_EMPTY"
            [[ ${selected[i]} == true ]] && checkbox="$ICON_SOLID"
            local recent_marker=""
            [[ ${recent_flags[i]:-false} == "true" ]] && recent_marker=" ${GRAY}| Recent${NC}"
            local rel_pos=$((i - top_index))
            if [[ $rel_pos -eq $cursor_pos ]]; then
                printf "%s${CYAN}${ICON_ARROW} %s %s%s${NC}\n" "$clear_line" "$checkbox" "${categories[i]}" "$recent_marker"
            else
                printf "%s %s %s%s\n" "$clear_line" "$checkbox" "${categories[i]}" "$recent_marker"
            fi
        done
        # Fill empty slots to clear previous content
        local items_shown=$visible_count
        for ((i = items_shown; i < items_per_page; i++)); do
            printf "%s\n" "$clear_line"
        done
        printf "%s\n" "$clear_line"
        printf "%s${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN} | Space Select | Enter Confirm | A All | I Invert | Q Quit${NC}\n" "$clear_line"
    }
    trap restore_terminal EXIT
    trap handle_interrupt INT TERM
    # Preserve interrupt character for Ctrl-C
    stty -echo -icanon intr ^C 2> /dev/null || true
    hide_cursor
    if [[ -t 1 ]]; then
        clear_screen
    fi
    # Main loop
    while true; do
        draw_menu
        # Read key
        IFS= read -r -s -n1 key || key=""
        case "$key" in
            $'\x1b')
                # Arrow keys or ESC
                # Read next 2 chars with timeout (bash 3.2 needs integer)
                IFS= read -r -s -n1 -t 1 key2 || key2=""
                if [[ "$key2" == "[" ]]; then
                    IFS= read -r -s -n1 -t 1 key3 || key3=""
                    case "$key3" in
                        A) # Up arrow
                            if [[ $cursor_pos -gt 0 ]]; then
                                ((cursor_pos--))
                            elif [[ $top_index -gt 0 ]]; then
                                ((top_index--))
                            fi
                            ;;
                        B) # Down arrow
                            local absolute_index=$((top_index + cursor_pos))
                            local last_index=$((total_items - 1))
                            if [[ $absolute_index -lt $last_index ]]; then
                                local visible_count=$((total_items - top_index))
                                [[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
                                if [[ $cursor_pos -lt $((visible_count - 1)) ]]; then
                                    ((cursor_pos++))
                                elif [[ $((top_index + visible_count)) -lt $total_items ]]; then
                                    ((top_index++))
                                fi
                            fi
                            ;;
                    esac
                else
                    # ESC alone (no following chars)
                    restore_terminal
                    return 1
                fi
                ;;
            " ") # Space - toggle current item
                local idx=$((top_index + cursor_pos))
                if [[ ${selected[idx]} == true ]]; then
                    selected[idx]=false
                else
                    selected[idx]=true
                fi
                ;;
            "a" | "A") # Select all
                for ((i = 0; i < total_items; i++)); do
                    selected[i]=true
                done
                ;;
            "i" | "I") # Invert selection
                for ((i = 0; i < total_items; i++)); do
                    if [[ ${selected[i]} == true ]]; then
                        selected[i]=false
                    else
                        selected[i]=true
                    fi
                done
                ;;
            "q" | "Q" | $'\x03') # Quit or Ctrl-C
                restore_terminal
                return 1
                ;;
            "" | $'\n' | $'\r') # Enter - confirm
                # Build result
                PURGE_SELECTION_RESULT=""
                for ((i = 0; i < total_items; i++)); do
                    if [[ ${selected[i]} == true ]]; then
                        [[ -n "$PURGE_SELECTION_RESULT" ]] && PURGE_SELECTION_RESULT+=","
                        PURGE_SELECTION_RESULT+="$i"
                    fi
                done
                restore_terminal
                return 0
                ;;
        esac
    done
}
# Main cleanup function - scans and prompts user to select artifacts to clean
# Flow: scan all PURGE_SEARCH_PATHS in parallel background jobs, collect
# candidates, build one menu row per artifact, let the user choose
# (select_purge_categories on a TTY, all non-recent items otherwise),
# then remove the selection and record totals under the mole cache dir.
# Returns: 0 on completion/no selection, 1 when the selector is cancelled,
# 2 when nothing was found to clean.
clean_project_artifacts() {
    local -a all_found_items=()
    local -a safe_to_clean=()
    local -a recently_modified=()
    # Set up cleanup on interrupt
    # Note: Declared without 'local' so cleanup_scan trap can access them
    scan_pids=()
    scan_temps=()
    # shellcheck disable=SC2329
    cleanup_scan() {
        # Kill all background scans
        for pid in "${scan_pids[@]+"${scan_pids[@]}"}"; do
            kill "$pid" 2> /dev/null || true
        done
        # Clean up temp files
        for temp in "${scan_temps[@]+"${scan_temps[@]}"}"; do
            rm -f "$temp" 2> /dev/null || true
        done
        if [[ -t 1 ]]; then
            stop_inline_spinner
        fi
        echo ""
        exit 130
    }
    trap cleanup_scan INT TERM
    # Start parallel scanning of all paths at once
    if [[ -t 1 ]]; then
        start_inline_spinner "Scanning projects..."
    fi
    # Launch all scans in parallel
    for path in "${PURGE_SEARCH_PATHS[@]}"; do
        if [[ -d "$path" ]]; then
            local scan_output
            scan_output=$(mktemp)
            scan_temps+=("$scan_output")
            # Launch scan in background for true parallelism
            scan_purge_targets "$path" "$scan_output" &
            local scan_pid=$!
            scan_pids+=("$scan_pid")
        fi
    done
    # Wait for all scans to complete
    for pid in "${scan_pids[@]+"${scan_pids[@]}"}"; do
        wait "$pid" 2> /dev/null || true
    done
    if [[ -t 1 ]]; then
        stop_inline_spinner
    fi
    # Collect all results
    for scan_output in "${scan_temps[@]+"${scan_temps[@]}"}"; do
        if [[ -f "$scan_output" ]]; then
            while IFS= read -r item; do
                if [[ -n "$item" ]]; then
                    all_found_items+=("$item")
                fi
            done < "$scan_output"
            rm -f "$scan_output"
        fi
    done
    # Clean up trap
    trap - INT TERM
    if [[ ${#all_found_items[@]} -eq 0 ]]; then
        echo ""
        echo -e "${GREEN}${ICON_SUCCESS}${NC} Great! No old project artifacts to clean"
        printf '\n'
        return 2 # Special code: nothing to clean
    fi
    # Mark recently modified items (for default selection state)
    for item in "${all_found_items[@]}"; do
        if is_recently_modified "$item"; then
            recently_modified+=("$item")
        fi
        # Add all items to safe_to_clean, let user choose
        safe_to_clean+=("$item")
    done
    # Build menu options - one per artifact
    if [[ -t 1 ]]; then
        start_inline_spinner "Calculating sizes..."
    fi
    local -a menu_options=()
    local -a item_paths=()
    local -a item_sizes=()
    local -a item_recent_flags=()
    # Helper to get project name from path
    # For ~/www/pake/src-tauri/target -> returns "pake"
    # For ~/work/code/MyProject/node_modules -> returns "MyProject"
    # Strategy: Find the nearest ancestor directory containing a project indicator file
    get_project_name() {
        local path="$1"
        local artifact_name
        artifact_name=$(basename "$path")
        # Start from the parent of the artifact and walk up
        local current_dir
        current_dir=$(dirname "$path")
        while [[ "$current_dir" != "/" && "$current_dir" != "$HOME" && -n "$current_dir" ]]; do
            # Check if current directory contains any project indicator
            for indicator in "${PROJECT_INDICATORS[@]}"; do
                if [[ -e "$current_dir/$indicator" ]]; then
                    # Found a project root, return its name
                    basename "$current_dir"
                    return 0
                fi
            done
            # Move up one level
            current_dir=$(dirname "$current_dir")
        done
        # Fallback: try the old logic (first directory under search root)
        local search_roots=()
        if [[ ${#PURGE_SEARCH_PATHS[@]} -gt 0 ]]; then
            search_roots=("${PURGE_SEARCH_PATHS[@]}")
        else
            search_roots=("$HOME/www" "$HOME/dev" "$HOME/Projects")
        fi
        for root in "${search_roots[@]}"; do
            root="${root%/}"
            if [[ -n "$root" && "$path" == "$root/"* ]]; then
                local relative_path="${path#"$root"/}"
                echo "$relative_path" | cut -d'/' -f1
                return 0
            fi
        done
        # Final fallback: use grandparent directory
        dirname "$(dirname "$path")" | xargs basename
    }
    # Format display with alignment (like app_selector)
    # Args: $1 project name, $2 artifact type, $3 human-readable size.
    format_purge_display() {
        local project_name="$1"
        local artifact_type="$2"
        local size_str="$3"
        # Terminal width for alignment
        local terminal_width=$(tput cols 2> /dev/null || echo 80)
        local fixed_width=28 # Reserve for type and size
        local available_width=$((terminal_width - fixed_width))
        # Bounds: 24-35 chars for project name
        [[ $available_width -lt 24 ]] && available_width=24
        [[ $available_width -gt 35 ]] && available_width=35
        # Truncate project name if needed
        local truncated_name=$(truncate_by_display_width "$project_name" "$available_width")
        local current_width=$(get_display_width "$truncated_name")
        local char_count=${#truncated_name}
        # Pad by display width (wide chars count 2), not byte length.
        local padding=$((available_width - current_width))
        local printf_width=$((char_count + padding))
        # Format: "project_name size | artifact_type"
        printf "%-*s %9s | %-13s" "$printf_width" "$truncated_name" "$size_str" "$artifact_type"
    }
    # Build menu options - one line per artifact
    for item in "${safe_to_clean[@]}"; do
        local project_name=$(get_project_name "$item")
        local artifact_type=$(basename "$item")
        local size_kb=$(get_dir_size_kb "$item")
        local size_human=$(bytes_to_human "$((size_kb * 1024))")
        # Check if recent
        local is_recent=false
        for recent_item in "${recently_modified[@]+"${recently_modified[@]}"}"; do
            if [[ "$item" == "$recent_item" ]]; then
                is_recent=true
                break
            fi
        done
        menu_options+=("$(format_purge_display "$project_name" "$artifact_type" "$size_human")")
        item_paths+=("$item")
        item_sizes+=("$size_kb")
        item_recent_flags+=("$is_recent")
    done
    if [[ -t 1 ]]; then
        stop_inline_spinner
    fi
    # Set global vars for selector
    export PURGE_CATEGORY_SIZES=$(
        IFS=,
        echo "${item_sizes[*]}"
    )
    export PURGE_RECENT_CATEGORIES=$(
        IFS=,
        echo "${item_recent_flags[*]}"
    )
    # Interactive selection (only if terminal is available)
    PURGE_SELECTION_RESULT=""
    if [[ -t 0 ]]; then
        if ! select_purge_categories "${menu_options[@]}"; then
            unset PURGE_CATEGORY_SIZES PURGE_RECENT_CATEGORIES PURGE_SELECTION_RESULT
            return 1
        fi
    else
        # Non-interactive: select all non-recent items
        for ((i = 0; i < ${#menu_options[@]}; i++)); do
            if [[ ${item_recent_flags[i]} != "true" ]]; then
                [[ -n "$PURGE_SELECTION_RESULT" ]] && PURGE_SELECTION_RESULT+=","
                PURGE_SELECTION_RESULT+="$i"
            fi
        done
    fi
    if [[ -z "$PURGE_SELECTION_RESULT" ]]; then
        echo ""
        echo -e "${GRAY}No items selected${NC}"
        printf '\n'
        unset PURGE_CATEGORY_SIZES PURGE_RECENT_CATEGORIES PURGE_SELECTION_RESULT
        return 0
    fi
    # Clean selected items
    echo ""
    IFS=',' read -r -a selected_indices <<< "$PURGE_SELECTION_RESULT"
    local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole"
    local cleaned_count=0
    for idx in "${selected_indices[@]}"; do
        local item_path="${item_paths[idx]}"
        local artifact_type=$(basename "$item_path")
        local project_name=$(get_project_name "$item_path")
        local size_kb="${item_sizes[idx]}"
        local size_human=$(bytes_to_human "$((size_kb * 1024))")
        # Safety checks: never touch /, $HOME itself, or paths outside $HOME.
        if [[ -z "$item_path" || "$item_path" == "/" || "$item_path" == "$HOME" || "$item_path" != "$HOME/"* ]]; then
            continue
        fi
        if [[ -t 1 ]]; then
            start_inline_spinner "Cleaning $project_name/$artifact_type..."
        fi
        if [[ -e "$item_path" ]]; then
            safe_remove "$item_path" true
            # Only count the item if removal actually succeeded.
            if [[ ! -e "$item_path" ]]; then
                local current_total=$(cat "$stats_dir/purge_stats" 2> /dev/null || echo "0")
                echo "$((current_total + size_kb))" > "$stats_dir/purge_stats"
                ((cleaned_count++))
            fi
        fi
        if [[ -t 1 ]]; then
            stop_inline_spinner
            echo -e "${GREEN}${ICON_SUCCESS}${NC} $project_name - $artifact_type ${GREEN}($size_human)${NC}"
        fi
    done
    # Update count
    echo "$cleaned_count" > "$stats_dir/purge_count"
    unset PURGE_CATEGORY_SIZES PURGE_RECENT_CATEGORIES PURGE_SELECTION_RESULT
}

View File

@@ -1,339 +0,0 @@
#!/bin/bash
# System-Level Cleanup Module (requires sudo).
# Strict mode: abort on errors, unset variables, and pipeline failures.
set -euo pipefail
# System caches, logs, and temp files.
# Removes aged files (thresholds from MOLE_*_AGE_DAYS) from system cache,
# temp, log, and diagnostic locations via safe_sudo_find_delete, plus
# /Library/Updates leftovers (SIP permitting), old "macOS Install Data",
# and browser code-signature clone caches under /private/var/folders.
# Every sudo deletion is best-effort (`|| true`) so one failure does not
# abort the strict-mode script.
clean_deep_system() {
    stop_section_spinner
    local cache_cleaned=0
    safe_sudo_find_delete "/Library/Caches" "*.cache" "$MOLE_TEMP_FILE_AGE_DAYS" "f" && cache_cleaned=1 || true
    safe_sudo_find_delete "/Library/Caches" "*.tmp" "$MOLE_TEMP_FILE_AGE_DAYS" "f" && cache_cleaned=1 || true
    safe_sudo_find_delete "/Library/Caches" "*.log" "$MOLE_LOG_AGE_DAYS" "f" && cache_cleaned=1 || true
    [[ $cache_cleaned -eq 1 ]] && log_success "System caches"
    local tmp_cleaned=0
    safe_sudo_find_delete "/private/tmp" "*" "${MOLE_TEMP_FILE_AGE_DAYS}" "f" && tmp_cleaned=1 || true
    safe_sudo_find_delete "/private/var/tmp" "*" "${MOLE_TEMP_FILE_AGE_DAYS}" "f" && tmp_cleaned=1 || true
    [[ $tmp_cleaned -eq 1 ]] && log_success "System temp files"
    safe_sudo_find_delete "/Library/Logs/DiagnosticReports" "*" "$MOLE_CRASH_REPORT_AGE_DAYS" "f" || true
    log_success "System crash reports"
    safe_sudo_find_delete "/private/var/log" "*.log" "$MOLE_LOG_AGE_DAYS" "f" || true
    safe_sudo_find_delete "/private/var/log" "*.gz" "$MOLE_LOG_AGE_DAYS" "f" || true
    log_success "System logs"
    # /Library/Updates: only touched when it is a real directory (not a
    # symlink) and SIP is disabled; items flagged "restricted" are skipped.
    if [[ -d "/Library/Updates" && ! -L "/Library/Updates" ]]; then
        if ! is_sip_enabled; then
            local updates_cleaned=0
            while IFS= read -r -d '' item; do
                # Defensive: only accept direct, well-formed children.
                if [[ -z "$item" ]] || [[ ! "$item" =~ ^/Library/Updates/[^/]+$ ]]; then
                    debug_log "Skipping malformed path: $item"
                    continue
                fi
                local item_flags
                item_flags=$($STAT_BSD -f%Sf "$item" 2> /dev/null || echo "")
                if [[ "$item_flags" == *"restricted"* ]]; then
                    continue
                fi
                if safe_sudo_remove "$item"; then
                    ((updates_cleaned++))
                fi
            done < <(find /Library/Updates -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true)
            [[ $updates_cleaned -gt 0 ]] && log_success "System library updates"
        fi
    fi
    # Leftover macOS installer payload: only removed once 30+ days old.
    if [[ -d "/macOS Install Data" ]]; then
        local mtime=$(get_file_mtime "/macOS Install Data")
        local age_days=$((($(get_epoch_seconds) - mtime) / 86400))
        debug_log "Found macOS Install Data (age: ${age_days} days)"
        if [[ $age_days -ge 30 ]]; then
            local size_kb=$(get_path_size_kb "/macOS Install Data")
            if [[ -n "$size_kb" && "$size_kb" -gt 0 ]]; then
                local size_human=$(bytes_to_human "$((size_kb * 1024))")
                debug_log "Cleaning macOS Install Data: $size_human (${age_days} days old)"
                if safe_sudo_remove "/macOS Install Data"; then
                    log_success "macOS Install Data ($size_human)"
                fi
            fi
        else
            debug_log "Keeping macOS Install Data (only ${age_days} days old, needs 30+)"
        fi
    fi
    start_section_spinner "Scanning system caches..."
    local code_sign_cleaned=0
    local found_count=0
    local last_update_time
    last_update_time=$(get_epoch_seconds)
    local update_interval=2
    while IFS= read -r -d '' cache_dir; do
        if safe_remove "$cache_dir" true; then
            ((code_sign_cleaned++))
        fi
        ((found_count++))
        # Optimize: only check time every 50 files
        if ((found_count % 50 == 0)); then
            local current_time
            current_time=$(get_epoch_seconds)
            if [[ $((current_time - last_update_time)) -ge $update_interval ]]; then
                start_section_spinner "Scanning system caches... ($found_count found)"
                last_update_time=$current_time
            fi
        fi
    done < <(run_with_timeout 5 command find /private/var/folders -type d -name "*.code_sign_clone" -path "*/X/*" -print0 2> /dev/null || true)
    stop_section_spinner
    [[ $code_sign_cleaned -gt 0 ]] && log_success "Browser code signature caches ($code_sign_cleaned items)"
    safe_sudo_find_delete "/private/var/db/diagnostics/Special" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
    safe_sudo_find_delete "/private/var/db/diagnostics/Persist" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
    safe_sudo_find_delete "/private/var/db/DiagnosticPipeline" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
    log_success "System diagnostic logs"
    safe_sudo_find_delete "/private/var/db/powerlog" "*" "$MOLE_LOG_AGE_DAYS" "f" || true
    log_success "Power logs"
    safe_sudo_find_delete "/private/var/db/reportmemoryexception/MemoryLimitViolations" "*" "30" "f" || true
    log_success "Memory exception reports"
    start_section_spinner "Cleaning diagnostic trace logs..."
    local diag_logs_cleaned=0
    safe_sudo_find_delete "/private/var/db/diagnostics/Persist" "*.tracev3" "30" "f" && diag_logs_cleaned=1 || true
    safe_sudo_find_delete "/private/var/db/diagnostics/Special" "*.tracev3" "30" "f" && diag_logs_cleaned=1 || true
    stop_section_spinner
    [[ $diag_logs_cleaned -eq 1 ]] && log_success "System diagnostic trace logs"
}
# Incomplete Time Machine backups.
clean_time_machine_failed_backups() {
local tm_cleaned=0
if ! command -v tmutil > /dev/null 2>&1; then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
return 0
fi
start_section_spinner "Checking Time Machine configuration..."
local spinner_active=true
local tm_info
tm_info=$(run_with_timeout 2 tmutil destinationinfo 2>&1 || echo "failed")
if [[ "$tm_info" == *"No destinations configured"* || "$tm_info" == "failed" ]]; then
if [[ "$spinner_active" == "true" ]]; then
stop_section_spinner
fi
echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
return 0
fi
if [[ ! -d "/Volumes" ]]; then
if [[ "$spinner_active" == "true" ]]; then
stop_section_spinner
fi
echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
return 0
fi
if tmutil status 2> /dev/null | grep -q "Running = 1"; then
if [[ "$spinner_active" == "true" ]]; then
stop_section_spinner
fi
echo -e " ${YELLOW}!${NC} Time Machine backup in progress, skipping cleanup"
return 0
fi
if [[ "$spinner_active" == "true" ]]; then
start_section_spinner "Checking backup volumes..."
fi
# Fast pre-scan for backup volumes to avoid slow tmutil checks.
local -a backup_volumes=()
for volume in /Volumes/*; do
[[ -d "$volume" ]] || continue
[[ "$volume" == "/Volumes/MacintoshHD" || "$volume" == "/" ]] && continue
[[ -L "$volume" ]] && continue
if [[ -d "$volume/Backups.backupdb" ]] || [[ -d "$volume/.MobileBackups" ]]; then
backup_volumes+=("$volume")
fi
done
if [[ ${#backup_volumes[@]} -eq 0 ]]; then
if [[ "$spinner_active" == "true" ]]; then
stop_section_spinner
fi
echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
return 0
fi
if [[ "$spinner_active" == "true" ]]; then
start_section_spinner "Scanning backup volumes..."
fi
for volume in "${backup_volumes[@]}"; do
local fs_type
fs_type=$(run_with_timeout 1 command df -T "$volume" 2> /dev/null | tail -1 | awk '{print $2}' || echo "unknown")
case "$fs_type" in
nfs | smbfs | afpfs | cifs | webdav | unknown) continue ;;
esac
local backupdb_dir="$volume/Backups.backupdb"
if [[ -d "$backupdb_dir" ]]; then
while IFS= read -r inprogress_file; do
[[ -d "$inprogress_file" ]] || continue
# Only delete old incomplete backups (safety window).
local file_mtime=$(get_file_mtime "$inprogress_file")
local current_time
current_time=$(get_epoch_seconds)
local hours_old=$(((current_time - file_mtime) / 3600))
if [[ $hours_old -lt $MOLE_TM_BACKUP_SAFE_HOURS ]]; then
continue
fi
local size_kb=$(get_path_size_kb "$inprogress_file")
[[ "$size_kb" -le 0 ]] && continue
if [[ "$spinner_active" == "true" ]]; then
stop_section_spinner
spinner_active=false
fi
local backup_name=$(basename "$inprogress_file")
local size_human=$(bytes_to_human "$((size_kb * 1024))")
if [[ "$DRY_RUN" == "true" ]]; then
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Incomplete backup: $backup_name ${YELLOW}($size_human dry)${NC}"
((tm_cleaned++))
note_activity
continue
fi
if ! command -v tmutil > /dev/null 2>&1; then
echo -e " ${YELLOW}!${NC} tmutil not available, skipping: $backup_name"
continue
fi
if tmutil delete "$inprogress_file" 2> /dev/null; then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete backup: $backup_name ${GREEN}($size_human)${NC}"
((tm_cleaned++))
((files_cleaned++))
((total_size_cleaned += size_kb))
((total_items++))
note_activity
else
echo -e " ${YELLOW}!${NC} Could not delete: $backup_name · try manually with sudo"
fi
done < <(run_with_timeout 15 find "$backupdb_dir" -maxdepth 3 -type d \( -name "*.inProgress" -o -name "*.inprogress" \) 2> /dev/null || true)
fi
# APFS bundles.
for bundle in "$volume"/*.backupbundle "$volume"/*.sparsebundle; do
[[ -e "$bundle" ]] || continue
[[ -d "$bundle" ]] || continue
local bundle_name=$(basename "$bundle")
local mounted_path=$(hdiutil info 2> /dev/null | grep -A 5 "image-path.*$bundle_name" | grep "/Volumes/" | awk '{print $1}' | head -1 || echo "")
if [[ -n "$mounted_path" && -d "$mounted_path" ]]; then
while IFS= read -r inprogress_file; do
[[ -d "$inprogress_file" ]] || continue
local file_mtime=$(get_file_mtime "$inprogress_file")
local current_time
current_time=$(get_epoch_seconds)
local hours_old=$(((current_time - file_mtime) / 3600))
if [[ $hours_old -lt $MOLE_TM_BACKUP_SAFE_HOURS ]]; then
continue
fi
local size_kb=$(get_path_size_kb "$inprogress_file")
[[ "$size_kb" -le 0 ]] && continue
if [[ "$spinner_active" == "true" ]]; then
stop_section_spinner
spinner_active=false
fi
local backup_name=$(basename "$inprogress_file")
local size_human=$(bytes_to_human "$((size_kb * 1024))")
if [[ "$DRY_RUN" == "true" ]]; then
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Incomplete APFS backup in $bundle_name: $backup_name ${YELLOW}($size_human dry)${NC}"
((tm_cleaned++))
note_activity
continue
fi
if ! command -v tmutil > /dev/null 2>&1; then
continue
fi
if tmutil delete "$inprogress_file" 2> /dev/null; then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete APFS backup in $bundle_name: $backup_name ${GREEN}($size_human)${NC}"
((tm_cleaned++))
((files_cleaned++))
((total_size_cleaned += size_kb))
((total_items++))
note_activity
else
echo -e " ${YELLOW}!${NC} Could not delete from bundle: $backup_name"
fi
done < <(run_with_timeout 15 find "$mounted_path" -maxdepth 3 -type d \( -name "*.inProgress" -o -name "*.inprogress" \) 2> /dev/null || true)
fi
done
done
if [[ "$spinner_active" == "true" ]]; then
stop_section_spinner
fi
if [[ $tm_cleaned -eq 0 ]]; then
echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found"
fi
}
# Local APFS snapshots (keep the most recent).
# Lists Time Machine local snapshots on "/", asks for confirmation when
# running interactively, then deletes every snapshot except the newest via
# `sudo tmutil deletelocalsnapshots`. DRY_RUN only reports what would go.
clean_local_snapshots() {
  if ! command -v tmutil > /dev/null 2>&1; then
    return 0
  fi
  start_section_spinner "Checking local snapshots..."
  local snapshot_list
  snapshot_list=$(tmutil listlocalsnapshots / 2> /dev/null)
  stop_section_spinner
  [[ -z "$snapshot_list" ]] && return 0
  local cleaned_count=0
  local newest_ts=0
  local newest_name=""
  local -a snapshots=()
  # Parse names of the form com.apple.TimeMachine.YYYY-MM-DD-HHMMSS and
  # track the most recent snapshot so it can be kept.
  while IFS= read -r line; do
    if [[ "$line" =~ com\.apple\.TimeMachine\.([0-9]{4})-([0-9]{2})-([0-9]{2})-([0-9]{6}) ]]; then
      local snap_name="${BASH_REMATCH[0]}"
      snapshots+=("$snap_name")
      local date_str="${BASH_REMATCH[1]}-${BASH_REMATCH[2]}-${BASH_REMATCH[3]} ${BASH_REMATCH[4]:0:2}:${BASH_REMATCH[4]:2:2}:${BASH_REMATCH[4]:4:2}"
      # BSD date parse (-j -f); "0" marks an unparseable timestamp.
      local snap_ts=$(date -j -f "%Y-%m-%d %H:%M:%S" "$date_str" "+%s" 2> /dev/null || echo "0")
      [[ "$snap_ts" == "0" ]] && continue
      if [[ "$snap_ts" -gt "$newest_ts" ]]; then
        newest_ts="$snap_ts"
        newest_name="$snap_name"
      fi
    fi
  done <<< "$snapshot_list"
  [[ ${#snapshots[@]} -eq 0 ]] && return 0
  [[ -z "$newest_name" ]] && return 0
  local deletable_count=$((${#snapshots[@]} - 1))
  [[ $deletable_count -le 0 ]] && return 0
  if [[ "$DRY_RUN" != "true" ]]; then
    # Deleting snapshots needs explicit consent; bail out when stdin is
    # not a TTY (non-interactive run).
    if [[ ! -t 0 ]]; then
      echo -e " ${YELLOW}!${NC} ${#snapshots[@]} local snapshot(s) found, skipping non-interactive mode"
      echo -e " ${YELLOW}${ICON_WARNING}${NC} ${GRAY}Tip: Snapshots may cause Disk Utility to show different 'Available' values${NC}"
      return 0
    fi
    echo -e " ${YELLOW}!${NC} Time Machine local snapshots found"
    echo -e " ${GRAY}macOS can recreate them if needed.${NC}"
    echo -e " ${GRAY}The most recent snapshot will be kept.${NC}"
    echo -ne " ${PURPLE}${ICON_ARROW}${NC} Remove all local snapshots except the most recent one? ${GREEN}Enter${NC} continue, ${GRAY}Space${NC} skip: "
    local choice
    if type read_key > /dev/null 2>&1; then
      choice=$(read_key)
    else
      # Fallback single-key read; bare Enter/CR counts as confirmation.
      IFS= read -r -s -n 1 choice || choice=""
      if [[ -z "$choice" || "$choice" == $'\n' || "$choice" == $'\r' ]]; then
        choice="ENTER"
      fi
    fi
    if [[ "$choice" == "ENTER" ]]; then
      printf "\r\033[K" # Clear the prompt line
    else
      echo -e " ${GRAY}Skipped${NC}"
      return 0
    fi
  fi
  local snap_name
  for snap_name in "${snapshots[@]}"; do
    if [[ "$snap_name" =~ com\.apple\.TimeMachine\.([0-9]{4})-([0-9]{2})-([0-9]{2})-([0-9]{6}) ]]; then
      if [[ "${BASH_REMATCH[0]}" != "$newest_name" ]]; then
        if [[ "$DRY_RUN" == "true" ]]; then
          echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Local snapshot: $snap_name ${YELLOW}dry-run${NC}"
          # Assignment form instead of ((cleaned_count++)): the post-increment
          # returns status 1 when the old value is 0, which aborts the script
          # under `set -e`.
          cleaned_count=$((cleaned_count + 1))
          note_activity
        else
          # tmutil takes the bare date stamp (the portion the regex captured).
          if sudo tmutil deletelocalsnapshots "${BASH_REMATCH[1]}-${BASH_REMATCH[2]}-${BASH_REMATCH[3]}-${BASH_REMATCH[4]}" > /dev/null 2>&1; then
            echo -e " ${GREEN}${ICON_SUCCESS}${NC} Removed snapshot: $snap_name"
            cleaned_count=$((cleaned_count + 1))
            note_activity
          else
            echo -e " ${YELLOW}!${NC} Failed to remove: $snap_name"
          fi
        fi
      fi
    fi
  done
  if [[ $cleaned_count -gt 0 && "$DRY_RUN" != "true" ]]; then
    log_success "Cleaned $cleaned_count local snapshots, kept latest"
  fi
}

View File

@@ -1,695 +0,0 @@
#!/bin/bash
# User Data Cleanup Module
set -euo pipefail
# Core per-user cleanup: app caches, empty Library leftovers, app logs,
# and the Trash (skipped when the user whitelisted ~/.Trash).
clean_user_essentials() {
  start_section_spinner "Scanning caches..."
  safe_clean ~/Library/Caches/* "User app cache"
  stop_section_spinner

  start_section_spinner "Scanning empty items..."
  clean_empty_library_items
  stop_section_spinner

  safe_clean ~/Library/Logs/* "User app logs"

  if ! is_path_whitelisted "$HOME/.Trash"; then
    safe_clean ~/.Trash/* "Trash"
  else
    note_activity
    echo -e " ${GREEN}${ICON_EMPTY}${NC} Trash · whitelist protected"
  fi
}
# Remove empty directories under ~/Library.
# Pass 1 clears empty top-level Library folders; pass 2 iteratively removes
# empty subdirectories inside Application Support and Caches (up to 3 passes
# so directories that become empty after a previous pass are also caught).
# Empty *files* are deliberately left alone (app sentinel files).
clean_empty_library_items() {
if [[ ! -d "$HOME/Library" ]]; then
return 0
fi
# 1. Clean top-level empty directories in Library
local -a empty_dirs=()
while IFS= read -r -d '' dir; do
[[ -d "$dir" ]] && empty_dirs+=("$dir")
done < <(find "$HOME/Library" -mindepth 1 -maxdepth 1 -type d -empty -print0 2> /dev/null)
if [[ ${#empty_dirs[@]} -gt 0 ]]; then
safe_clean "${empty_dirs[@]}" "Empty Library folders"
fi
# 2. Clean empty subdirectories in Application Support and other key locations
# Iteratively remove empty directories until no more are found
local -a key_locations=(
"$HOME/Library/Application Support"
"$HOME/Library/Caches"
)
for location in "${key_locations[@]}"; do
[[ -d "$location" ]] || continue
# Limit passes to keep cleanup fast; 3 iterations handle most nested scenarios.
local max_iterations=3
local iteration=0
while [[ $iteration -lt $max_iterations ]]; do
local -a nested_empty_dirs=()
# Find empty directories
while IFS= read -r -d '' dir; do
# Skip if whitelisted
if is_path_whitelisted "$dir"; then
continue
fi
# Skip protected system components
local dir_name=$(basename "$dir")
if is_critical_system_component "$dir_name"; then
continue
fi
[[ -d "$dir" ]] && nested_empty_dirs+=("$dir")
done < <(find "$location" -mindepth 1 -type d -empty -print0 2> /dev/null)
# If no empty dirs found, we're done with this location
if [[ ${#nested_empty_dirs[@]} -eq 0 ]]; then
break
fi
local location_name=$(basename "$location")
safe_clean "${nested_empty_dirs[@]}" "Empty $location_name subdirs"
# NOTE(review): ((iteration++)) returns status 1 on the first pass (old
# value 0); presumably errexit is not in effect when this runs — verify.
((iteration++))
done
done
# Empty file cleanup is skipped to avoid removing app sentinel files.
}
# Remove old Google Chrome versions while keeping Current.
# Chrome keeps superseded framework builds under
# Contents/Frameworks/Google Chrome Framework.framework/Versions; everything
# except the directory the "Current" symlink resolves to is removed.
# Skipped while Chrome runs; honors DRY_RUN and the user whitelist; updates
# the global counters files_cleaned / total_size_cleaned / total_items.
clean_chrome_old_versions() {
  local -a app_paths=(
    "/Applications/Google Chrome.app"
    "$HOME/Applications/Google Chrome.app"
  )
  # Use -f to match Chrome Helper processes as well
  if pgrep -f "Google Chrome" > /dev/null 2>&1; then
    echo -e " ${YELLOW}${ICON_WARNING}${NC} Google Chrome running · old versions cleanup skipped"
    return 0
  fi
  local cleaned_count=0
  local total_size=0
  local cleaned_any=false
  for app_path in "${app_paths[@]}"; do
    [[ -d "$app_path" ]] || continue
    local versions_dir="$app_path/Contents/Frameworks/Google Chrome Framework.framework/Versions"
    [[ -d "$versions_dir" ]] || continue
    local current_link="$versions_dir/Current"
    [[ -L "$current_link" ]] || continue
    local current_version
    current_version=$(readlink "$current_link" 2> /dev/null || true)
    current_version="${current_version##*/}"
    [[ -n "$current_version" ]] || continue
    local -a old_versions=()
    local dir name
    for dir in "$versions_dir"/*; do
      [[ -d "$dir" ]] || continue
      name=$(basename "$dir")
      [[ "$name" == "Current" ]] && continue
      [[ "$name" == "$current_version" ]] && continue
      if is_path_whitelisted "$dir"; then
        continue
      fi
      old_versions+=("$dir")
    done
    if [[ ${#old_versions[@]} -eq 0 ]]; then
      continue
    fi
    for dir in "${old_versions[@]}"; do
      local size_kb
      size_kb=$(get_path_size_kb "$dir" || echo 0)
      # Validate helper output before arithmetic (empty/garbage -> 0).
      [[ "$size_kb" =~ ^[0-9]+$ ]] || size_kb=0
      total_size=$((total_size + size_kb))
      # Assignment form instead of ((cleaned_count++)): post-increment
      # returns status 1 when the old value is 0 and aborts under `set -e`.
      cleaned_count=$((cleaned_count + 1))
      cleaned_any=true
      if [[ "$DRY_RUN" != "true" ]]; then
        if has_sudo_session; then
          safe_sudo_remove "$dir" > /dev/null 2>&1 || true
        else
          safe_remove "$dir" true > /dev/null 2>&1 || true
        fi
      fi
    done
  done
  if [[ "$cleaned_any" == "true" ]]; then
    local size_human
    size_human=$(bytes_to_human "$((total_size * 1024))")
    if [[ "$DRY_RUN" == "true" ]]; then
      echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Chrome old versions ${YELLOW}(${cleaned_count} dirs, $size_human dry)${NC}"
    else
      echo -e " ${GREEN}${ICON_SUCCESS}${NC} Chrome old versions ${GREEN}(${cleaned_count} dirs, $size_human)${NC}"
    fi
    # Assignment form: `(( x += 0 ))` would return status 1 under `set -e`.
    files_cleaned=$((files_cleaned + cleaned_count))
    total_size_cleaned=$((total_size_cleaned + total_size))
    total_items=$((total_items + 1))
    note_activity
  fi
}
# Remove old Microsoft Edge versions while keeping Current.
# Mirrors clean_chrome_old_versions for the Edge framework layout under
# Contents/Frameworks/Microsoft Edge Framework.framework/Versions.
# Skipped while Edge runs; honors DRY_RUN and the user whitelist; updates
# the global counters files_cleaned / total_size_cleaned / total_items.
clean_edge_old_versions() {
  local -a app_paths=(
    "/Applications/Microsoft Edge.app"
    "$HOME/Applications/Microsoft Edge.app"
  )
  # Use -f to match Edge Helper processes as well
  if pgrep -f "Microsoft Edge" > /dev/null 2>&1; then
    echo -e " ${YELLOW}${ICON_WARNING}${NC} Microsoft Edge running · old versions cleanup skipped"
    return 0
  fi
  local cleaned_count=0
  local total_size=0
  local cleaned_any=false
  for app_path in "${app_paths[@]}"; do
    [[ -d "$app_path" ]] || continue
    local versions_dir="$app_path/Contents/Frameworks/Microsoft Edge Framework.framework/Versions"
    [[ -d "$versions_dir" ]] || continue
    local current_link="$versions_dir/Current"
    [[ -L "$current_link" ]] || continue
    local current_version
    current_version=$(readlink "$current_link" 2> /dev/null || true)
    current_version="${current_version##*/}"
    [[ -n "$current_version" ]] || continue
    local -a old_versions=()
    local dir name
    for dir in "$versions_dir"/*; do
      [[ -d "$dir" ]] || continue
      name=$(basename "$dir")
      [[ "$name" == "Current" ]] && continue
      [[ "$name" == "$current_version" ]] && continue
      if is_path_whitelisted "$dir"; then
        continue
      fi
      old_versions+=("$dir")
    done
    if [[ ${#old_versions[@]} -eq 0 ]]; then
      continue
    fi
    for dir in "${old_versions[@]}"; do
      local size_kb
      size_kb=$(get_path_size_kb "$dir" || echo 0)
      # Validate helper output before arithmetic (empty/garbage -> 0).
      [[ "$size_kb" =~ ^[0-9]+$ ]] || size_kb=0
      total_size=$((total_size + size_kb))
      # Assignment form instead of ((cleaned_count++)): post-increment
      # returns status 1 when the old value is 0 and aborts under `set -e`.
      cleaned_count=$((cleaned_count + 1))
      cleaned_any=true
      if [[ "$DRY_RUN" != "true" ]]; then
        if has_sudo_session; then
          safe_sudo_remove "$dir" > /dev/null 2>&1 || true
        else
          safe_remove "$dir" true > /dev/null 2>&1 || true
        fi
      fi
    done
  done
  if [[ "$cleaned_any" == "true" ]]; then
    local size_human
    size_human=$(bytes_to_human "$((total_size * 1024))")
    if [[ "$DRY_RUN" == "true" ]]; then
      echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Edge old versions ${YELLOW}(${cleaned_count} dirs, $size_human dry)${NC}"
    else
      echo -e " ${GREEN}${ICON_SUCCESS}${NC} Edge old versions ${GREEN}(${cleaned_count} dirs, $size_human)${NC}"
    fi
    # Assignment form: `(( x += 0 ))` would return status 1 under `set -e`.
    files_cleaned=$((files_cleaned + cleaned_count))
    total_size_cleaned=$((total_size_cleaned + total_size))
    total_items=$((total_items + 1))
    note_activity
  fi
}
# Remove old Microsoft EdgeUpdater staged versions, keeping only the highest
# version (per `sort -V`). Skipped while Edge runs; honors DRY_RUN and the
# user whitelist; updates the global cleanup counters.
clean_edge_updater_old_versions() {
  local updater_dir="$HOME/Library/Application Support/Microsoft/EdgeUpdater/apps/msedge-stable"
  [[ -d "$updater_dir" ]] || return 0
  if pgrep -f "Microsoft Edge" > /dev/null 2>&1; then
    echo -e " ${YELLOW}${ICON_WARNING}${NC} Microsoft Edge running · updater cleanup skipped"
    return 0
  fi
  local -a version_dirs=()
  local dir
  for dir in "$updater_dir"/*; do
    [[ -d "$dir" ]] || continue
    version_dirs+=("$dir")
  done
  # Nothing to prune unless at least two staged versions exist.
  if [[ ${#version_dirs[@]} -lt 2 ]]; then
    return 0
  fi
  local latest_version
  latest_version=$(printf '%s\n' "${version_dirs[@]##*/}" | sort -V | tail -n 1)
  [[ -n "$latest_version" ]] || return 0
  local cleaned_count=0
  local total_size=0
  local cleaned_any=false
  for dir in "${version_dirs[@]}"; do
    local name
    name=$(basename "$dir")
    [[ "$name" == "$latest_version" ]] && continue
    if is_path_whitelisted "$dir"; then
      continue
    fi
    local size_kb
    size_kb=$(get_path_size_kb "$dir" || echo 0)
    # Validate helper output before arithmetic (empty/garbage -> 0).
    [[ "$size_kb" =~ ^[0-9]+$ ]] || size_kb=0
    total_size=$((total_size + size_kb))
    # Assignment form instead of ((cleaned_count++)): post-increment returns
    # status 1 when the old value is 0 and aborts under `set -e`.
    cleaned_count=$((cleaned_count + 1))
    cleaned_any=true
    if [[ "$DRY_RUN" != "true" ]]; then
      safe_remove "$dir" true > /dev/null 2>&1 || true
    fi
  done
  if [[ "$cleaned_any" == "true" ]]; then
    local size_human
    size_human=$(bytes_to_human "$((total_size * 1024))")
    if [[ "$DRY_RUN" == "true" ]]; then
      echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Edge updater old versions ${YELLOW}(${cleaned_count} dirs, $size_human dry)${NC}"
    else
      echo -e " ${GREEN}${ICON_SUCCESS}${NC} Edge updater old versions ${GREEN}(${cleaned_count} dirs, $size_human)${NC}"
    fi
    # Assignment form: `(( x += 0 ))` would return status 1 under `set -e`.
    files_cleaned=$((files_cleaned + cleaned_count))
    total_size_cleaned=$((total_size_cleaned + total_size))
    total_items=$((total_items + 1))
    note_activity
  fi
}
# Clean writable external volumes under /Volumes: empty each volume's
# .Trashes and remove .DS_Store files (unless Finder metadata is protected).
# Network mounts (SMB/NFS/AFP/CIFS/WebDAV) are detected and skipped.
scan_external_volumes() {
[[ -d "/Volumes" ]] || return 0
local -a candidate_volumes=()
local -a network_volumes=()
for volume in /Volumes/*; do
# Only writable, real (non-symlink) mount points; skip the boot volume.
[[ -d "$volume" && -w "$volume" && ! -L "$volume" ]] || continue
[[ "$volume" == "/" || "$volume" == "/Volumes/Macintosh HD" ]] && continue
# First check the transport protocol via diskutil (1s timeout).
local protocol=""
protocol=$(run_with_timeout 1 command diskutil info "$volume" 2> /dev/null | grep -i "Protocol:" | awk '{print $2}' || echo "")
case "$protocol" in
SMB | NFS | AFP | CIFS | WebDAV)
network_volumes+=("$volume")
continue
;;
esac
# Fallback: filesystem type from df.
# NOTE(review): on macOS/BSD, `df -T` expects a filesystem-type argument
# (GNU semantics differ) — verify this actually yields the fs type here.
local fs_type=""
fs_type=$(run_with_timeout 1 command df -T "$volume" 2> /dev/null | tail -1 | awk '{print $2}' || echo "")
case "$fs_type" in
nfs | smbfs | afpfs | cifs | webdav)
network_volumes+=("$volume")
continue
;;
esac
candidate_volumes+=("$volume")
done
local volume_count=${#candidate_volumes[@]}
local network_count=${#network_volumes[@]}
if [[ $volume_count -eq 0 ]]; then
if [[ $network_count -gt 0 ]]; then
echo -e " ${GRAY}${ICON_LIST}${NC} External volumes (${network_count} network volume(s) skipped)"
note_activity
fi
return 0
fi
start_section_spinner "Scanning $volume_count external volume(s)..."
for volume in "${candidate_volumes[@]}"; do
[[ -d "$volume" && -r "$volume" ]] || continue
# Empty the per-volume .Trashes directory (skipped in DRY_RUN and when
# whitelisted).
local volume_trash="$volume/.Trashes"
if [[ -d "$volume_trash" && "$DRY_RUN" != "true" ]] && ! is_path_whitelisted "$volume_trash"; then
while IFS= read -r -d '' item; do
safe_remove "$item" true || true
done < <(command find "$volume_trash" -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true)
fi
if [[ "$PROTECT_FINDER_METADATA" != "true" ]]; then
clean_ds_store_tree "$volume" "$(basename "$volume") volume (.DS_Store)"
fi
done
stop_section_spinner
}
# Remove Finder .DS_Store metadata under $HOME, unless the user has
# whitelisted Finder metadata (PROTECT_FINDER_METADATA=true).
clean_finder_metadata() {
  stop_section_spinner
  if [[ "$PROTECT_FINDER_METADATA" != "true" ]]; then
    clean_ds_store_tree "$HOME" "Home directory (.DS_Store)"
    return
  fi
  note_activity
  echo -e " ${GREEN}${ICON_EMPTY}${NC} Finder metadata · whitelist protected"
}
# macOS system caches and user-level leftovers.
# Each entry is a glob expanded at call time; safe_clean handles whitelist
# checks, size accounting, and DRY_RUN itself.
clean_macos_system_caches() {
stop_section_spinner
# safe_clean already checks protected paths.
safe_clean ~/Library/Saved\ Application\ State/* "Saved application states" || true
safe_clean ~/Library/Caches/com.apple.photoanalysisd "Photo analysis cache" || true
safe_clean ~/Library/Caches/com.apple.akd "Apple ID cache" || true
safe_clean ~/Library/Caches/com.apple.WebKit.Networking/* "WebKit network cache" || true
safe_clean ~/Library/DiagnosticReports/* "Diagnostic reports" || true
# QuickLook thumbnail caches (old and new locations).
safe_clean ~/Library/Caches/com.apple.QuickLook.thumbnailcache "QuickLook thumbnails" || true
safe_clean ~/Library/Caches/Quick\ Look/* "QuickLook cache" || true
safe_clean ~/Library/Caches/com.apple.iconservices* "Icon services cache" || true
# Abandoned partial downloads from common browsers.
safe_clean ~/Downloads/*.download "Safari incomplete downloads" || true
safe_clean ~/Downloads/*.crdownload "Chrome incomplete downloads" || true
safe_clean ~/Downloads/*.part "Partial incomplete downloads" || true
safe_clean ~/Library/Autosave\ Information/* "Autosave information" || true
safe_clean ~/Library/IdentityCaches/* "Identity caches" || true
safe_clean ~/Library/Suggestions/* "Siri suggestions cache" || true
safe_clean ~/Library/Calendars/Calendar\ Cache "Calendar cache" || true
safe_clean ~/Library/Application\ Support/AddressBook/Sources/*/Photos.cache "Address Book photo cache" || true
}
# Clear the Finder/LaunchServices "recent items" shared file lists
# (both the newer .sfl2 and legacy .sfl variants) plus the legacy
# recent-items preference file.
clean_recent_items() {
  stop_section_spinner
  local shared_dir="$HOME/Library/Application Support/com.apple.sharedfilelist"
  if [[ -d "$shared_dir" ]]; then
    local ext category
    for ext in sfl2 sfl; do
      for category in RecentApplications RecentDocuments RecentServers RecentHosts; do
        local sfl_file="$shared_dir/com.apple.LSSharedFileList.${category}.${ext}"
        [[ -e "$sfl_file" ]] && safe_clean "$sfl_file" "Recent items list" || true
      done
    done
  fi
  safe_clean ~/Library/Preferences/com.apple.recentitems.plist "Recent items preferences" || true
}
# Delete old Apple Mail attachment downloads.
# Only touches the known "Mail Downloads" directories, only when a directory
# exceeds MOLE_MAIL_DOWNLOADS_MIN_KB (fallback 5120 KB), and only files older
# than MOLE_MAIL_AGE_DAYS (fallback 30 days).
clean_mail_downloads() {
  stop_section_spinner
  local mail_age_days=${MOLE_MAIL_AGE_DAYS:-}
  if ! [[ "$mail_age_days" =~ ^[0-9]+$ ]]; then
    mail_age_days=30
  fi
  # Loop-invariant threshold: validate once instead of per directory.
  local min_kb="${MOLE_MAIL_DOWNLOADS_MIN_KB:-}"
  if ! [[ "$min_kb" =~ ^[0-9]+$ ]]; then
    min_kb=5120
  fi
  local -a mail_dirs=(
    "$HOME/Library/Mail Downloads"
    "$HOME/Library/Containers/com.apple.mail/Data/Library/Mail Downloads"
  )
  local count=0
  local cleaned_kb=0
  local target_path
  for target_path in "${mail_dirs[@]}"; do
    if [[ -d "$target_path" ]]; then
      local dir_size_kb=0
      dir_size_kb=$(get_path_size_kb "$target_path")
      if ! [[ "$dir_size_kb" =~ ^[0-9]+$ ]]; then
        dir_size_kb=0
      fi
      # Skip small directories — not worth disturbing Mail's storage.
      if [[ "$dir_size_kb" -lt "$min_kb" ]]; then
        continue
      fi
      local file_path
      while IFS= read -r -d '' file_path; do
        if [[ -f "$file_path" ]]; then
          # Combined local+assignment keeps the helper's exit status masked
          # (matches previous behavior under `set -e`).
          local file_size_kb=$(get_path_size_kb "$file_path")
          # Validate before arithmetic: non-numeric output counts as 0.
          [[ "$file_size_kb" =~ ^[0-9]+$ ]] || file_size_kb=0
          if safe_remove "$file_path" true; then
            # Assignment form instead of ((count++)): post-increment returns
            # status 1 when the old value is 0 and aborts under `set -e`.
            count=$((count + 1))
            cleaned_kb=$((cleaned_kb + file_size_kb))
          fi
        fi
      done < <(command find "$target_path" -type f -mtime +"$mail_age_days" -print0 2> /dev/null || true)
    fi
  done
  if [[ $count -gt 0 ]]; then
    local cleaned_mb=$(echo "$cleaned_kb" | awk '{printf "%.1f", $1/1024}' || echo "0.0")
    echo " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $count mail attachments (~${cleaned_mb}MB)"
    note_activity
  fi
}
# Sandboxed app caches.
# Clears a few known sandboxed caches explicitly, then walks every
# ~/Library/Containers entry via process_container_cache. That helper
# updates this function's locals (total_size, cleaned_count, found_any)
# through bash dynamic scoping — it must only be called from here.
clean_sandboxed_app_caches() {
stop_section_spinner
safe_clean ~/Library/Containers/com.apple.wallpaper.agent/Data/Library/Caches/* "Wallpaper agent cache"
safe_clean ~/Library/Containers/com.apple.mediaanalysisd/Data/Library/Caches/* "Media analysis cache"
safe_clean ~/Library/Containers/com.apple.AppStore/Data/Library/Caches/* "App Store cache"
safe_clean ~/Library/Containers/com.apple.configurator.xpc.InternetService/Data/tmp/* "Apple Configurator temp files"
local containers_dir="$HOME/Library/Containers"
[[ ! -d "$containers_dir" ]] && return 0
start_section_spinner "Scanning sandboxed apps..."
# Counters mutated by process_container_cache (dynamic scoping).
local total_size=0
local cleaned_count=0
local found_any=false
# Use nullglob to avoid literal globs.
local _ng_state
_ng_state=$(shopt -p nullglob || true)
shopt -s nullglob
for container_dir in "$containers_dir"/*; do
process_container_cache "$container_dir"
done
eval "$_ng_state"
stop_section_spinner
if [[ "$found_any" == "true" ]]; then
local size_human=$(bytes_to_human "$((total_size * 1024))")
if [[ "$DRY_RUN" == "true" ]]; then
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Sandboxed app caches ${YELLOW}($size_human dry)${NC}"
else
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Sandboxed app caches ${GREEN}($size_human)${NC}"
fi
# NOTE(review): `(( x += y ))` returns status 1 when the result is 0;
# presumably errexit is not in effect here — verify.
((files_cleaned += cleaned_count))
((total_size_cleaned += total_size))
((total_items++))
note_activity
fi
}
# Process a single container cache directory.
# NOTE: relies on bash dynamic scoping — it increments the caller's
# (clean_sandboxed_app_caches) locals `total_size`, `cleaned_count`, and
# `found_any`, so it must only be invoked from that context.
process_container_cache() {
  local container_dir="$1"
  [[ -d "$container_dir" ]] || return 0
  local bundle_id=$(basename "$container_dir")
  # Never touch critical system containers or user-protected apps
  # (protection list is checked case-sensitively and lowercased).
  if is_critical_system_component "$bundle_id"; then
    return 0
  fi
  if should_protect_data "$bundle_id" || should_protect_data "$(echo "$bundle_id" | LC_ALL=C tr '[:upper:]' '[:lower:]')"; then
    return 0
  fi
  local cache_dir="$container_dir/Data/Library/Caches"
  [[ -d "$cache_dir" ]] || return 0
  # Fast non-empty check (stops at the first entry found).
  if find "$cache_dir" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then
    local size
    size=$(get_path_size_kb "$cache_dir" || echo 0)
    # Validate helper output before arithmetic (empty/garbage -> 0).
    [[ "$size" =~ ^[0-9]+$ ]] || size=0
    # Assignment forms instead of (( ... )): `((total_size += 0))` and
    # `((cleaned_count++))` with an old value of 0 return status 1,
    # which aborts the script under `set -e`.
    total_size=$((total_size + size))
    cleaned_count=$((cleaned_count + 1))
    found_any=true
    if [[ "$DRY_RUN" != "true" ]]; then
      # Clean contents safely with local nullglob, restoring prior state.
      local _ng_state
      _ng_state=$(shopt -p nullglob || true)
      shopt -s nullglob
      for item in "$cache_dir"/*; do
        [[ -e "$item" ]] || continue
        safe_remove "$item" true || true
      done
      eval "$_ng_state"
    fi
  fi
}
# Browser caches (Safari/Chrome/Edge/Firefox).
# Firefox caches are skipped while Firefox is running; finishes by pruning
# old Chrome/Edge framework versions and Edge updater leftovers.
clean_browsers() {
stop_section_spinner
safe_clean ~/Library/Caches/com.apple.Safari/* "Safari cache"
# Chrome/Chromium.
safe_clean ~/Library/Caches/Google/Chrome/* "Chrome cache"
safe_clean ~/Library/Application\ Support/Google/Chrome/*/Application\ Cache/* "Chrome app cache"
safe_clean ~/Library/Application\ Support/Google/Chrome/*/GPUCache/* "Chrome GPU cache"
safe_clean ~/Library/Caches/Chromium/* "Chromium cache"
safe_clean ~/Library/Caches/com.microsoft.edgemac/* "Edge cache"
safe_clean ~/Library/Caches/company.thebrowser.Browser/* "Arc cache"
safe_clean ~/Library/Caches/company.thebrowser.dia/* "Dia cache"
safe_clean ~/Library/Caches/BraveSoftware/Brave-Browser/* "Brave cache"
# Firefox cache cleanup is only done when Firefox is not running.
local firefox_running=false
if pgrep -x "Firefox" > /dev/null 2>&1; then
firefox_running=true
fi
if [[ "$firefox_running" == "true" ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Firefox is running · cache cleanup skipped"
else
safe_clean ~/Library/Caches/Firefox/* "Firefox cache"
fi
safe_clean ~/Library/Caches/com.operasoftware.Opera/* "Opera cache"
safe_clean ~/Library/Caches/com.vivaldi.Vivaldi/* "Vivaldi cache"
safe_clean ~/Library/Caches/Comet/* "Comet cache"
safe_clean ~/Library/Caches/com.kagi.kagimacOS/* "Orion cache"
safe_clean ~/Library/Caches/zen/* "Zen cache"
if [[ "$firefox_running" == "true" ]]; then
echo -e " ${YELLOW}${ICON_WARNING}${NC} Firefox is running · profile cache cleanup skipped"
else
safe_clean ~/Library/Application\ Support/Firefox/Profiles/*/cache2/* "Firefox profile cache"
fi
# Prune superseded browser framework/updater versions.
clean_chrome_old_versions
clean_edge_old_versions
clean_edge_updater_old_versions
}
# Cloud storage caches.
# Local caches only — synced file contents are never touched here.
clean_cloud_storage() {
stop_section_spinner
safe_clean ~/Library/Caches/com.dropbox.* "Dropbox cache"
safe_clean ~/Library/Caches/com.getdropbox.dropbox "Dropbox cache"
safe_clean ~/Library/Caches/com.google.GoogleDrive "Google Drive cache"
safe_clean ~/Library/Caches/com.baidu.netdisk "Baidu Netdisk cache"
safe_clean ~/Library/Caches/com.alibaba.teambitiondisk "Alibaba Cloud cache"
safe_clean ~/Library/Caches/com.box.desktop "Box cache"
safe_clean ~/Library/Caches/com.microsoft.OneDrive "OneDrive cache"
}
# Office app caches.
# Microsoft Office, iWork, WPS, and mail-client caches; data files are
# untouched (safe_clean handles whitelist and DRY_RUN).
clean_office_applications() {
stop_section_spinner
safe_clean ~/Library/Caches/com.microsoft.Word "Microsoft Word cache"
safe_clean ~/Library/Caches/com.microsoft.Excel "Microsoft Excel cache"
safe_clean ~/Library/Caches/com.microsoft.Powerpoint "Microsoft PowerPoint cache"
safe_clean ~/Library/Caches/com.microsoft.Outlook/* "Microsoft Outlook cache"
safe_clean ~/Library/Caches/com.apple.iWork.* "Apple iWork cache"
safe_clean ~/Library/Caches/com.kingsoft.wpsoffice.mac "WPS Office cache"
safe_clean ~/Library/Caches/org.mozilla.thunderbird/* "Thunderbird cache"
safe_clean ~/Library/Caches/com.apple.mail/* "Apple Mail cache"
}
# Virtualization caches.
# Caches and temp files only — VM disk images are never touched.
clean_virtualization_tools() {
stop_section_spinner
safe_clean ~/Library/Caches/com.vmware.fusion "VMware Fusion cache"
safe_clean ~/Library/Caches/com.parallels.* "Parallels cache"
safe_clean ~/VirtualBox\ VMs/.cache "VirtualBox cache"
safe_clean ~/.vagrant.d/tmp/* "Vagrant temporary files"
}
# Application Support logs/caches.
# Scans each app dir under ~/Library/Application Support for a fixed set of
# log/cache subdirectories and empties them, skipping protected apps and
# critical system components. Also cleans logs of an explicit allowlist of
# Group Containers. Aggregates sizes into the global cleanup counters.
clean_application_support_logs() {
stop_section_spinner
if [[ ! -d "$HOME/Library/Application Support" ]] || ! ls "$HOME/Library/Application Support" > /dev/null 2>&1; then
note_activity
echo -e " ${YELLOW}${ICON_WARNING}${NC} Skipped: No permission to access Application Support"
return 0
fi
start_section_spinner "Scanning Application Support..."
local total_size=0
local cleaned_count=0
local found_any=false
# Enable nullglob for safe globbing.
local _ng_state
_ng_state=$(shopt -p nullglob || true)
shopt -s nullglob
for app_dir in ~/Library/Application\ Support/*; do
[[ -d "$app_dir" ]] || continue
local app_name=$(basename "$app_dir")
local app_name_lower=$(echo "$app_name" | LC_ALL=C tr '[:upper:]' '[:lower:]')
# Protection list is consulted both case-sensitively and lowercased.
local is_protected=false
if should_protect_data "$app_name"; then
is_protected=true
elif should_protect_data "$app_name_lower"; then
is_protected=true
fi
if [[ "$is_protected" == "true" ]]; then
continue
fi
if is_critical_system_component "$app_name"; then
continue
fi
# Known log/cache subdirectory names checked per app.
local -a start_candidates=("$app_dir/log" "$app_dir/logs" "$app_dir/activitylog" "$app_dir/Cache/Cache_Data" "$app_dir/Crashpad/completed")
for candidate in "${start_candidates[@]}"; do
if [[ -d "$candidate" ]]; then
if find "$candidate" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then
local size=$(get_path_size_kb "$candidate")
# NOTE(review): `((total_size += size))` returns status 1 when the
# result is 0, and `((cleaned_count++))` when the old value is 0;
# presumably errexit is not in effect here — verify.
((total_size += size))
((cleaned_count++))
found_any=true
if [[ "$DRY_RUN" != "true" ]]; then
for item in "$candidate"/*; do
[[ -e "$item" ]] || continue
safe_remove "$item" true > /dev/null 2>&1 || true
done
fi
fi
fi
done
done
# Group Containers logs (explicit allowlist).
local known_group_containers=(
"group.com.apple.contentdelivery"
)
for container in "${known_group_containers[@]}"; do
local container_path="$HOME/Library/Group Containers/$container"
local -a gc_candidates=("$container_path/Logs" "$container_path/Library/Logs")
for candidate in "${gc_candidates[@]}"; do
if [[ -d "$candidate" ]]; then
if find "$candidate" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then
local size=$(get_path_size_kb "$candidate")
((total_size += size))
((cleaned_count++))
found_any=true
if [[ "$DRY_RUN" != "true" ]]; then
for item in "$candidate"/*; do
[[ -e "$item" ]] || continue
safe_remove "$item" true > /dev/null 2>&1 || true
done
fi
fi
fi
done
done
# Restore the caller's nullglob setting.
eval "$_ng_state"
stop_section_spinner
if [[ "$found_any" == "true" ]]; then
local size_human=$(bytes_to_human "$((total_size * 1024))")
if [[ "$DRY_RUN" == "true" ]]; then
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Application Support logs/caches ${YELLOW}($size_human dry)${NC}"
else
echo -e " ${GREEN}${ICON_SUCCESS}${NC} Application Support logs/caches ${GREEN}($size_human)${NC}"
fi
((files_cleaned += cleaned_count))
((total_size_cleaned += total_size))
((total_items++))
note_activity
fi
}
# iOS device backup info.
# Report-only: never deletes backups. Mentions the MobileSync backup
# directory when it exceeds ~100MB so the user can remove it manually.
# Always returns 0.
check_ios_device_backups() {
  local backup_dir="$HOME/Library/Application Support/MobileSync/Backup"
  [[ -d "$backup_dir" ]] || return 0
  # Simplified check without find to avoid hanging.
  # Combined local+assignment keeps the helper's exit status masked.
  local backup_kb=$(get_path_size_kb "$backup_dir")
  if [[ -z "${backup_kb:-}" || "$backup_kb" -le 102400 ]]; then
    return 0
  fi
  local backup_human=$(command du -sh "$backup_dir" 2> /dev/null | awk '{print $1}')
  [[ -n "$backup_human" ]] || return 0
  note_activity
  echo -e " Found ${GREEN}${backup_human}${NC} iOS backups"
  echo -e " You can delete them manually: ${backup_dir}"
  return 0
}
# Apple Silicon specific caches (IS_M_SERIES).
# No-op on Intel Macs; clears Rosetta 2 and related update caches.
clean_apple_silicon_caches() {
  [[ "${IS_M_SERIES:-false}" == "true" ]] || return 0
  start_section "Apple Silicon updates"
  safe_clean /Library/Apple/usr/share/rosetta/rosetta_update_bundle "Rosetta 2 cache"
  safe_clean ~/Library/Caches/com.apple.rosetta.update "Rosetta 2 user cache"
  safe_clean ~/Library/Caches/com.apple.amp.mediasevicesd "Apple Silicon media service cache"
  end_section
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,864 +0,0 @@
#!/bin/bash
# Mole - Base Definitions and Utilities
# Core definitions, constants, and basic utility functions used by all modules
set -euo pipefail
# Prevent multiple sourcing
if [[ -n "${MOLE_BASE_LOADED:-}" ]]; then
    return 0
fi
readonly MOLE_BASE_LOADED=1
# ============================================================================
# Color Definitions
# ============================================================================
# ANSI escape sequences used by all user-facing output.
readonly ESC=$'\033'
readonly GREEN="${ESC}[0;32m"
readonly BLUE="${ESC}[0;34m"
readonly CYAN="${ESC}[0;36m"
readonly YELLOW="${ESC}[0;33m"
readonly PURPLE="${ESC}[0;35m"
readonly PURPLE_BOLD="${ESC}[1;35m"
readonly RED="${ESC}[0;31m"
readonly GRAY="${ESC}[0;90m"
readonly NC="${ESC}[0m" # reset / "no color"
# ============================================================================
# Icon Definitions
# ============================================================================
readonly ICON_CONFIRM="◎"
readonly ICON_ADMIN="⚙"
readonly ICON_SUCCESS="✓"
readonly ICON_ERROR="☻"
readonly ICON_WARNING="●"
readonly ICON_EMPTY="○"
readonly ICON_SOLID="●"
readonly ICON_LIST="•"
readonly ICON_ARROW="➤"
readonly ICON_DRY_RUN="→"
readonly ICON_NAV_UP="↑"
readonly ICON_NAV_DOWN="↓"
# ============================================================================
# Global Configuration Constants
# ============================================================================
readonly MOLE_TEMP_FILE_AGE_DAYS=7 # Temp file retention (days)
readonly MOLE_ORPHAN_AGE_DAYS=60 # Orphaned data retention (days)
readonly MOLE_MAX_PARALLEL_JOBS=15 # Parallel job limit
readonly MOLE_MAIL_DOWNLOADS_MIN_KB=5120 # Mail attachment size threshold
readonly MOLE_MAIL_AGE_DAYS=30 # Mail attachment retention (days)
readonly MOLE_LOG_AGE_DAYS=7 # Log retention (days)
readonly MOLE_CRASH_REPORT_AGE_DAYS=7 # Crash report retention (days)
readonly MOLE_SAVED_STATE_AGE_DAYS=30 # Saved state retention (days) - increased for safety
readonly MOLE_TM_BACKUP_SAFE_HOURS=48 # TM backup safety window (hours)
readonly MOLE_MAX_DS_STORE_FILES=500 # Max .DS_Store files to clean per scan
readonly MOLE_MAX_ORPHAN_ITERATIONS=100 # Max iterations for orphaned app data scan
# ============================================================================
# Whitelist Configuration
# ============================================================================
# Sentinel entry representing Finder metadata (not a real path pattern).
readonly FINDER_METADATA_SENTINEL="FINDER_METADATA"
# Glob patterns that are never cleaned by default (expensive-to-rebuild
# caches, cloud-synced data, and system-critical caches).
declare -a DEFAULT_WHITELIST_PATTERNS=(
    "$HOME/Library/Caches/ms-playwright*"
    "$HOME/.cache/huggingface*"
    "$HOME/.m2/repository/*"
    "$HOME/.ollama/models/*"
    "$HOME/Library/Caches/com.nssurge.surge-mac/*"
    "$HOME/Library/Application Support/com.nssurge.surge-mac/*"
    "$HOME/Library/Caches/org.R-project.R/R/renv/*"
    "$HOME/Library/Caches/pypoetry/virtualenvs*"
    "$HOME/Library/Caches/JetBrains*"
    "$HOME/Library/Caches/com.jetbrains.toolbox*"
    "$HOME/Library/Application Support/JetBrains*"
    "$HOME/Library/Caches/com.apple.finder"
    "$HOME/Library/Mobile Documents*"
    # System-critical caches that affect macOS functionality and stability
    # CRITICAL: Removing these will cause system search and UI issues
    "$HOME/Library/Caches/com.apple.FontRegistry*"
    "$HOME/Library/Caches/com.apple.spotlight*"
    "$HOME/Library/Caches/com.apple.Spotlight*"
    "$HOME/Library/Caches/CloudKit*"
    "$FINDER_METADATA_SENTINEL"
)
# Optimize-mode checks that are skipped by default.
declare -a DEFAULT_OPTIMIZE_WHITELIST_PATTERNS=(
    "check_brew_health"
    "check_touchid"
    "check_git_config"
)
# ============================================================================
# BSD Stat Compatibility
# ============================================================================
# Absolute path pins macOS/BSD stat(1); a Homebrew GNU coreutils `stat`
# earlier in PATH uses incompatible flags.
readonly STAT_BSD="/usr/bin/stat"
# Get file size in bytes
# Args: $1 - file path
# Prints the size, or "0" when stat fails (missing/unreadable path).
get_file_size() {
    local file="$1"
    local result
    # `|| true` guards errexit: the sourcing file runs under `set -e`, and a
    # failing stat (e.g. missing file) would otherwise abort the script.
    result=$($STAT_BSD -f%z "$file" 2> /dev/null || true)
    echo "${result:-0}"
}
# Get file modification time in epoch seconds
# Args: $1 - file path; prints "0" for empty paths or stat failures.
get_file_mtime() {
    local target="$1"
    if [[ -z "$target" ]]; then
        echo "0"
        return
    fi
    local mtime
    mtime=$($STAT_BSD -f%m "$target" 2> /dev/null || echo "")
    # Anything non-numeric (stat error, garbage) collapses to 0.
    [[ "$mtime" =~ ^[0-9]+$ ]] || mtime=0
    echo "$mtime"
}
# Determine date command once
# Prefer the absolute path so aliases/functions named `date` or PATH
# overrides cannot change behavior.
if [[ -x /bin/date ]]; then
    _DATE_CMD="/bin/date"
else
    _DATE_CMD="date"
fi
# Get current time in epoch seconds (defensive against locale/aliases)
# Prints "0" if the date command fails or emits something non-numeric.
get_epoch_seconds() {
    local now
    now=$($_DATE_CMD +%s 2> /dev/null || echo "")
    [[ "$now" =~ ^[0-9]+$ ]] || now=0
    echo "$now"
}
# Get file owner username
# Args: $1 - file path; prints an empty line when stat fails.
get_file_owner() {
    local owner
    owner=$($STAT_BSD -f%Su "$1" 2> /dev/null || true)
    echo "$owner"
}
# ============================================================================
# System Utilities
# ============================================================================
# Check if System Integrity Protection is enabled
# Returns: 0 if SIP is enabled (or cannot be determined), 1 if disabled
is_sip_enabled() {
    # No csrutil binary -> cannot determine; report enabled (conservative).
    command -v csrutil > /dev/null 2>&1 || return 0
    local csr_output
    csr_output=$(csrutil status 2> /dev/null || echo "")
    # Match "enabled" case-insensitively in the status text.
    if echo "$csr_output" | grep -qi "enabled"; then
        return 0
    fi
    return 1
}
# Check if running in an interactive terminal (stdout is a TTY)
is_interactive() {
    if [[ -t 1 ]]; then
        return 0
    else
        return 1
    fi
}
# Detect CPU architecture
# Returns: "Apple Silicon" or "Intel"
detect_architecture() {
    case "$(uname -m)" in
        arm64) echo "Apple Silicon" ;;
        *) echo "Intel" ;;
    esac
}
# Get free disk space on root volume
# Returns: human-readable string (e.g., "100G")
get_free_space() {
    # On Catalina and later, user data lives on the split Data volume.
    local volume="/"
    if [[ -d "/System/Volumes/Data" ]]; then
        volume="/System/Volumes/Data"
    fi
    df -h "$volume" | awk 'NR==2 {print $4}'
}
# Get Darwin kernel major version (e.g., 24 for 24.2.0)
# Returns 999 when the version cannot be parsed so callers assume a modern
# system and skip potentially dangerous operations.
get_darwin_major() {
    local release
    release=$(uname -r 2> /dev/null || true)
    local major="${release%%.*}"
    if [[ "$major" =~ ^[0-9]+$ ]]; then
        echo "$major"
    else
        echo 999
    fi
}
# Check if Darwin kernel major version meets minimum
# Args: $1 - minimum major version required
is_darwin_ge() {
    local required="$1"
    [[ "$(get_darwin_major)" -ge "$required" ]]
}
# Get optimal parallel jobs for operation type (scan|io|compute|default)
# Falls back to 4 cores when sysctl is unavailable.
get_optimal_parallel_jobs() {
    local kind="${1:-default}"
    local cores
    cores=$(sysctl -n hw.ncpu 2> /dev/null || echo 4)
    case "$kind" in
        scan | io)
            # I/O bound work benefits from oversubscription.
            echo $((cores * 2))
            ;;
        compute)
            echo "$cores"
            ;;
        *)
            echo $((cores + 2))
            ;;
    esac
}
# ============================================================================
# User Context Utilities
# ============================================================================
# True when the effective user is root.
is_root_user() {
    [[ "$(id -u)" -eq 0 ]]
}
# Resolve a user's home directory.
# Args: $1 - username (may be empty)
# Prints the home directory, or an empty string when it cannot be resolved.
get_user_home() {
    local user="$1"
    local home=""
    if [[ -z "$user" ]]; then
        echo ""
        return 0
    fi
    # Reject names with shell metacharacters up front: the eval fallback
    # below would otherwise allow command injection via a crafted username.
    if [[ ! "$user" =~ ^[A-Za-z0-9._-]+$ ]]; then
        echo ""
        return 0
    fi
    # Preferred: ask Directory Services (authoritative on macOS).
    if command -v dscl > /dev/null 2>&1; then
        home=$(dscl . -read "/Users/$user" NFSHomeDirectory 2> /dev/null | awk '{print $2}' | head -1 || true)
    fi
    # Fallback: tilde expansion (safe now that $user is validated).
    if [[ -z "$home" ]]; then
        home=$(eval echo "~$user" 2> /dev/null || true)
    fi
    # An unexpanded tilde means the user does not exist.
    if [[ "$home" == "~"* ]]; then
        home=""
    fi
    echo "$home"
}
# Resolve the user who invoked the script (the real user behind sudo).
get_invoking_user() {
    local sudo_user="${SUDO_USER:-}"
    if [[ -n "$sudo_user" && "$sudo_user" != "root" ]]; then
        echo "$sudo_user"
    else
        echo "${USER:-}"
    fi
}
# Resolve the UID of the invoking user (SUDO_UID wins over the current id).
get_invoking_uid() {
    if [[ -n "${SUDO_UID:-}" ]]; then
        echo "$SUDO_UID"
        return 0
    fi
    local resolved
    resolved=$(id -u 2> /dev/null || true)
    echo "$resolved"
}
# Resolve the GID of the invoking user (SUDO_GID wins over the current id).
get_invoking_gid() {
    if [[ -n "${SUDO_GID:-}" ]]; then
        echo "$SUDO_GID"
        return 0
    fi
    local resolved
    resolved=$(id -g 2> /dev/null || true)
    echo "$resolved"
}
# Resolve the home directory of the invoking user (sudo-aware).
get_invoking_home() {
    local sudo_user="${SUDO_USER:-}"
    if [[ -n "$sudo_user" && "$sudo_user" != "root" ]]; then
        get_user_home "$sudo_user"
        return 0
    fi
    echo "${HOME:-}"
}
# Create a directory (with parents) and, when running as root via sudo,
# restore ownership of the created path chain to the invoking user.
# Args: $1 - directory path (supports a leading ~)
ensure_user_dir() {
    local raw_path="$1"
    if [[ -z "$raw_path" ]]; then
        return 0
    fi
    local target_path="$raw_path"
    # Expand a leading tilde manually (a quoted "~" is not expanded by bash).
    if [[ "$target_path" == "~"* ]]; then
        target_path="${target_path/#\~/$HOME}"
    fi
    mkdir -p "$target_path" 2> /dev/null || true
    # Ownership fix-up only matters when mkdir ran as root on behalf of a user.
    if ! is_root_user; then
        return 0
    fi
    local sudo_user="${SUDO_USER:-}"
    if [[ -z "$sudo_user" || "$sudo_user" == "root" ]]; then
        return 0
    fi
    local user_home
    user_home=$(get_user_home "$sudo_user")
    if [[ -z "$user_home" ]]; then
        return 0
    fi
    user_home="${user_home%/}"
    # Only adjust ownership for paths inside the invoking user's home.
    if [[ "$target_path" != "$user_home" && "$target_path" != "$user_home/"* ]]; then
        return 0
    fi
    local owner_uid="${SUDO_UID:-}"
    local owner_gid="${SUDO_GID:-}"
    if [[ -z "$owner_uid" || -z "$owner_gid" ]]; then
        owner_uid=$(id -u "$sudo_user" 2> /dev/null || true)
        owner_gid=$(id -g "$sudo_user" 2> /dev/null || true)
    fi
    if [[ -z "$owner_uid" || -z "$owner_gid" ]]; then
        return 0
    fi
    # Walk up from the target toward the user's home, fixing ownership of
    # any intermediate directories mkdir -p may have created as root.
    local dir="$target_path"
    while [[ -n "$dir" && "$dir" != "/" ]]; do
        # Early stop: if ownership is already correct, no need to continue up the tree
        if [[ -d "$dir" ]]; then
            local current_uid
            current_uid=$("$STAT_BSD" -f%u "$dir" 2> /dev/null || echo "")
            if [[ "$current_uid" == "$owner_uid" ]]; then
                break
            fi
        fi
        chown "$owner_uid:$owner_gid" "$dir" 2> /dev/null || true
        if [[ "$dir" == "$user_home" ]]; then
            break
        fi
        dir=$(dirname "$dir")
        if [[ "$dir" == "." ]]; then
            break
        fi
    done
}
# Create a file (and its parent directory) and, under sudo, hand ownership
# back to the invoking user when the file lives inside their home.
# Args: $1 - file path (supports a leading ~)
ensure_user_file() {
    local raw_path="$1"
    if [[ -z "$raw_path" ]]; then
        return 0
    fi
    local target_path="$raw_path"
    if [[ "$target_path" == "~"* ]]; then
        target_path="${target_path/#\~/$HOME}"
    fi
    # Parent directory gets the same sudo-aware ownership treatment.
    ensure_user_dir "$(dirname "$target_path")"
    touch "$target_path" 2> /dev/null || true
    if ! is_root_user; then
        return 0
    fi
    local sudo_user="${SUDO_USER:-}"
    if [[ -z "$sudo_user" || "$sudo_user" == "root" ]]; then
        return 0
    fi
    local user_home
    user_home=$(get_user_home "$sudo_user")
    if [[ -z "$user_home" ]]; then
        return 0
    fi
    user_home="${user_home%/}"
    if [[ "$target_path" != "$user_home" && "$target_path" != "$user_home/"* ]]; then
        return 0
    fi
    local owner_uid="${SUDO_UID:-}"
    local owner_gid="${SUDO_GID:-}"
    if [[ -z "$owner_uid" || -z "$owner_gid" ]]; then
        owner_uid=$(id -u "$sudo_user" 2> /dev/null || true)
        owner_gid=$(id -g "$sudo_user" 2> /dev/null || true)
    fi
    if [[ -n "$owner_uid" && -n "$owner_gid" ]]; then
        chown "$owner_uid:$owner_gid" "$target_path" 2> /dev/null || true
    fi
}
# ============================================================================
# Formatting Utilities
# ============================================================================
# Convert bytes to human-readable format (e.g., 1.5GB)
# Args: $1 - non-negative integer byte count
# Prints "0B" and returns 1 for non-numeric input.
bytes_to_human() {
    local bytes="$1"
    if [[ ! "$bytes" =~ ^[0-9]+$ ]]; then
        echo "0B"
        return 1
    fi
    local gib=1073741824 mib=1048576 kib=1024
    if ((bytes >= gib)); then
        # Two decimal places for GB values.
        printf "%d.%02dGB\n" $((bytes / gib)) $(((bytes % gib) * 100 / gib))
    elif ((bytes >= mib)); then
        # One decimal place for MB values.
        printf "%d.%01dMB\n" $((bytes / mib)) $(((bytes % mib) * 10 / mib))
    elif ((bytes >= kib)); then
        # Round to the nearest KB.
        printf "%dKB\n" $(((bytes + 512) / kib))
    else
        printf "%dB\n" "$bytes"
    fi
}
# Convert kilobytes to human-readable format
# Args: $1 - size in KB (defaults to 0)
# Returns: formatted string
bytes_to_human_kb() {
    local kb="${1:-0}"
    bytes_to_human "$((kb * 1024))"
}
# Get brand-friendly localized name for an application
# Args: $1 - raw app/bundle name as discovered on disk
# Prints the localized display name; unknown names pass through unchanged.
get_brand_name() {
    local name="$1"
    # Detect if system primary language is Chinese (Cached)
    # Detection runs once per session; the result is exported in
    # MOLE_IS_CHINESE_SYSTEM ("true"/"false") for child processes.
    if [[ -z "${MOLE_IS_CHINESE_SYSTEM:-}" ]]; then
        local sys_lang
        sys_lang=$(defaults read -g AppleLanguages 2> /dev/null | grep -o 'zh-Hans\|zh-Hant\|zh' | head -1 || echo "")
        if [[ -n "$sys_lang" ]]; then
            export MOLE_IS_CHINESE_SYSTEM="true"
        else
            export MOLE_IS_CHINESE_SYSTEM="false"
        fi
    fi
    local is_chinese="${MOLE_IS_CHINESE_SYSTEM}"
    # Return localized names based on system language
    if [[ "$is_chinese" == true ]]; then
        # Chinese system - prefer Chinese names
        case "$name" in
            "qiyimac" | "iQiyi") echo "爱奇艺" ;;
            "wechat" | "WeChat") echo "微信" ;;
            "QQ") echo "QQ" ;;
            "VooV Meeting") echo "腾讯会议" ;;
            "dingtalk" | "DingTalk") echo "钉钉" ;;
            "NeteaseMusic" | "NetEase Music") echo "网易云音乐" ;;
            "BaiduNetdisk" | "Baidu NetDisk") echo "百度网盘" ;;
            "alipay" | "Alipay") echo "支付宝" ;;
            "taobao" | "Taobao") echo "淘宝" ;;
            "futunn" | "Futu NiuNiu") echo "富途牛牛" ;;
            "tencent lemon" | "Tencent Lemon Cleaner" | "Tencent Lemon") echo "腾讯柠檬清理" ;;
            *) echo "$name" ;;
        esac
    else
        # Non-Chinese system - use English names
        case "$name" in
            "qiyimac" | "爱奇艺") echo "iQiyi" ;;
            "wechat" | "微信") echo "WeChat" ;;
            "QQ") echo "QQ" ;;
            "腾讯会议") echo "VooV Meeting" ;;
            "dingtalk" | "钉钉") echo "DingTalk" ;;
            "网易云音乐") echo "NetEase Music" ;;
            "百度网盘") echo "Baidu NetDisk" ;;
            "alipay" | "支付宝") echo "Alipay" ;;
            "taobao" | "淘宝") echo "Taobao" ;;
            "富途牛牛") echo "Futu NiuNiu" ;;
            "腾讯柠檬清理" | "Tencent Lemon Cleaner") echo "Tencent Lemon" ;;
            "keynote" | "Keynote") echo "Keynote" ;;
            "pages" | "Pages") echo "Pages" ;;
            "numbers" | "Numbers") echo "Numbers" ;;
            *) echo "$name" ;;
        esac
    fi
}
# ============================================================================
# Temporary File Management
# ============================================================================
# Tracked temporary files and directories
# Populated by create_temp_file/create_temp_dir/register_temp_* and emptied
# by cleanup_temp_files.
declare -a MOLE_TEMP_FILES=()
declare -a MOLE_TEMP_DIRS=()
# Create tracked temporary file
# Prints the new path; returns 1 if mktemp fails.
create_temp_file() {
    local tmp_path
    if ! tmp_path=$(mktemp); then
        return 1
    fi
    MOLE_TEMP_FILES+=("$tmp_path")
    echo "$tmp_path"
}
# Create tracked temporary directory
# Prints the new path; returns 1 if mktemp fails.
create_temp_dir() {
    local tmp_dir
    if ! tmp_dir=$(mktemp -d); then
        return 1
    fi
    MOLE_TEMP_DIRS+=("$tmp_dir")
    echo "$tmp_dir"
}
# Register existing file for cleanup
register_temp_file() {
    local tracked="$1"
    MOLE_TEMP_FILES+=("$tracked")
}
# Register existing directory for cleanup
register_temp_dir() {
    local tracked="$1"
    MOLE_TEMP_DIRS+=("$tracked")
}
# Create temp file with prefix (for analyze.sh compatibility)
# Args: $1 - filename prefix (default "mole")
# The explicit .XXXXXX template works with both BSD mktemp (macOS default)
# and GNU mktemp (coreutils). Honors TMPDIR, falling back to /tmp.
mktemp_file() {
    local prefix="${1:-mole}"
    local tmp_root="${TMPDIR:-/tmp}"
    mktemp "${tmp_root}/${prefix}.XXXXXX"
}
# Cleanup all tracked temp files and directories, then reset the registries.
cleanup_temp_files() {
    stop_inline_spinner 2> /dev/null || true
    local entry
    # The ${arr[@]+...} guard keeps empty-array expansion safe under set -u.
    for entry in ${MOLE_TEMP_FILES[@]+"${MOLE_TEMP_FILES[@]}"}; do
        [[ -f "$entry" ]] && rm -f "$entry" 2> /dev/null || true
    done
    for entry in ${MOLE_TEMP_DIRS[@]+"${MOLE_TEMP_DIRS[@]}"}; do
        [[ -d "$entry" ]] && rm -rf "$entry" 2> /dev/null || true # SAFE: cleanup_temp_files
    done
    MOLE_TEMP_FILES=()
    MOLE_TEMP_DIRS=()
}
# ============================================================================
# Section Tracking (for progress indication)
# ============================================================================
# Global section tracking variables
# TRACK_SECTION is 1 while a section is open; SECTION_ACTIVITY becomes 1
# once any activity is reported inside the current section.
TRACK_SECTION=0
SECTION_ACTIVITY=0
# Start a new section
# Args: $1 - section title
start_section() {
    TRACK_SECTION=1
    SECTION_ACTIVITY=0
    echo ""
    echo -e "${PURPLE_BOLD}${ICON_ARROW} $1${NC}"
}
# End a section
# Shows "Nothing to tidy" if no activity was recorded
end_section() {
    if [[ "${TRACK_SECTION:-0}" == "1" && "${SECTION_ACTIVITY:-0}" == "0" ]]; then
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} Nothing to tidy"
    fi
    TRACK_SECTION=0
}
# Mark activity in current section
note_activity() {
    if [[ "${TRACK_SECTION:-0}" == "1" ]]; then
        SECTION_ACTIVITY=1
    fi
}
# Start a section spinner with optional message
# Usage: start_section_spinner "message"
# Only spins on a TTY; always clears any previous inline spinner first.
start_section_spinner() {
    local message="${1:-Scanning...}"
    stop_inline_spinner 2> /dev/null || true
    if [[ -t 1 ]]; then
        MOLE_SPINNER_PREFIX=" " start_inline_spinner "$message"
    fi
}
# Stop spinner and clear the line
# Usage: stop_section_spinner
stop_section_spinner() {
    stop_inline_spinner 2> /dev/null || true
    if [[ -t 1 ]]; then
        # \r returns to column 0; \033[K erases to end of line (on stderr).
        echo -ne "\r\033[K" >&2 || true
    fi
}
# Safe terminal line clearing with terminal type detection
# Usage: safe_clear_lines <num_lines> [tty_device]
# Returns: 0 on success, 1 if terminal doesn't support ANSI
safe_clear_lines() {
    local lines="${1:-1}"
    local tty_device="${2:-/dev/tty}"
    # Use centralized ANSI support check (defined below)
    # Note: This forward reference works because functions are parsed before execution
    is_ansi_supported 2> /dev/null || return 1
    # Clear lines one by one (more reliable than multi-line sequences)
    # \033[1A moves the cursor up one line; \r\033[K erases that line.
    local i
    for ((i = 0; i < lines; i++)); do
        printf "\033[1A\r\033[K" > "$tty_device" 2> /dev/null || return 1
    done
    return 0
}
# Safe single line clear with fallback
# Usage: safe_clear_line [tty_device]
safe_clear_line() {
    local tty_device="${1:-/dev/tty}"
    # Use centralized ANSI support check
    is_ansi_supported 2> /dev/null || return 1
    printf "\r\033[K" > "$tty_device" 2> /dev/null || return 1
    return 0
}
# Update progress spinner if enough time has elapsed
# Usage: update_progress_if_needed <completed> <total> <last_update_time_var> [interval]
# Example: update_progress_if_needed "$completed" "$total" last_progress_update 2
# Returns: 0 if updated, 1 if skipped
update_progress_if_needed() {
    local completed="$1"
    local total="$2"
    local last_update_var="$3" # Name of variable holding last update time
    local interval="${4:-2}"   # Default: update every 2 seconds
    # Get current time
    local current_time
    current_time=$(get_epoch_seconds)
    # Get last update time from variable
    # eval provides read/write indirection on the caller's variable by name.
    local last_time
    eval "last_time=\${$last_update_var:-0}"
    [[ "$last_time" =~ ^[0-9]+$ ]] || last_time=0
    # Check if enough time has elapsed
    if [[ $((current_time - last_time)) -ge $interval ]]; then
        # Update the spinner with progress
        stop_section_spinner
        start_section_spinner "Scanning items... ($completed/$total)"
        # Update the last_update_time variable
        eval "$last_update_var=$current_time"
        return 0
    fi
    return 1
}
# ============================================================================
# Spinner Stack Management (prevents nesting issues)
# ============================================================================
# Global spinner stack
# Each entry is either "running:<pid>" or "stopped".
declare -a MOLE_SPINNER_STACK=()
# Push current spinner state onto stack
# Usage: push_spinner_state
push_spinner_state() {
    local current_state=""
    # Save current spinner PID if running
    # kill -0 probes process existence without sending a signal.
    if [[ -n "${MOLE_SPINNER_PID:-}" ]] && kill -0 "$MOLE_SPINNER_PID" 2> /dev/null; then
        current_state="running:$MOLE_SPINNER_PID"
    else
        current_state="stopped"
    fi
    MOLE_SPINNER_STACK+=("$current_state")
    debug_log "Pushed spinner state: $current_state (stack depth: ${#MOLE_SPINNER_STACK[@]})"
}
# Pop and restore spinner state from stack
# Usage: pop_spinner_state
# Returns: 1 when the stack is empty, 0 otherwise.
pop_spinner_state() {
    if [[ ${#MOLE_SPINNER_STACK[@]} -eq 0 ]]; then
        debug_log "Warning: Attempted to pop from empty spinner stack"
        return 1
    fi
    # Stack depth safety check
    if [[ ${#MOLE_SPINNER_STACK[@]} -gt 10 ]]; then
        debug_log "Warning: Spinner stack depth excessive (${#MOLE_SPINNER_STACK[@]}), possible leak"
    fi
    local last_idx=$((${#MOLE_SPINNER_STACK[@]} - 1))
    local state="${MOLE_SPINNER_STACK[$last_idx]}"
    # Remove the last element (Bash 3.2 compatible): rebuild the array
    # without it. NOTE: expanding an empty array with "${arr[@]}" is an
    # error under `set -u` on Bash < 4.4, so guard the final expansion for
    # the case where the stack becomes empty.
    local -a new_stack=()
    local i
    for ((i = 0; i < last_idx; i++)); do
        new_stack+=("${MOLE_SPINNER_STACK[$i]}")
    done
    MOLE_SPINNER_STACK=(${new_stack[@]+"${new_stack[@]}"})
    debug_log "Popped spinner state: $state (remaining depth: ${#MOLE_SPINNER_STACK[@]})"
    # Restore state if needed
    if [[ "$state" == running:* ]]; then
        # Previous spinner was running - we don't restart it automatically
        # This is intentional to avoid UI conflicts
        :
    fi
    return 0
}
# Safe spinner start with stack management
# Usage: safe_start_spinner <message>
# Pair every call with safe_stop_spinner to keep the stack balanced.
safe_start_spinner() {
    local message="${1:-Working...}"
    # Push current state
    push_spinner_state
    # Stop any existing spinner
    stop_section_spinner 2> /dev/null || true
    # Start new spinner
    start_section_spinner "$message"
}
# Safe spinner stop with stack management
# Usage: safe_stop_spinner
safe_stop_spinner() {
    # Stop current spinner
    stop_section_spinner 2> /dev/null || true
    # Pop previous state
    pop_spinner_state || true
}
# ============================================================================
# Terminal Compatibility Checks
# ============================================================================
# Check if terminal supports ANSI escape codes
# Usage: is_ansi_supported
# Returns: 0 if supported, 1 if not
is_ansi_supported() {
    # Must be an interactive terminal with a known TERM.
    [[ -t 1 ]] || return 1
    [[ -n "${TERM:-}" ]] || return 1
    case "$TERM" in
        xterm* | vt100 | vt220 | screen* | tmux* | ansi | linux | rxvt* | konsole*)
            return 0
            ;;
        dumb | unknown)
            return 1
            ;;
    esac
    # Unrecognized TERM: consult terminfo; support for >= 8 colors is a
    # good proxy for ANSI escape support.
    if command -v tput > /dev/null 2>&1; then
        local color_count
        color_count=$(tput colors 2> /dev/null || echo "0")
        [[ "$color_count" -ge 8 ]] && return 0
    fi
    return 1
}
# Get terminal capability info
# Usage: get_terminal_info
# Prints a one-line summary such as
# "Terminal: xterm-256color (ANSI supported) 80x24, 256 colors".
get_terminal_info() {
    local info="Terminal: ${TERM:-unknown}"
    if is_ansi_supported; then
        info+=" (ANSI supported)"
        # Append geometry/colors when terminfo is queryable.
        if command -v tput > /dev/null 2>&1; then
            local cols=$(tput cols 2> /dev/null || echo "?")
            local lines=$(tput lines 2> /dev/null || echo "?")
            local colors=$(tput colors 2> /dev/null || echo "?")
            info+=" ${cols}x${lines}, ${colors} colors"
        fi
    else
        info+=" (ANSI not supported)"
    fi
    echo "$info"
}
# Validate terminal environment before running
# Usage: validate_terminal_environment
# Returns: 0 if OK, 1 with warning if issues detected
validate_terminal_environment() {
    local warnings=0
    # NOTE: use `warnings=$((warnings + 1))` rather than `((warnings++))`.
    # The post-increment form evaluates to 0 on the first increment, which
    # is exit status 1 and aborts the script under this file's `set -e`.
    # Check if TERM is set
    if [[ -z "${TERM:-}" ]]; then
        log_warning "TERM environment variable not set"
        warnings=$((warnings + 1))
    fi
    # Check if running in a known problematic terminal
    case "${TERM:-}" in
        dumb)
            log_warning "Running in 'dumb' terminal - limited functionality"
            warnings=$((warnings + 1))
            ;;
        unknown)
            log_warning "Terminal type unknown - may have display issues"
            warnings=$((warnings + 1))
            ;;
    esac
    # Check terminal size if available
    if command -v tput > /dev/null 2>&1; then
        local cols
        cols=$(tput cols 2> /dev/null || echo "80")
        if [[ "$cols" -lt 60 ]]; then
            log_warning "Terminal width ($cols cols) is narrow - output may wrap"
            warnings=$((warnings + 1))
        fi
    fi
    # Report compatibility
    if [[ $warnings -eq 0 ]]; then
        debug_log "Terminal environment validated: $(get_terminal_info)"
        return 0
    else
        debug_log "Terminal compatibility warnings: $warnings"
        return 1
    fi
}

View File

@@ -1,18 +0,0 @@
#!/bin/bash
# Shared command list for help text and completions.
# Format: "command:description" — consumers split on the first colon.
MOLE_COMMANDS=(
    "clean:Free up disk space"
    "uninstall:Remove apps completely"
    "optimize:Check and maintain system"
    "analyze:Explore disk usage"
    "status:Monitor system health"
    "purge:Remove old project artifacts"
    "installer:Find and remove installer files"
    "touchid:Configure Touch ID for sudo"
    "completion:Setup shell tab completion"
    "update:Update to latest version"
    "remove:Remove Mole from system"
    "help:Show help"
    "version:Show version"
)

View File

@@ -1,188 +0,0 @@
#!/bin/bash
# Mole - Common Functions Library
# Main entry point that loads all core modules
set -euo pipefail
# Prevent multiple sourcing
if [[ -n "${MOLE_COMMON_LOADED:-}" ]]; then
    return 0
fi
readonly MOLE_COMMON_LOADED=1
# Resolve this script's directory so modules load relative to it.
_MOLE_CORE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Load core modules
# Order matters: base.sh defines constants used by every later module.
source "$_MOLE_CORE_DIR/base.sh"
source "$_MOLE_CORE_DIR/log.sh"
source "$_MOLE_CORE_DIR/timeout.sh"
source "$_MOLE_CORE_DIR/file_ops.sh"
source "$_MOLE_CORE_DIR/ui.sh"
source "$_MOLE_CORE_DIR/app_protection.sh"
# Load sudo management if available
if [[ -f "$_MOLE_CORE_DIR/sudo.sh" ]]; then
    source "$_MOLE_CORE_DIR/sudo.sh"
fi
# Update via Homebrew
# Args: $1 - currently installed version (used in fallback messages)
# Runs `brew update` then `brew upgrade mole` with inline spinners on TTYs,
# reports the outcome, and clears cached update-check files.
# Returns 1 when the upgrade output contains a Homebrew error.
update_via_homebrew() {
    local current_version="$1"
    local temp_update temp_upgrade
    temp_update=$(mktemp_file "brew_update")
    temp_upgrade=$(mktemp_file "brew_upgrade")
    # Set up trap for interruption (Ctrl+C) with inline cleanup
    trap 'stop_inline_spinner 2>/dev/null; rm -f "$temp_update" "$temp_upgrade" 2>/dev/null; echo ""; exit 130' INT TERM
    # Update Homebrew
    if [[ -t 1 ]]; then
        start_inline_spinner "Updating Homebrew..."
    else
        echo "Updating Homebrew..."
    fi
    # Run in the background + wait so the INT/TERM trap stays responsive.
    brew update > "$temp_update" 2>&1 &
    local update_pid=$!
    wait $update_pid 2> /dev/null || true # Continue even if brew update fails
    if [[ -t 1 ]]; then
        stop_inline_spinner
    fi
    # Upgrade Mole
    if [[ -t 1 ]]; then
        start_inline_spinner "Upgrading Mole..."
    else
        echo "Upgrading Mole..."
    fi
    brew upgrade mole > "$temp_upgrade" 2>&1 &
    local upgrade_pid=$!
    wait $upgrade_pid 2> /dev/null || true # Continue even if brew upgrade fails
    local upgrade_output
    upgrade_output=$(cat "$temp_upgrade")
    if [[ -t 1 ]]; then
        stop_inline_spinner
    fi
    # Clear trap
    trap - INT TERM
    # Cleanup temp files
    rm -f "$temp_update" "$temp_upgrade"
    if echo "$upgrade_output" | grep -q "already installed"; then
        local installed_version
        installed_version=$(brew list --versions mole 2> /dev/null | awk '{print $2}')
        echo ""
        echo -e "${GREEN}${ICON_SUCCESS}${NC} Already on latest version (${installed_version:-$current_version})"
        echo ""
    elif echo "$upgrade_output" | grep -q "Error:"; then
        log_error "Homebrew upgrade failed"
        echo "$upgrade_output" | grep "Error:" >&2
        return 1
    else
        # Hide brew noise, keep the useful lines.
        echo "$upgrade_output" | grep -Ev "^(==>|Updating Homebrew|Warning:)" || true
        local new_version
        new_version=$(brew list --versions mole 2> /dev/null | awk '{print $2}')
        echo ""
        echo -e "${GREEN}${ICON_SUCCESS}${NC} Updated to latest version (${new_version:-$current_version})"
        echo ""
    fi
    # Clear update cache (suppress errors if cache doesn't exist or is locked)
    rm -f "$HOME/.cache/mole/version_check" "$HOME/.cache/mole/update_message" 2> /dev/null || true
}
# Remove applications from Dock
# Args: one or more app paths to remove from persistent Dock entries
# Best effort: silently returns when python3 or the Dock plist is missing.
# The embedded helper rewrites com.apple.dock.plist and restarts the Dock
# only when an entry was actually removed.
remove_apps_from_dock() {
    if [[ $# -eq 0 ]]; then
        return 0
    fi
    local plist="$HOME/Library/Preferences/com.apple.dock.plist"
    [[ -f "$plist" ]] || return 0
    if ! command -v python3 > /dev/null 2>&1; then
        return 0
    fi
    # Prune dock entries using Python helper
    python3 - "$@" << 'PY' 2> /dev/null || return 0
import os
import plistlib
import subprocess
import sys
import urllib.parse
plist_path = os.path.expanduser('~/Library/Preferences/com.apple.dock.plist')
if not os.path.exists(plist_path):
    sys.exit(0)
def normalise(path):
    if not path:
        return ''
    return os.path.normpath(os.path.realpath(path.rstrip('/')))
targets = {normalise(arg) for arg in sys.argv[1:] if arg}
targets = {t for t in targets if t}
if not targets:
    sys.exit(0)
with open(plist_path, 'rb') as fh:
    try:
        data = plistlib.load(fh)
    except Exception:
        sys.exit(0)
apps = data.get('persistent-apps')
if not isinstance(apps, list):
    sys.exit(0)
changed = False
filtered = []
for item in apps:
    try:
        url = item['tile-data']['file-data']['_CFURLString']
    except (KeyError, TypeError):
        filtered.append(item)
        continue
    if not isinstance(url, str):
        filtered.append(item)
        continue
    parsed = urllib.parse.urlparse(url)
    path = urllib.parse.unquote(parsed.path or '')
    if not path:
        filtered.append(item)
        continue
    candidate = normalise(path)
    if any(candidate == t or candidate.startswith(t + os.sep) for t in targets):
        changed = True
        continue
    filtered.append(item)
if not changed:
    sys.exit(0)
data['persistent-apps'] = filtered
with open(plist_path, 'wb') as fh:
    try:
        plistlib.dump(data, fh, fmt=plistlib.FMT_BINARY)
    except Exception:
        plistlib.dump(data, fh)
# Restart Dock to apply changes
try:
    subprocess.run(['killall', 'Dock'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=False)
except Exception:
    pass
PY
}

View File

@@ -1,351 +0,0 @@
#!/bin/bash
# Mole - File Operations
# Safe file and directory manipulation with validation
set -euo pipefail
# Prevent multiple sourcing
if [[ -n "${MOLE_FILE_OPS_LOADED:-}" ]]; then
    return 0
fi
readonly MOLE_FILE_OPS_LOADED=1
# Ensure dependencies are loaded
# (Each module guards against double-sourcing via its own *_LOADED flag.)
_MOLE_CORE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [[ -z "${MOLE_BASE_LOADED:-}" ]]; then
    # shellcheck source=lib/core/base.sh
    source "$_MOLE_CORE_DIR/base.sh"
fi
if [[ -z "${MOLE_LOG_LOADED:-}" ]]; then
    # shellcheck source=lib/core/log.sh
    source "$_MOLE_CORE_DIR/log.sh"
fi
if [[ -z "${MOLE_TIMEOUT_LOADED:-}" ]]; then
    # shellcheck source=lib/core/timeout.sh
    source "$_MOLE_CORE_DIR/timeout.sh"
fi
# ============================================================================
# Path Validation
# ============================================================================
# Validate path for deletion (absolute, no traversal, not system dir)
# Args: $1 - candidate path
# Returns: 0 when the path may be deleted, 1 otherwise (reason is logged).
validate_path_for_deletion() {
    local path="$1"
    # Check path is not empty
    if [[ -z "$path" ]]; then
        log_error "Path validation failed: empty path"
        return 1
    fi
    # Check path is absolute
    if [[ "$path" != /* ]]; then
        log_error "Path validation failed: path must be absolute: $path"
        return 1
    fi
    # Check for path traversal attempts
    # Only reject .. when it appears as a complete path component (/../ or /.. or ../)
    # This allows legitimate directory names containing .. (e.g., Firefox's "name..files")
    if [[ "$path" =~ (^|/)\.\.(\/|$) ]]; then
        log_error "Path validation failed: path traversal not allowed: $path"
        return 1
    fi
    # Check path doesn't contain dangerous characters
    if [[ "$path" =~ [[:cntrl:]] ]] || [[ "$path" =~ $'\n' ]]; then
        log_error "Path validation failed: contains control characters: $path"
        return 1
    fi
    # Allow deletion of coresymbolicationd cache (safe system cache that can be rebuilt)
    case "$path" in
        /System/Library/Caches/com.apple.coresymbolicationd/data | /System/Library/Caches/com.apple.coresymbolicationd/data/*)
            return 0
            ;;
    esac
    # Check path isn't critical system directory
    case "$path" in
        / | /bin | /sbin | /usr | /usr/bin | /usr/sbin | /etc | /var | /System | /System/* | /Library/Extensions)
            log_error "Path validation failed: critical system directory: $path"
            return 1
            ;;
    esac
    return 0
}
# ============================================================================
# Safe Removal Operations
# ============================================================================
# Safe wrapper around rm -rf with validation
# Args: $1 - path to delete; $2 - "true" to suppress the failure log (optional)
# Honors MOLE_DRY_RUN=1 (log what would be removed, delete nothing) and
# increments MOLE_PERMISSION_DENIED_COUNT on permission errors so callers
# can hint at granting Full Disk Access.
safe_remove() {
    local path="$1"
    local silent="${2:-false}"
    # Validate path
    if ! validate_path_for_deletion "$path"; then
        return 1
    fi
    # Check if path exists
    if [[ ! -e "$path" ]]; then
        return 0
    fi
    # Dry-run mode: log but don't delete
    if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
        if [[ "${MO_DEBUG:-}" == "1" ]]; then
            # Debug dry-run: also report type, size, and age of the target.
            local file_type="file"
            [[ -d "$path" ]] && file_type="directory"
            [[ -L "$path" ]] && file_type="symlink"
            local file_size=""
            local file_age=""
            if [[ -e "$path" ]]; then
                local size_kb
                size_kb=$(get_path_size_kb "$path" 2> /dev/null || echo "0")
                if [[ "$size_kb" -gt 0 ]]; then
                    file_size=$(bytes_to_human "$((size_kb * 1024))")
                fi
                if [[ -f "$path" || -d "$path" ]] && ! [[ -L "$path" ]]; then
                    local mod_time
                    mod_time=$(stat -f%m "$path" 2> /dev/null || echo "0")
                    local now
                    now=$(date +%s 2> /dev/null || echo "0")
                    if [[ "$mod_time" -gt 0 && "$now" -gt 0 ]]; then
                        # Age in whole days.
                        file_age=$(((now - mod_time) / 86400))
                    fi
                fi
            fi
            debug_file_action "[DRY RUN] Would remove" "$path" "$file_size" "$file_age"
        else
            debug_log "[DRY RUN] Would remove: $path"
        fi
        return 0
    fi
    debug_log "Removing: $path"
    # Perform the deletion
    # Use || to capture the exit code so set -e won't abort on rm failures
    local error_msg
    local rm_exit=0
    error_msg=$(rm -rf "$path" 2>&1) || rm_exit=$? # safe_remove
    if [[ $rm_exit -eq 0 ]]; then
        return 0
    else
        # Check if it's a permission error
        if [[ "$error_msg" == *"Permission denied"* ]] || [[ "$error_msg" == *"Operation not permitted"* ]]; then
            MOLE_PERMISSION_DENIED_COUNT=${MOLE_PERMISSION_DENIED_COUNT:-0}
            MOLE_PERMISSION_DENIED_COUNT=$((MOLE_PERMISSION_DENIED_COUNT + 1))
            export MOLE_PERMISSION_DENIED_COUNT
            debug_log "Permission denied: $path (may need Full Disk Access)"
        else
            [[ "$silent" != "true" ]] && log_error "Failed to remove: $path"
        fi
        return 1
    fi
}
# Safe sudo removal with symlink protection
# Remove a path with sudo after validation; refuses symlinks outright so
# root privileges can never follow a planted link.
#
# Args:
#   $1 - path to remove
#
# Returns 0 on success, when the path is absent, or in dry-run mode;
# returns 1 on validation failure, symlink target, or rm failure.
safe_sudo_remove() {
    local path="$1"
    # Refuse critical/system paths.
    if ! validate_path_for_deletion "$path"; then
        log_error "Path validation failed for sudo remove: $path"
        return 1
    fi
    # Nothing to do if the path is already gone.
    if [[ ! -e "$path" ]]; then
        return 0
    fi
    # Additional check: reject symlinks for sudo operations
    if [[ -L "$path" ]]; then
        log_error "Refusing to sudo remove symlink: $path"
        return 1
    fi
    # Dry-run mode: log but don't delete
    if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
        if [[ "${MO_DEBUG:-}" == "1" ]]; then
            local file_type="file" # NOTE(review): computed but not passed to debug_file_action
            [[ -d "$path" ]] && file_type="directory"
            local file_size=""
            local file_age=""
            # Use sudo for stat/du since the path may be root-only readable.
            if sudo test -e "$path" 2> /dev/null; then
                local size_kb
                size_kb=$(sudo du -sk "$path" 2> /dev/null | awk '{print $1}' || echo "0")
                if [[ "$size_kb" -gt 0 ]]; then
                    file_size=$(bytes_to_human "$((size_kb * 1024))")
                fi
                if sudo test -f "$path" 2> /dev/null || sudo test -d "$path" 2> /dev/null; then
                    local mod_time
                    # BSD/macOS stat flag for mtime in epoch seconds.
                    mod_time=$(sudo stat -f%m "$path" 2> /dev/null || echo "0")
                    local now
                    now=$(date +%s 2> /dev/null || echo "0")
                    if [[ "$mod_time" -gt 0 && "$now" -gt 0 ]]; then
                        file_age=$(((now - mod_time) / 86400))
                    fi
                fi
            fi
            debug_file_action "[DRY RUN] Would remove (sudo)" "$path" "$file_size" "$file_age"
        else
            debug_log "[DRY RUN] Would remove (sudo): $path"
        fi
        return 0
    fi
    debug_log "Removing (sudo): $path"
    # Perform the deletion
    if sudo rm -rf "$path" 2> /dev/null; then # SAFE: safe_sudo_remove implementation
        return 0
    else
        log_error "Failed to remove (sudo): $path"
        return 1
    fi
}
# ============================================================================
# Safe Find and Delete Operations
# ============================================================================
# Safe file discovery and deletion with depth and age limits
# Find entries matching a pattern under base_dir and delete them via
# safe_remove, honoring the protection list.
#
# Args:
#   $1 - base directory (must exist and must not be a symlink)
#   $2 - find -name pattern
#   $3 - minimum age in days (default 7; 0 disables the age filter)
#   $4 - type filter: "f" files or "d" directories (default "f")
#
# Search depth is capped at 5 levels. Always returns 0 after validation
# passes (individual removal failures are ignored).
safe_find_delete() {
    local base_dir="$1"
    local pattern="$2"
    local age_days="${3:-7}"
    local type_filter="${4:-f}"
    # Validate base directory exists and is not a symlink
    if [[ ! -d "$base_dir" ]]; then
        log_error "Directory does not exist: $base_dir"
        return 1
    fi
    if [[ -L "$base_dir" ]]; then
        log_error "Refusing to search symlinked directory: $base_dir"
        return 1
    fi
    # Validate type filter
    if [[ "$type_filter" != "f" && "$type_filter" != "d" ]]; then
        log_error "Invalid type filter: $type_filter (must be 'f' or 'd')"
        return 1
    fi
    debug_log "Finding in $base_dir: $pattern (age: ${age_days}d, type: $type_filter)"
    local find_args=("-maxdepth" "5" "-name" "$pattern" "-type" "$type_filter")
    if [[ "$age_days" -gt 0 ]]; then
        find_args+=("-mtime" "+$age_days")
    fi
    # NUL-delimited iteration so paths with spaces/newlines survive;
    # each match is re-checked against should_protect_path before removal.
    while IFS= read -r -d '' match; do
        if should_protect_path "$match"; then
            continue
        fi
        safe_remove "$match" true || true
    done < <(command find "$base_dir" "${find_args[@]}" -print0 2> /dev/null || true)
    return 0
}
# Safe sudo discovery and deletion
# Sudo variant of safe_find_delete for permission-restricted directories.
#
# Args:
#   $1 - base directory (checked with `sudo test`; missing dir is a no-op)
#   $2 - find -name pattern
#   $3 - minimum age in days (default 7; 0 disables the age filter)
#   $4 - type filter: "f" or "d" (default "f")
#
# Matches are removed with safe_sudo_remove; protected paths are skipped.
safe_sudo_find_delete() {
    local base_dir="$1"
    local pattern="$2"
    local age_days="${3:-7}"
    local type_filter="${4:-f}"
    # Validate base directory (use sudo for permission-restricted dirs)
    if ! sudo test -d "$base_dir" 2> /dev/null; then
        debug_log "Directory does not exist (skipping): $base_dir"
        return 0
    fi
    if sudo test -L "$base_dir" 2> /dev/null; then
        log_error "Refusing to search symlinked directory: $base_dir"
        return 1
    fi
    # Validate type filter
    if [[ "$type_filter" != "f" && "$type_filter" != "d" ]]; then
        log_error "Invalid type filter: $type_filter (must be 'f' or 'd')"
        return 1
    fi
    debug_log "Finding (sudo) in $base_dir: $pattern (age: ${age_days}d, type: $type_filter)"
    local find_args=("-maxdepth" "5" "-name" "$pattern" "-type" "$type_filter")
    if [[ "$age_days" -gt 0 ]]; then
        find_args+=("-mtime" "+$age_days")
    fi
    # NUL-delimited iteration; re-check protection list for every match.
    while IFS= read -r -d '' match; do
        if should_protect_path "$match"; then
            continue
        fi
        safe_sudo_remove "$match" || true
    done < <(sudo find "$base_dir" "${find_args[@]}" -print0 2> /dev/null || true)
    return 0
}
# ============================================================================
# Size Calculation
# ============================================================================
# Get path size in KB (returns 0 if not found)
# Print a path's disk usage in KB on stdout ("0" for empty/missing paths
# or when du produces no usable number).
get_path_size_kb() {
    local target="$1"
    if [[ -z "$target" || ! -e "$target" ]]; then
        echo "0"
        return
    fi
    # Run du directly (no timeout wrapper) - this is called in tight loops.
    # `|| true` keeps a du failure (e.g. permission error) from tripping
    # set -e / pipefail.
    local kb
    kb=$(command du -sk "$target" 2> /dev/null | awk 'NR==1 {print $1; exit}' || true)
    # Emit only a clean non-negative integer; anything else becomes 0.
    case "$kb" in
        '' | *[!0-9]*) echo "0" ;;
        *) echo "$kb" ;;
    esac
}
# Calculate total size for multiple paths
# Sum the sizes (KB) of a newline-separated list of paths.
#
# Args:
#   $1 - newline-separated path list (blank lines and missing paths skipped)
#
# Prints the total size in KB on stdout.
calculate_total_size() {
    local files="$1"
    local total_kb=0
    local file size_kb
    while IFS= read -r file; do
        if [[ -n "$file" && -e "$file" ]]; then
            size_kb=$(get_path_size_kb "$file")
            # Plain assignment instead of ((total_kb += size_kb)):
            # the arithmetic command returns a non-zero status when its
            # result is 0 (e.g. adding a 0-KB file to an empty total),
            # which aborts the script under `set -e` in non-condition
            # contexts. The $((...)) expansion has no such side effect.
            total_kb=$((total_kb + size_kb))
        fi
    done <<< "$files"
    echo "$total_kb"
}

View File

@@ -1,291 +0,0 @@
#!/bin/bash
# Mole - Logging System
# Centralized logging with rotation support
set -euo pipefail
# Load-once guard: skip re-initialization when sourced repeatedly.
if [[ -n "${MOLE_LOG_LOADED:-}" ]]; then
    return 0
fi
readonly MOLE_LOG_LOADED=1
# Ensure base.sh is loaded for colors and icons
if [[ -z "${MOLE_BASE_LOADED:-}" ]]; then
    _MOLE_CORE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
    # shellcheck source=lib/core/base.sh
    source "$_MOLE_CORE_DIR/base.sh"
fi
# ============================================================================
# Logging Configuration
# ============================================================================
readonly LOG_FILE="${HOME}/.config/mole/mole.log"
# Per-session debug log; reset by log_system_info when MO_DEBUG=1.
readonly DEBUG_LOG_FILE="${HOME}/.config/mole/mole_debug_session.log"
readonly LOG_MAX_SIZE_DEFAULT=1048576 # 1MB rotation threshold
# Ensure log directory and file exist with correct ownership
ensure_user_file "$LOG_FILE"
# ============================================================================
# Log Rotation
# ============================================================================
# Rotate log file if it exceeds maximum size
# Rotate the main log to "<LOG_FILE>.old" when it exceeds the size cap.
# Runs at most once per session (tracked via MOLE_LOG_ROTATED).
rotate_log_once() {
    if [[ -n "${MOLE_LOG_ROTATED:-}" ]]; then
        return 0
    fi
    export MOLE_LOG_ROTATED=1
    local limit="$LOG_MAX_SIZE_DEFAULT"
    if [[ -f "$LOG_FILE" ]] && [[ $(get_file_size "$LOG_FILE") -gt "$limit" ]]; then
        # Best-effort move; recreate an empty log with correct ownership.
        mv "$LOG_FILE" "${LOG_FILE}.old" 2> /dev/null || true
        ensure_user_file "$LOG_FILE"
    fi
}
# ============================================================================
# Logging Functions
# ============================================================================
# Log informational message
# Print an informational message (blue) and append it to the log file,
# plus the debug session log when MO_DEBUG=1.
log_info() {
    local msg="$1"
    echo -e "${BLUE}${msg}${NC}"
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo "[$ts] INFO: $msg" >> "$LOG_FILE" 2> /dev/null || true
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
        echo "[$ts] INFO: $msg" >> "$DEBUG_LOG_FILE" 2> /dev/null || true
    fi
}
# Log success message
# Print a success message (green check, indented) and append it to the
# log file, plus the debug session log when MO_DEBUG=1.
log_success() {
    local msg="$1"
    echo -e " ${GREEN}${ICON_SUCCESS}${NC} $msg"
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo "[$ts] SUCCESS: $msg" >> "$LOG_FILE" 2> /dev/null || true
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
        echo "[$ts] SUCCESS: $msg" >> "$DEBUG_LOG_FILE" 2> /dev/null || true
    fi
}
# Log warning message
# Print a warning message (yellow) and append it to the log file,
# plus the debug session log when MO_DEBUG=1.
log_warning() {
    local msg="$1"
    echo -e "${YELLOW}${msg}${NC}"
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo "[$ts] WARNING: $msg" >> "$LOG_FILE" 2> /dev/null || true
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
        echo "[$ts] WARNING: $msg" >> "$DEBUG_LOG_FILE" 2> /dev/null || true
    fi
}
# Log error message
# Print an error message to stderr (yellow icon) and append it to the
# log file, plus the debug session log when MO_DEBUG=1.
log_error() {
    local msg="$1"
    echo -e "${YELLOW}${ICON_ERROR}${NC} $msg" >&2
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo "[$ts] ERROR: $msg" >> "$LOG_FILE" 2> /dev/null || true
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
        echo "[$ts] ERROR: $msg" >> "$DEBUG_LOG_FILE" 2> /dev/null || true
    fi
}
# Debug logging (active when MO_DEBUG=1)
# Emit a debug line to stderr and the session log; no-op unless MO_DEBUG=1.
debug_log() {
    [[ "${MO_DEBUG:-}" == "1" ]] || return 0
    echo -e "${GRAY}[DEBUG]${NC} $*" >&2
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] DEBUG: $*" >> "$DEBUG_LOG_FILE" 2> /dev/null || true
}
# Enhanced debug logging for operations
# Announce the start of an operation in the debug trace.
# $1 - operation name; $2 - optional description.
# Writes to stderr for immediate feedback and to the session log file.
debug_operation_start() {
    local name="$1"
    local desc="${2:-}"
    [[ "${MO_DEBUG:-}" == "1" ]] || return 0
    echo -e "${GRAY}[DEBUG] === $name ===${NC}" >&2
    if [[ -n "$desc" ]]; then
        echo -e "${GRAY}[DEBUG] $desc${NC}" >&2
    fi
    {
        echo ""
        echo "=== $name ==="
        if [[ -n "$desc" ]]; then
            echo "Description: $desc"
        fi
    } >> "$DEBUG_LOG_FILE" 2> /dev/null || true
}
# Log detailed operation information
# Emit a labelled detail line for the current debug operation.
# $1 - label (e.g. "Method", "Target", "Expected Outcome"); $2 - value.
debug_operation_detail() {
    local label="$1"
    local value="$2"
    [[ "${MO_DEBUG:-}" == "1" ]] || return 0
    echo -e "${GRAY}[DEBUG] $label: $value${NC}" >&2
    echo "$label: $value" >> "$DEBUG_LOG_FILE" 2> /dev/null || true
}
# Log individual file action with metadata
# Log a single file action with optional size/age metadata.
#
# Args:
#   $1 - action label (e.g. "Would remove", "Removing")
#   $2 - file path
#   $3 - human-readable size (optional)
#   $4 - age in days (optional)
debug_file_action() {
    local action="$1" # e.g., "Would remove", "Removing"
    local file_path="$2"
    local file_size="${3:-}"
    local file_age="${4:-}"
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
        local msg=" - $file_path"
        # NOTE(review): the parentheses are only opened/closed when a size is
        # given; an age without a size prints as ", N days old" with no
        # parens - confirm this formatting is intentional.
        [[ -n "$file_size" ]] && msg+=" ($file_size"
        [[ -n "$file_age" ]] && msg+=", ${file_age} days old"
        [[ -n "$file_size" ]] && msg+=")"
        # Output to stderr
        echo -e "${GRAY}[DEBUG] $action: $msg${NC}" >&2
        # Also log to file
        echo "$action: $msg" >> "$DEBUG_LOG_FILE" 2> /dev/null || true
    fi
}
# Log risk level for operations
# Report an operation's risk level (LOW/MEDIUM/HIGH) in the debug trace.
# Unknown levels fall back to gray. $1 - level; $2 - reason.
debug_risk_level() {
    local level="$1"
    local reason="$2"
    [[ "${MO_DEBUG:-}" == "1" ]] || return 0
    local color
    case "$level" in
        LOW) color="$GREEN" ;;
        MEDIUM) color="$YELLOW" ;;
        HIGH) color="$RED" ;;
        *) color="$GRAY" ;;
    esac
    echo -e "${GRAY}[DEBUG] Risk Level: ${color}${level}${GRAY} ($reason)${NC}" >&2
    echo "Risk Level: $level ($reason)" >> "$DEBUG_LOG_FILE" 2> /dev/null || true
}
# Log system information for debugging
# Write a one-time system snapshot to the debug session log.
# Truncates DEBUG_LOG_FILE to start a fresh session record, then appends
# host/OS/shell/sudo details useful in bug reports. Runs once per session.
log_system_info() {
    # Only allow once per session
    [[ -n "${MOLE_SYS_INFO_LOGGED:-}" ]] && return 0
    export MOLE_SYS_INFO_LOGGED=1
    # Reset debug log file for this new session
    ensure_user_file "$DEBUG_LOG_FILE"
    : > "$DEBUG_LOG_FILE"
    # Start block in debug log file
    {
        echo "----------------------------------------------------------------------"
        echo "Mole Debug Session - $(date '+%Y-%m-%d %H:%M:%S')"
        echo "----------------------------------------------------------------------"
        echo "User: $USER"
        echo "Hostname: $(hostname)"
        echo "Architecture: $(uname -m)"
        echo "Kernel: $(uname -r)"
        # sw_vers exists only on macOS; skip the line elsewhere.
        if command -v sw_vers > /dev/null; then
            echo "macOS: $(sw_vers -productVersion) ($(sw_vers -buildVersion))"
        fi
        echo "Shell: ${SHELL:-unknown} (${TERM:-unknown})"
        # Check sudo status non-interactively (-n never prompts).
        if sudo -n true 2> /dev/null; then
            echo "Sudo Access: Active"
        else
            echo "Sudo Access: Required"
        fi
        echo "----------------------------------------------------------------------"
    } >> "$DEBUG_LOG_FILE" 2> /dev/null || true
    # Notification to stderr
    echo -e "${GRAY}[DEBUG] Debug logging enabled. Session log: $DEBUG_LOG_FILE${NC}" >&2
}
# ============================================================================
# Command Execution Wrappers
# ============================================================================
# Run command silently (ignore errors)
# Execute a command, discarding all output and ignoring any failure.
run_silent() {
    if ! "$@" > /dev/null 2>&1; then
        return 0
    fi
}
# Run command with error logging
# Run a command, appending its combined stdout/stderr to the log file
# (and to the debug session log when MO_DEBUG=1).
#
# Args:
#   $1 - used only in the failure message; the full "$@" is executed.
#
# Returns 1 and logs a warning on failure. With `set -o pipefail` active
# (set at file top), a failure anywhere in the pipeline trips the `if !`.
run_logged() {
    local cmd="$1"
    # Log to main file, and also to debug file if enabled
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
        if ! "$@" 2>&1 | tee -a "$LOG_FILE" | tee -a "$DEBUG_LOG_FILE" > /dev/null; then
            log_warning "Command failed: $cmd"
            return 1
        fi
    else
        if ! "$@" 2>&1 | tee -a "$LOG_FILE" > /dev/null; then
            log_warning "Command failed: $cmd"
            return 1
        fi
    fi
    return 0
}
# ============================================================================
# Formatted Output
# ============================================================================
# Print formatted summary block
# Print a framed summary: a heading plus detail lines between dividers.
# $1 - heading (may be empty, then omitted); remaining args - detail lines
# (empty strings are skipped). In debug mode, also prints the session log path.
print_summary_block() {
    local heading=""
    local -a details=()
    if (($# > 0)); then
        heading="$1"
        shift
        details=("$@")
    fi
    local divider="======================================================================"
    echo ""
    echo "$divider"
    if [[ -n "$heading" ]]; then
        echo -e "${BLUE}${heading}${NC}"
    fi
    local line
    for line in "${details[@]}"; do
        if [[ -n "$line" ]]; then
            echo -e "${line}"
        fi
    done
    echo "$divider"
    # Remind the user where the debug session log lives.
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
        echo -e "${GRAY}Debug session log saved to:${NC} ${DEBUG_LOG_FILE}"
    fi
}
# ============================================================================
# Initialize Logging
# ============================================================================
# Perform the once-per-session log rotation check on module load.
rotate_log_once
# If debug mode is enabled, capture the system snapshot immediately.
if [[ "${MO_DEBUG:-}" == "1" ]]; then
    log_system_info
fi

View File

@@ -1,319 +0,0 @@
#!/bin/bash
# Sudo Session Manager
# Unified sudo authentication and keepalive management
set -euo pipefail
# ============================================================================
# Touch ID and Clamshell Detection
# ============================================================================
# Return 0 when sudo is configured for Touch ID (pam_tid.so present).
# /etc/pam.d/sudo_local (macOS Sonoma+) takes precedence; if it exists,
# its grep result decides, otherwise the legacy /etc/pam.d/sudo is checked.
check_touchid_support() {
    local pam_file
    for pam_file in /etc/pam.d/sudo_local /etc/pam.d/sudo; do
        if [[ -f "$pam_file" ]]; then
            grep -q "pam_tid.so" "$pam_file" 2> /dev/null
            return $?
        fi
    done
    return 1
}
# Detect clamshell mode (lid closed)
is_clamshell_mode() {
# ioreg is missing (not macOS) -> treat as lid open
if ! command -v ioreg > /dev/null 2>&1; then
return 1
fi
# Check if lid is closed; ignore pipeline failures so set -e doesn't exit
local clamshell_state=""
clamshell_state=$( (ioreg -r -k AppleClamshellState -d 4 2> /dev/null |
grep "AppleClamshellState" |
head -1) || true)
if [[ "$clamshell_state" =~ \"AppleClamshellState\"\ =\ Yes ]]; then
return 0 # Lid is closed
fi
return 1 # Lid is open
}
# Prompt for the user's password on the given TTY and validate it via sudo.
# Allows up to 3 attempts. Terminal echo is disabled during input, and the
# original stty settings are restored by a RETURN trap even on early exit.
#
# Args:
#   $1 - TTY device path (readable and writable)
#
# Returns 0 once sudo accepts the password, 1 after three failures.
_request_password() {
    local tty_path="$1"
    local attempts=0
    local show_hint=true
    # Extra safety: ensure sudo cache is cleared before password input
    sudo -k 2> /dev/null
    # Save original terminal settings and ensure they're restored on exit
    local stty_orig
    stty_orig=$(stty -g < "$tty_path" 2> /dev/null || echo "")
    trap '[[ -n "${stty_orig:-}" ]] && stty "${stty_orig:-}" < "$tty_path" 2> /dev/null || true' RETURN
    while ((attempts < 3)); do
        local password=""
        # Show hint on first attempt about Touch ID appearing again
        if [[ $show_hint == true ]] && check_touchid_support; then
            echo -e "${GRAY}Note: Touch ID dialog may appear once more - just cancel it${NC}" > "$tty_path"
            show_hint=false
        fi
        printf "${PURPLE}${ICON_ARROW}${NC} Password: " > "$tty_path"
        # Disable terminal echo to hide password input
        stty -echo -icanon min 1 time 0 < "$tty_path" 2> /dev/null || true
        IFS= read -r password < "$tty_path" || password=""
        # Restore terminal echo immediately
        stty echo icanon < "$tty_path" 2> /dev/null || true
        printf "\n" > "$tty_path"
        if [[ -z "$password" ]]; then
            unset password
            # NOTE(review): ((attempts++)) returns status 1 when attempts is
            # 0; this relies on errexit being suppressed at the call sites
            # (callers invoke this inside `if` conditions) - verify.
            ((attempts++))
            if [[ $attempts -lt 3 ]]; then
                echo -e "${YELLOW}${ICON_WARNING}${NC} Password cannot be empty" > "$tty_path"
            fi
            continue
        fi
        # Verify password with sudo
        # NOTE: macOS PAM will trigger Touch ID before password auth - this is system behavior
        if printf '%s\n' "$password" | sudo -S -p "" -v > /dev/null 2>&1; then
            unset password
            return 0
        fi
        unset password
        ((attempts++))
        if [[ $attempts -lt 3 ]]; then
            echo -e "${YELLOW}${ICON_WARNING}${NC} Incorrect password, try again" > "$tty_path"
        fi
    done
    return 1
}
# Obtain sudo credentials interactively, preferring Touch ID when usable.
#
# Flow:
#   1. Reuse cached credentials when present.
#   2. In clamshell mode (lid closed) or without pam_tid.so configured,
#      go straight to password entry.
#   3. Otherwise run `sudo -v` in the background so Touch ID can prompt,
#      wait up to 5 seconds, then fall back to password on failure/cancel.
#
# Args:
#   $1 - prompt message (default "Admin access required")
#
# Returns 0 once credentials are cached, 1 otherwise.
request_sudo_access() {
    local prompt_msg="${1:-Admin access required}"
    # Check if already have sudo access
    if sudo -n true 2> /dev/null; then
        return 0
    fi
    # Resolve an interactive TTY for prompting and echo control.
    local tty_path="/dev/tty"
    if [[ ! -r "$tty_path" || ! -w "$tty_path" ]]; then
        tty_path=$(tty 2> /dev/null || echo "")
        if [[ -z "$tty_path" || ! -r "$tty_path" || ! -w "$tty_path" ]]; then
            log_error "No interactive terminal available"
            return 1
        fi
    fi
    # Drop stale credentials so authentication starts from a known state.
    sudo -k
    # Check if in clamshell mode - if yes, skip Touch ID entirely
    if is_clamshell_mode; then
        echo -e "${PURPLE}${ICON_ARROW}${NC} ${prompt_msg}"
        if _request_password "$tty_path"; then
            # Clear all prompt lines (use safe clearing method)
            safe_clear_lines 3 "$tty_path"
            return 0
        fi
        return 1
    fi
    # Not in clamshell mode - try Touch ID if configured
    if ! check_touchid_support; then
        echo -e "${PURPLE}${ICON_ARROW}${NC} ${prompt_msg}"
        if _request_password "$tty_path"; then
            # Clear all prompt lines (use safe clearing method)
            safe_clear_lines 3 "$tty_path"
            return 0
        fi
        return 1
    fi
    # Touch ID is available and not in clamshell mode
    echo -e "${PURPLE}${ICON_ARROW}${NC} ${prompt_msg} ${GRAY}(Touch ID or password)${NC}"
    # Start sudo in background so we can monitor and control it
    sudo -v < /dev/null > /dev/null 2>&1 &
    local sudo_pid=$!
    # Wait for sudo to complete or timeout (5 seconds)
    local elapsed=0
    local timeout=50 # 50 * 0.1s = 5 seconds
    while ((elapsed < timeout)); do
        if ! kill -0 "$sudo_pid" 2> /dev/null; then
            # Process exited
            wait "$sudo_pid" 2> /dev/null
            local exit_code=$?
            if [[ $exit_code -eq 0 ]] && sudo -n true 2> /dev/null; then
                # Touch ID succeeded - clear the prompt line
                safe_clear_lines 1 "$tty_path"
                return 0
            fi
            # Touch ID failed or cancelled
            break
        fi
        sleep 0.1
        ((elapsed++))
    done
    # Touch ID failed/cancelled - clean up thoroughly before password input
    # Kill the sudo process if still running
    if kill -0 "$sudo_pid" 2> /dev/null; then
        kill -9 "$sudo_pid" 2> /dev/null
        wait "$sudo_pid" 2> /dev/null || true
    fi
    # Clear sudo state immediately
    sudo -k 2> /dev/null
    # IMPORTANT: Wait longer for macOS to fully close Touch ID UI and SecurityAgent
    # Without this delay, subsequent sudo calls may re-trigger Touch ID
    sleep 1
    # Clear any leftover prompts on the screen
    safe_clear_line "$tty_path"
    # Now use our password input (this should not trigger Touch ID again)
    if _request_password "$tty_path"; then
        # Clear all prompt lines (use safe clearing method)
        safe_clear_lines 3 "$tty_path"
        return 0
    fi
    return 1
}
# ============================================================================
# Sudo Session Management
# ============================================================================
# Global state for the current sudo session
MOLE_SUDO_KEEPALIVE_PID=""    # PID of the background keepalive loop ("" = not running)
MOLE_SUDO_ESTABLISHED="false" # "true" once ensure_sudo_session has succeeded
# Start sudo keepalive
# Start a background loop that refreshes the sudo timestamp every 30s.
# Prints the keepalive PID; the caller must stop it via _stop_sudo_keepalive.
# The loop exits on its own after 3 consecutive failed refreshes or when
# the parent shell is gone.
_start_sudo_keepalive() {
    # Start background keepalive process with all outputs redirected.
    # This is critical: command substitution in the caller waits for all
    # inherited file descriptors to close.
    (
        # Initial delay to let the sudo cache stabilize after password
        # entry; prevents immediately re-triggering Touch ID.
        sleep 2
        local retry_count=0
        while true; do
            if ! sudo -n -v 2> /dev/null; then
                # Plain assignment instead of ((retry_count++)): the
                # arithmetic command returns status 1 when the value it
                # evaluates is 0, which would terminate this `set -e`
                # subshell on the FIRST failed refresh instead of
                # allowing three retries.
                retry_count=$((retry_count + 1))
                if [[ $retry_count -ge 3 ]]; then
                    exit 1
                fi
                sleep 5
                continue
            fi
            retry_count=0
            sleep 30
            # $$ still expands to the parent shell's PID inside this
            # subshell; exit once the parent is gone.
            kill -0 "$$" 2> /dev/null || exit
        done
    ) > /dev/null 2>&1 &
    local pid=$!
    echo $pid
}
# Stop sudo keepalive
_stop_sudo_keepalive() {
local pid="${1:-}"
if [[ -n "$pid" ]]; then
kill "$pid" 2> /dev/null || true
wait "$pid" 2> /dev/null || true
fi
}
# Check if sudo session is active
has_sudo_session() {
sudo -n true 2> /dev/null
}
# Request administrative access
request_sudo() {
local prompt_msg="${1:-Admin access required}"
if has_sudo_session; then
return 0
fi
# Use the robust implementation from common.sh
if request_sudo_access "$prompt_msg"; then
return 0
else
return 1
fi
}
# Maintain active sudo session with keepalive
# Establish (or re-establish) a sudo session with a background keepalive.
#
# Args:
#   $1 - prompt message shown if authentication is needed
#
# Updates globals MOLE_SUDO_KEEPALIVE_PID / MOLE_SUDO_ESTABLISHED.
# Returns 0 with an active session and keepalive running, 1 on failure.
ensure_sudo_session() {
    local prompt="${1:-Admin access required}"
    # Fast path: session already active and marked established.
    if has_sudo_session && [[ "$MOLE_SUDO_ESTABLISHED" == "true" ]]; then
        return 0
    fi
    # Stop any stale keepalive before re-authenticating.
    if [[ -n "$MOLE_SUDO_KEEPALIVE_PID" ]]; then
        _stop_sudo_keepalive "$MOLE_SUDO_KEEPALIVE_PID"
        MOLE_SUDO_KEEPALIVE_PID=""
    fi
    # Request sudo access
    if ! request_sudo "$prompt"; then
        MOLE_SUDO_ESTABLISHED="false"
        return 1
    fi
    # Start keepalive
    MOLE_SUDO_KEEPALIVE_PID=$(_start_sudo_keepalive)
    MOLE_SUDO_ESTABLISHED="true"
    return 0
}
# Stop sudo session and cleanup
stop_sudo_session() {
if [[ -n "$MOLE_SUDO_KEEPALIVE_PID" ]]; then
_stop_sudo_keepalive "$MOLE_SUDO_KEEPALIVE_PID"
MOLE_SUDO_KEEPALIVE_PID=""
fi
MOLE_SUDO_ESTABLISHED="false"
}
# Register cleanup on script exit
register_sudo_cleanup() {
trap stop_sudo_session EXIT INT TERM
}
# Predict if operation requires administrative access
will_need_sudo() {
local -a operations=("$@")
for op in "${operations[@]}"; do
case "$op" in
system_update | appstore_update | macos_update | firewall | touchid | rosetta | system_fix)
return 0
;;
esac
done
return 1
}

View File

@@ -1,156 +0,0 @@
#!/bin/bash
# Mole - Timeout Control
# Command execution with timeout support
set -euo pipefail
# Load-once guard: skip re-initialization when sourced repeatedly.
if [[ -n "${MOLE_TIMEOUT_LOADED:-}" ]]; then
    return 0
fi
readonly MOLE_TIMEOUT_LOADED=1
# ============================================================================
# Timeout Command Initialization
# ============================================================================
# Initialize timeout command (prefer gtimeout from coreutils, fallback to timeout)
# Sets MO_TIMEOUT_BIN to the available timeout command
#
# Recommendation: Install coreutils for reliable timeout support
#   brew install coreutils
#
# The shell-based fallback has known limitations:
# - May not clean up all child processes
# - Has race conditions in edge cases
# - Less reliable than native timeout command
if [[ -z "${MO_TIMEOUT_INITIALIZED:-}" ]]; then
    MO_TIMEOUT_BIN=""
    # First match wins: gtimeout (coreutils) is preferred over system timeout.
    for candidate in gtimeout timeout; do
        if command -v "$candidate" > /dev/null 2>&1; then
            MO_TIMEOUT_BIN="$candidate"
            if [[ "${MO_DEBUG:-0}" == "1" ]]; then
                echo "[TIMEOUT] Using command: $candidate" >&2
            fi
            break
        fi
    done
    # Log warning if no timeout command available
    if [[ -z "$MO_TIMEOUT_BIN" ]] && [[ "${MO_DEBUG:-0}" == "1" ]]; then
        echo "[TIMEOUT] No timeout command found, using shell fallback" >&2
        echo "[TIMEOUT] Install coreutils for better reliability: brew install coreutils" >&2
    fi
    export MO_TIMEOUT_INITIALIZED=1
fi
# ============================================================================
# Timeout Execution
# ============================================================================
# Run command with timeout
# Uses gtimeout/timeout if available, falls back to shell-based implementation
#
# Args:
# $1 - duration in seconds (0 or invalid = no timeout)
# $@ - command and arguments to execute
#
# Returns:
# Command exit code, or 124 if timed out (matches gtimeout behavior)
#
# Environment:
# MO_DEBUG - Set to 1 to enable debug logging to stderr
#
# Implementation notes:
# - Prefers gtimeout (coreutils) or timeout for reliability
# - Shell fallback uses SIGTERM → SIGKILL escalation
# - Attempts process group cleanup to handle child processes
# - Returns exit code 124 on timeout (standard timeout exit code)
#
# Known limitations of shell-based fallback:
# - Race condition: If command exits during signal delivery, the signal
# may target a reused PID (very rare, requires quick PID reuse)
# - Zombie processes: Brief zombies until wait completes
# - Nested children: SIGKILL may not reach all descendants
# - No process group: Cannot guarantee cleanup of detached children
#
# For mission-critical timeouts, install coreutils.
# Run a command with a timeout (see the detailed contract in the comment
# block above this function). Returns the command's exit code, or 124 on
# timeout to match gtimeout's convention.
run_with_timeout() {
    local duration="${1:-0}"
    shift || true
    # No timeout if duration is invalid or zero.
    # NOTE(review): when bc is absent the substitution yields "", and
    # [[ '' -eq 1 ]] is false, so such durations fall through to the
    # timeout path - confirm bc availability on target systems.
    if [[ ! "$duration" =~ ^[0-9]+(\.[0-9]+)?$ ]] || [[ $(echo "$duration <= 0" | bc -l 2> /dev/null) -eq 1 ]]; then
        "$@"
        return $?
    fi
    # Use timeout command if available (preferred path)
    if [[ -n "${MO_TIMEOUT_BIN:-}" ]]; then
        if [[ "${MO_DEBUG:-0}" == "1" ]]; then
            echo "[TIMEOUT] Running with ${duration}s timeout: $*" >&2
        fi
        "$MO_TIMEOUT_BIN" "$duration" "$@"
        return $?
    fi
    # ========================================================================
    # Shell-based fallback implementation
    # ========================================================================
    if [[ "${MO_DEBUG:-0}" == "1" ]]; then
        echo "[TIMEOUT] Shell fallback (${duration}s): $*" >&2
    fi
    # Start command in background
    "$@" &
    local cmd_pid=$!
    # Start timeout killer in background
    (
        # Wait for timeout duration
        sleep "$duration"
        # Check if process still exists
        if kill -0 "$cmd_pid" 2> /dev/null; then
            # Try to kill process group first (negative PID), fallback to single process
            # Process group kill is best effort - may not work if setsid was used
            kill -TERM -"$cmd_pid" 2> /dev/null || kill -TERM "$cmd_pid" 2> /dev/null || true
            # Grace period for clean shutdown
            sleep 2
            # Escalate to SIGKILL if still alive
            if kill -0 "$cmd_pid" 2> /dev/null; then
                kill -KILL -"$cmd_pid" 2> /dev/null || kill -KILL "$cmd_pid" 2> /dev/null || true
            fi
        fi
    ) &
    local killer_pid=$!
    # Wait for command to complete; disable errexit so a non-zero exit
    # can be captured rather than aborting the script.
    local exit_code=0
    set +e
    wait "$cmd_pid" 2> /dev/null
    exit_code=$?
    set -e
    # Clean up killer process
    if kill -0 "$killer_pid" 2> /dev/null; then
        kill "$killer_pid" 2> /dev/null || true
        wait "$killer_pid" 2> /dev/null || true
    fi
    # Check if command was killed by timeout (exit codes 143=SIGTERM, 137=SIGKILL)
    if [[ $exit_code -eq 143 || $exit_code -eq 137 ]]; then
        # Command was killed by timeout
        if [[ "${MO_DEBUG:-0}" == "1" ]]; then
            echo "[TIMEOUT] Command timed out after ${duration}s" >&2
        fi
        return 124
    fi
    # Command completed normally (or with its own error)
    return "$exit_code"
}

View File

@@ -1,434 +0,0 @@
#!/bin/bash
# Mole - UI Components
# Terminal UI utilities: cursor control, keyboard input, spinners, menus
set -euo pipefail
# Load-once guard: skip re-initialization when sourced repeatedly.
if [[ -n "${MOLE_UI_LOADED:-}" ]]; then
    return 0
fi
readonly MOLE_UI_LOADED=1
# Resolve this file's directory and pull in base.sh for colors/icons.
_MOLE_CORE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
[[ -z "${MOLE_BASE_LOADED:-}" ]] && source "$_MOLE_CORE_DIR/base.sh"
# Cursor control helpers (ANSI escape sequences).
# clear_screen wipes the display and homes the cursor on stdout;
# hide/show_cursor write to stderr and are no-ops when stdout is not a TTY.
clear_screen() { printf '\033[2J\033[H'; }
hide_cursor() {
    if [[ -t 1 ]]; then
        printf '\033[?25l' >&2
    fi
    return 0
}
show_cursor() {
    if [[ -t 1 ]]; then
        printf '\033[?25h' >&2
    fi
    return 0
}
# Calculate display width (CJK characters count as 2)
# Compute the terminal display width of a string, counting CJK characters
# as 2 columns, using a fast byte-vs-char heuristic (no external forks).
# Prints the width on stdout. Temporarily flips LC_ALL between UTF-8 and C
# to obtain character and byte counts; restores it afterwards.
get_display_width() {
    local str="$1"
    # Optimized pure bash implementation without forks
    local width
    # Save current locale
    local old_lc="${LC_ALL:-}"
    # Get Char Count (UTF-8)
    # We must export ensuring it applies to the expansion (though just assignment often works in newer bash, export is safer for all subshells/cmds)
    export LC_ALL=en_US.UTF-8
    local char_count=${#str}
    # Get Byte Count (C)
    export LC_ALL=C
    local byte_count=${#str}
    # Restore Locale immediately
    if [[ -n "$old_lc" ]]; then
        export LC_ALL="$old_lc"
    else
        unset LC_ALL
    fi
    # Pure ASCII: bytes == chars, width == chars.
    if [[ $byte_count -eq $char_count ]]; then
        echo "$char_count"
        return
    fi
    # CJK Heuristic:
    # Most CJK chars are 3 bytes in UTF-8 and width 2.
    # ASCII chars are 1 byte and width 1.
    # Width ~= CharCount + (ByteCount - CharCount) / 2
    # "中" (1 char, 3 bytes) -> 1 + (2)/2 = 2.
    # "A" (1 char, 1 byte) -> 1 + 0 = 1.
    # This is an approximation but very fast and sufficient for App names.
    # Integer arithmetic in bash automatically handles floor.
    local extra_bytes=$((byte_count - char_count))
    local padding=$((extra_bytes / 2))
    width=$((char_count + padding))
    # Adjust for zero-width joiners and emoji variation selectors (common in filenames/emojis)
    # These characters add bytes but no visible width; subtract their count if present.
    local zwj=$'\u200d'  # zero-width joiner
    local vs16=$'\ufe0f' # emoji variation selector
    local zero_width=0
    local without_zwj=${str//$zwj/}
    zero_width=$((zero_width + (char_count - ${#without_zwj})))
    local without_vs=${str//$vs16/}
    zero_width=$((zero_width + (char_count - ${#without_vs})))
    if ((zero_width > 0 && width > zero_width)); then
        width=$((width - zero_width))
    fi
    echo "$width"
}
# Truncate string by display width (handles CJK)
# Truncate a string so its display width (CJK-aware) fits max_width,
# appending "..." when truncation occurs. Strings already within the
# limit are returned unchanged.
#
# Args:
#   $1 - string
#   $2 - maximum display width in columns
truncate_by_display_width() {
    local str="$1"
    local max_width="$2"
    local current_width
    current_width=$(get_display_width "$str")
    if [[ $current_width -le $max_width ]]; then
        echo "$str"
        return
    fi
    # Fallback: Use pure bash character iteration
    # Since we need to know the width of *each* character to truncate at the right spot,
    # we cannot just use the total width formula on the whole string.
    # However, iterating char-by-char and calling the optimized get_display_width function
    # is now much faster because it doesn't fork 'wc'.
    # CRITICAL: Switch to UTF-8 for correct character iteration
    local old_lc="${LC_ALL:-}"
    export LC_ALL=en_US.UTF-8
    local truncated=""
    local width=0
    local i=0
    local char char_width
    local strlen=${#str} # Re-calculate in UTF-8
    # Walk characters, accumulating width until the "..." suffix would
    # no longer fit (hence the "+ 3" in the break condition).
    while [[ $i -lt $strlen ]]; do
        char="${str:$i:1}"
        # Inlined width check: ASCII counts as 1 column; any non-ASCII
        # character is assumed wide (2 columns) - an approximation that
        # avoids flipping the locale inside the loop.
        if [[ "$char" =~ [[:ascii:]] ]]; then
            char_width=1
        else
            char_width=2
        fi
        if ((width + char_width + 3 > max_width)); then
            break
        fi
        truncated+="$char"
        ((width += char_width))
        ((i++))
    done
    # Restore locale
    if [[ -n "$old_lc" ]]; then
        export LC_ALL="$old_lc"
    else
        unset LC_ALL
    fi
    echo "${truncated}..."
}
# Read single keyboard input.
# Prints one symbolic token on stdout: ENTER, SPACE, FILTER, QUIT, RETRY,
# MORE, UPDATE, TOUCHID, UP/DOWN/LEFT/RIGHT, DELETE, CHAR:<c>, or OTHER.
# Always returns 0; a failed read (EOF, closed tty) is reported as QUIT.
read_key() {
    local key rest read_status
    # Silent (-s), raw (-r), single-character (-n 1) read.
    IFS= read -r -s -n 1 key
    read_status=$?
    [[ $read_status -ne 0 ]] && {
        echo "QUIT"
        return 0
    }
    # Text-entry mode: treat printable characters literally instead of as
    # navigation shortcuts (used by filter/search prompts).
    if [[ "${MOLE_READ_KEY_FORCE_CHAR:-}" == "1" ]]; then
        # read -n 1 yields an empty string for a newline → Enter.
        [[ -z "$key" ]] && {
            echo "ENTER"
            return 0
        }
        case "$key" in
            $'\n' | $'\r') echo "ENTER" ;;
            $'\x7f' | $'\x08') echo "DELETE" ;; # Backspace variants (DEL/BS)
            $'\x1b') echo "QUIT" ;;             # Bare ESC cancels input
            [[:print:]]) echo "CHAR:$key" ;;
            *) echo "OTHER" ;;
        esac
        return 0
    fi
    # read -n 1 yields an empty string for a newline → Enter.
    [[ -z "$key" ]] && {
        echo "ENTER"
        return 0
    }
    case "$key" in
        $'\n' | $'\r') echo "ENTER" ;;
        ' ') echo "SPACE" ;;
        '/') echo "FILTER" ;;
        'q' | 'Q') echo "QUIT" ;;
        'R') echo "RETRY" ;;
        'm' | 'M') echo "MORE" ;;
        'u' | 'U') echo "UPDATE" ;;
        't' | 'T') echo "TOUCHID" ;;
        'j' | 'J') echo "DOWN" ;;  # vim-style navigation keys
        'k' | 'K') echo "UP" ;;
        'h' | 'H') echo "LEFT" ;;
        'l' | 'L') echo "RIGHT" ;;
        $'\x03') echo "QUIT" ;;             # Ctrl-C
        $'\x7f' | $'\x08') echo "DELETE" ;; # Backspace variants (DEL/BS)
        $'\x1b')
            # ESC: disambiguate a bare ESC press from an ANSI escape
            # sequence by polling (1s timeout) for continuation bytes.
            if IFS= read -r -s -n 1 -t 1 rest 2> /dev/null; then
                if [[ "$rest" == "[" ]]; then
                    # CSI: ESC [ A/B/C/D = arrows, ESC [ 3 ~ = forward delete.
                    if IFS= read -r -s -n 1 -t 1 rest2 2> /dev/null; then
                        case "$rest2" in
                            "A") echo "UP" ;; "B") echo "DOWN" ;;
                            "C") echo "RIGHT" ;; "D") echo "LEFT" ;;
                            "3")
                                IFS= read -r -s -n 1 -t 1 rest3 2> /dev/null
                                [[ "$rest3" == "~" ]] && echo "DELETE" || echo "OTHER"
                                ;;
                            *) echo "OTHER" ;;
                        esac
                    else echo "QUIT"; fi
                elif [[ "$rest" == "O" ]]; then
                    # SS3: ESC O A/B/C/D (application cursor-key mode).
                    if IFS= read -r -s -n 1 -t 1 rest2 2> /dev/null; then
                        case "$rest2" in
                            "A") echo "UP" ;; "B") echo "DOWN" ;;
                            "C") echo "RIGHT" ;; "D") echo "LEFT" ;;
                            *) echo "OTHER" ;;
                        esac
                    else echo "OTHER"; fi
                else echo "OTHER"; fi
            else echo "QUIT"; fi # Timed out with no follow-up byte: plain ESC
            ;;
        [[:print:]]) echo "CHAR:$key" ;;
        *) echo "OTHER" ;;
    esac
}
drain_pending_input() {
local drained=0
while IFS= read -r -s -n 1 -t 0.01 _ 2> /dev/null; do
((drained++))
[[ $drained -gt 100 ]] && break
done
}
# Render one numbered menu row.
# $1: number label, $2: row text, $3: "true" when this row is the cursor row.
# The active row gets a colored arrow marker; other rows are plain.
show_menu_option() {
    local num="$1" label="$2" is_active="$3"
    case "$is_active" in
        true)
            echo -e "${CYAN}${ICON_ARROW} $num. $label${NC}"
            ;;
        *)
            echo "  $num. $label"
            ;;
    esac
}
# Background spinner implementation.
# Shared state between start_inline_spinner and stop_inline_spinner:
INLINE_SPINNER_PID=""       # PID of the background animation subshell
INLINE_SPINNER_STOP_FILE="" # Flag file whose existence tells the subshell to exit

# Start an animated spinner with the given message, drawn on stderr.
# Any spinner already running is stopped first. When stdout is not a TTY,
# a single static line is printed instead of an animation.
start_inline_spinner() {
    stop_inline_spinner 2> /dev/null || true
    local message="$1"
    if [[ -t 1 ]]; then
        # Create unique stop flag file for this spinner instance.
        INLINE_SPINNER_STOP_FILE="${TMPDIR:-/tmp}/mole_spinner_$$_$RANDOM.stop"
        (
            local stop_file="$INLINE_SPINNER_STOP_FILE"
            local chars
            chars="$(mo_spinner_chars)"
            [[ -z "$chars" ]] && chars="|/-\\"
            local i=0
            # Cooperative exit: poll for the stop file instead of relying
            # on signal delivery (robust while the parent sits in a prompt).
            while [[ ! -f "$stop_file" ]]; do
                local c="${chars:$((i % ${#chars})):1}"
                # Output to stderr to avoid interfering with stdout.
                printf "\r${MOLE_SPINNER_PREFIX:-}${BLUE}%s${NC} %s" "$c" "$message" >&2 || break
                # FIX: "((i++))" evaluates to 0 on the first frame → exit
                # status 1 → the inherited `set -e` kills the subshell after
                # a single frame. "i += 1" always yields a non-zero value.
                ((i += 1))
                sleep 0.1
            done
            # Clean up stop file before exiting.
            rm -f "$stop_file" 2> /dev/null || true
            exit 0
        ) &
        INLINE_SPINNER_PID=$!
        disown 2> /dev/null || true # Suppress job-control notifications
    else
        echo -n " ${BLUE}|${NC} $message" >&2 || true
    fi
}
# Stop the background spinner started by start_inline_spinner.
# Prefers a cooperative shutdown (stop file); SIGKILL only as a last
# resort. Clears the spinner line and resets shared state. No-op when no
# spinner is running.
stop_inline_spinner() {
    if [[ -n "$INLINE_SPINNER_PID" ]]; then
        # Cooperative stop: create stop file to signal spinner to exit.
        if [[ -n "$INLINE_SPINNER_STOP_FILE" ]]; then
            touch "$INLINE_SPINNER_STOP_FILE" 2> /dev/null || true
        fi
        # Wait up to ~0.25s (5 x 50ms) for the subshell to notice.
        local wait_count=0
        while kill -0 "$INLINE_SPINNER_PID" 2> /dev/null && [[ $wait_count -lt 5 ]]; do
            sleep 0.05 2> /dev/null || true
            # FIX: "((wait_count++))" returns status 1 on the first
            # increment (old value 0), aborting under `set -e`.
            ((wait_count += 1))
        done
        # Only use SIGKILL as last resort if process is stuck.
        if kill -0 "$INLINE_SPINNER_PID" 2> /dev/null; then
            kill -KILL "$INLINE_SPINNER_PID" 2> /dev/null || true
        fi
        wait "$INLINE_SPINNER_PID" 2> /dev/null || true # Reap the child
        # Cleanup shared state.
        rm -f "$INLINE_SPINNER_STOP_FILE" 2> /dev/null || true
        INLINE_SPINNER_PID=""
        INLINE_SPINNER_STOP_FILE=""
        # Clear the line - use \033[2K to clear entire line, not just to end.
        [[ -t 1 ]] && printf "\r\033[2K" >&2 || true
    fi
}
# Run command with a terminal spinner.
# Usage: with_spinner "message" cmd [args...]
# The command's stdout/stderr are discarded; returns the command's exit
# status. When MOLE_TIMEOUT_BIN is configured, execution is capped at 180s.
with_spinner() {
    local msg="$1"
    shift || true
    local timeout=180 # seconds; only enforced when MOLE_TIMEOUT_BIN is set
    start_inline_spinner "$msg"
    local exit_code=0
    if [[ -n "${MOLE_TIMEOUT_BIN:-}" ]]; then
        "$MOLE_TIMEOUT_BIN" "$timeout" "$@" > /dev/null 2>&1 || exit_code=$?
    else "$@" > /dev/null 2>&1 || exit_code=$?; fi
    # Note: stop_inline_spinner ignores its argument.
    stop_inline_spinner "$msg"
    return $exit_code
}
# Get spinner characters.
# Emits the fixed ASCII frame set consumed by start_inline_spinner.
mo_spinner_chars() {
    # Constant frame set. The previous "fallback if empty" guard was dead
    # code: the value was assigned unconditionally on the line above it.
    printf "%s" "|/-\\"
}
# Format relative time for compact display (e.g. "3 days ago" -> "3d ago").
# Known sentinel words pass through unchanged ("" maps to "Unknown");
# anything unrecognized is echoed verbatim.
format_last_used_summary() {
    local raw="$1"
    # Sentinels first.
    case "$raw" in
        "" | "Unknown")
            echo "Unknown"
            return 0
            ;;
        "Never" | "Recent" | "Today" | "Yesterday" | "This year" | "Old")
            echo "$raw"
            return 0
            ;;
    esac
    # Single combined pattern for "<N> <unit> ago"; the unit's first
    # letter (d/w/m/y) becomes the compact suffix. "month(s)" also maps
    # to "m" via its first character.
    if [[ $raw =~ ^([0-9]+)[[:space:]]+(days?|weeks?|months?|month\(s\)|years?)\ ago$ ]]; then
        local count="${BASH_REMATCH[1]}"
        local unit="${BASH_REMATCH[2]}"
        echo "${count}${unit:0:1} ago"
        return 0
    fi
    echo "$raw"
}
# Check if terminal has Full Disk Access.
# Returns 0 if FDA is granted, 1 if denied, 2 if unknown.
# The verdict is cached in MOLE_HAS_FDA ("1"/"0"/"unknown") so repeated
# calls within one session don't re-probe the filesystem.
has_full_disk_access() {
    # Cache the result to avoid repeated checks.
    if [[ -n "${MOLE_HAS_FDA:-}" ]]; then
        if [[ "$MOLE_HAS_FDA" == "1" ]]; then
            return 0
        elif [[ "$MOLE_HAS_FDA" == "unknown" ]]; then
            return 2
        else
            return 1
        fi
    fi
    # Test access to protected directories that require FDA.
    # Strategy: try to access paths that are commonly protected.
    # If ANY of them are accessible, we likely have FDA;
    # if ALL existing ones fail, we definitely don't have FDA.
    local -a protected_dirs=(
        "$HOME/Library/Safari/LocalStorage"
        "$HOME/Library/Mail/V10"
        "$HOME/Library/Messages/chat.db"
    )
    local accessible_count=0
    local tested_count=0
    for test_path in "${protected_dirs[@]}"; do
        # Only test when the protected path exists.
        if [[ -e "$test_path" ]]; then
            tested_count=$((tested_count + 1))
            # Try to stat the ACTUAL protected path - this requires FDA.
            if stat "$test_path" > /dev/null 2>&1; then
                accessible_count=$((accessible_count + 1))
            fi
        fi
    done
    # Three possible outcomes:
    # 1. tested_count = 0: can't determine (test paths don't exist) → unknown
    # 2. tested_count > 0 && accessible_count > 0: has FDA → yes
    # 3. tested_count > 0 && accessible_count = 0: no FDA → no
    if [[ $tested_count -eq 0 ]]; then
        # Can't determine - test paths don't exist, treat as unknown.
        export MOLE_HAS_FDA="unknown"
        return 2
    elif [[ $accessible_count -gt 0 ]]; then
        # At least one path is accessible → has FDA.
        export MOLE_HAS_FDA=1
        return 0
    else
        # Tested paths exist but not accessible → no FDA.
        export MOLE_HAS_FDA=0
        return 1
    fi
}

View File

@@ -1,191 +0,0 @@
#!/bin/bash
# Auto-fix Manager
# Unified auto-fix suggestions and execution
set -euo pipefail
# Show system suggestions with auto-fix markers.
# Reads results exported by earlier status checks (FIREWALL_DISABLED,
# FILEVAULT_DISABLED, TOUCHID_NOT_CONFIGURED, ROSETTA_NOT_INSTALLED,
# CACHE_SIZE_GB, BREW_HAS_WARNINGS, DISK_FREE_GB) and prints a suggestion
# list: items mole can fix itself are tagged [auto], others get a hint.
# Exports HAS_AUTO_FIX_SUGGESTIONS ("true"/"false") for ask_for_auto_fix.
show_suggestions() {
    local has_suggestions=false
    local can_auto_fix=false
    local -a auto_fix_items=() # items perform_auto_fix can handle
    local -a manual_items=()   # "title|hint" pairs needing user action
    # Avoid repeating security suggestions already shown this run.
    local skip_security_autofix=false
    if [[ "${MOLE_SECURITY_FIXES_SHOWN:-}" == "true" ]]; then
        skip_security_autofix=true
    fi
    # Security suggestions
    if [[ "$skip_security_autofix" == "false" && -n "${FIREWALL_DISABLED:-}" && "${FIREWALL_DISABLED}" == "true" ]]; then
        auto_fix_items+=("Enable Firewall for better security")
        has_suggestions=true
        can_auto_fix=true
    fi
    # FileVault can only be enabled interactively → manual hint.
    if [[ -n "${FILEVAULT_DISABLED:-}" && "${FILEVAULT_DISABLED}" == "true" ]]; then
        manual_items+=("Enable FileVault|System Settings → Privacy & Security → FileVault")
        has_suggestions=true
    fi
    # Configuration suggestions
    if [[ "$skip_security_autofix" == "false" && -n "${TOUCHID_NOT_CONFIGURED:-}" && "${TOUCHID_NOT_CONFIGURED}" == "true" ]]; then
        auto_fix_items+=("Enable Touch ID for sudo")
        has_suggestions=true
        can_auto_fix=true
    fi
    if [[ -n "${ROSETTA_NOT_INSTALLED:-}" && "${ROSETTA_NOT_INSTALLED}" == "true" ]]; then
        auto_fix_items+=("Install Rosetta 2 for Intel app support")
        has_suggestions=true
        can_auto_fix=true
    fi
    # Health suggestions — bc handles the fractional GB comparison.
    if [[ -n "${CACHE_SIZE_GB:-}" ]]; then
        local cache_gb="${CACHE_SIZE_GB:-0}"
        if (($(echo "$cache_gb > 5" | bc -l 2> /dev/null || echo 0))); then
            manual_items+=("Free up ${cache_gb}GB by cleaning caches|Run: mo clean")
            has_suggestions=true
        fi
    fi
    if [[ -n "${BREW_HAS_WARNINGS:-}" && "${BREW_HAS_WARNINGS}" == "true" ]]; then
        manual_items+=("Fix Homebrew warnings|Run: brew doctor to see details")
        has_suggestions=true
    fi
    # Low-disk hint only when the cache suggestion above didn't fire.
    if [[ -n "${DISK_FREE_GB:-}" && "${DISK_FREE_GB:-0}" -lt 50 ]]; then
        if [[ -z "${CACHE_SIZE_GB:-}" ]] || (($(echo "${CACHE_SIZE_GB:-0} <= 5" | bc -l 2> /dev/null || echo 1))); then
            manual_items+=("Low disk space (${DISK_FREE_GB}GB free)|Run: mo analyze to find large files")
            has_suggestions=true
        fi
    fi
    # Display suggestions
    echo -e "${BLUE}${ICON_ARROW}${NC} Suggestions"
    if [[ "$has_suggestions" == "false" ]]; then
        echo -e " ${GREEN}${NC} All looks good"
        export HAS_AUTO_FIX_SUGGESTIONS="false"
        return
    fi
    # Show auto-fix items
    if [[ ${#auto_fix_items[@]} -gt 0 ]]; then
        for item in "${auto_fix_items[@]}"; do
            echo -e " ${YELLOW}${ICON_WARNING}${NC} ${item} ${GREEN}[auto]${NC}"
        done
    fi
    # Show manual items ("title|hint" → two display lines)
    if [[ ${#manual_items[@]} -gt 0 ]]; then
        for item in "${manual_items[@]}"; do
            local title="${item%%|*}"
            local hint="${item#*|}"
            echo -e " ${YELLOW}${ICON_WARNING}${NC} ${title}"
            echo -e " ${GRAY}${hint}${NC}"
        done
    fi
    # Export for use in auto-fix
    export HAS_AUTO_FIX_SUGGESTIONS="$can_auto_fix"
}
# Ask user if they want to auto-fix.
# Prints an inline prompt and echoes "yes"/"no" as feedback on that line.
# Requires show_suggestions to have exported HAS_AUTO_FIX_SUGGESTIONS.
# Returns: 0 if yes (Enter pressed), 1 for any other key or read failure.
ask_for_auto_fix() {
    if [[ "${HAS_AUTO_FIX_SUGGESTIONS:-false}" != "true" ]]; then
        return 1
    fi
    echo -ne "${PURPLE}${ICON_ARROW}${NC} Auto-fix issues now? ${GRAY}Enter confirm / Space cancel${NC}: "
    local key
    if ! key=$(read_key); then
        echo "no"
        echo ""
        return 1
    fi
    if [[ "$key" == "ENTER" ]]; then
        echo "yes"
        echo ""
        return 0
    else
        echo "no"
        echo ""
        return 1
    fi
}
# Perform auto-fixes for the issues flagged by show_suggestions.
# Handles: Firewall, Touch ID for sudo, and Rosetta 2. Requires an admin
# session; skips gracefully when authentication is unavailable.
# Exports AUTO_FIX_SUMMARY / AUTO_FIX_DETAILS for the final report.
# Returns 0 always (per-item failures are reported inline).
perform_auto_fix() {
    local fixed_count=0
    local -a fixed_items=()
    # Ensure sudo access
    if ! has_sudo_session; then
        if ! ensure_sudo_session "System fixes require admin access"; then
            echo -e "${YELLOW}Skipping auto fixes (admin authentication required)${NC}"
            echo ""
            return 0
        fi
    fi
    # Fix Firewall
    if [[ -n "${FIREWALL_DISABLED:-}" && "${FIREWALL_DISABLED}" == "true" ]]; then
        echo -e "${BLUE}Enabling Firewall...${NC}"
        if sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate on > /dev/null 2>&1; then
            echo -e "${GREEN}${NC} Firewall enabled"
            # FIX: "+= 1" keeps exit status 0; "((fixed_count++))" returns
            # status 1 when the old value is 0, aborting under `set -e`.
            ((fixed_count += 1))
            fixed_items+=("Firewall enabled")
        else
            echo -e "${RED}${NC} Failed to enable Firewall"
        fi
        echo ""
    fi
    # Fix Touch ID: insert pam_tid.so at line 2 unless already present.
    if [[ -n "${TOUCHID_NOT_CONFIGURED:-}" && "${TOUCHID_NOT_CONFIGURED}" == "true" ]]; then
        echo -e "${BLUE}${ICON_ARROW}${NC} Configuring Touch ID for sudo..."
        local pam_file="/etc/pam.d/sudo"
        if sudo bash -c "grep -q 'pam_tid.so' '$pam_file' 2>/dev/null || sed -i '' '2i\\
auth sufficient pam_tid.so
' '$pam_file'" 2> /dev/null; then
            echo -e "${GREEN}${NC} Touch ID configured"
            ((fixed_count += 1))
            fixed_items+=("Touch ID configured for sudo")
        else
            echo -e "${RED}${NC} Failed to configure Touch ID"
        fi
        echo ""
    fi
    # Install Rosetta 2
    if [[ -n "${ROSETTA_NOT_INSTALLED:-}" && "${ROSETTA_NOT_INSTALLED}" == "true" ]]; then
        echo -e "${BLUE}Installing Rosetta 2...${NC}"
        if sudo softwareupdate --install-rosetta --agree-to-license 2>&1 | grep -qE "(Installing|Installed|already installed)"; then
            echo -e "${GREEN}${NC} Rosetta 2 installed"
            ((fixed_count += 1))
            fixed_items+=("Rosetta 2 installed")
        else
            echo -e "${RED}${NC} Failed to install Rosetta 2"
        fi
        echo ""
    fi
    # Build the summary consumed by the caller's report.
    if [[ $fixed_count -gt 0 ]]; then
        AUTO_FIX_SUMMARY="Auto fixes applied: ${fixed_count} issue(s)"
        if [[ ${#fixed_items[@]} -gt 0 ]]; then
            AUTO_FIX_DETAILS=$(printf '%s\n' "${fixed_items[@]}")
        else
            AUTO_FIX_DETAILS=""
        fi
    else
        AUTO_FIX_SUMMARY="Auto fixes skipped: No changes were required"
        AUTO_FIX_DETAILS=""
    fi
    export AUTO_FIX_SUMMARY AUTO_FIX_DETAILS
    return 0
}

View File

@@ -1,117 +0,0 @@
#!/bin/bash
# Purge paths management functionality
# Opens config file for editing and shows current status
set -euo pipefail
# Get script directory and source dependencies
_MOLE_MANAGE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$_MOLE_MANAGE_DIR/../core/common.sh"
# Only source project.sh if not already loaded (has readonly vars)
if [[ -z "${PURGE_TARGETS:-}" ]]; then
source "$_MOLE_MANAGE_DIR/../clean/project.sh"
fi
# Config file path for purge scan roots (use :- to avoid re-declaration
# if another sourced module already set it).
PURGE_PATHS_CONFIG="${PURGE_PATHS_CONFIG:-$HOME/.config/mole/purge_paths}"
# Ensure config file exists with helpful template.
# Creates $PURGE_PATHS_CONFIG (and its parent directory) on first run;
# never overwrites an existing config.
ensure_config_template() {
    if [[ ! -f "$PURGE_PATHS_CONFIG" ]]; then
        ensure_user_dir "$(dirname "$PURGE_PATHS_CONFIG")"
        # Quoted 'EOF' → template text is written verbatim, no expansion.
        cat > "$PURGE_PATHS_CONFIG" << 'EOF'
# Mole Purge Paths - Directories to scan for project artifacts
# Add one path per line (supports ~ for home directory)
# Delete all paths or this file to use defaults
#
# Example:
# ~/Documents/MyProjects
# ~/Work/ClientA
# ~/Work/ClientB
EOF
    fi
}
# Main management function.
# Shows the current purge scan paths, opens the config file in $EDITOR,
# then reloads the config and confirms. Interactive; the only file it
# writes is the config (via the editor).
manage_purge_paths() {
    ensure_config_template
    local display_config="${PURGE_PATHS_CONFIG/#$HOME/~}"
    # Clear screen (only when stdout is a terminal).
    if [[ -t 1 ]]; then
        printf '\033[2J\033[H'
    fi
    echo -e "${PURPLE_BOLD}Purge Paths Configuration${NC}"
    echo ""
    # Show current status
    echo -e "${YELLOW}Current Scan Paths:${NC}"
    # Reload config
    load_purge_config
    if [[ ${#PURGE_SEARCH_PATHS[@]} -gt 0 ]]; then
        for path in "${PURGE_SEARCH_PATHS[@]}"; do
            local display_path="${path/#$HOME/~}"
            if [[ -d "$path" ]]; then
                echo -e " ${GREEN}${NC} $display_path"
            else
                echo -e " ${GRAY}${NC} $display_path ${GRAY}(not found)${NC}"
            fi
        done
    fi
    # Count non-blank, non-comment lines to detect a custom config.
    local custom_count=0
    if [[ -f "$PURGE_PATHS_CONFIG" ]]; then
        while IFS= read -r line; do
            line="${line#"${line%%[![:space:]]*}"}"
            line="${line%"${line##*[![:space:]]}"}"
            [[ -z "$line" || "$line" =~ ^# ]] && continue
            # FIX: "+= 1" keeps exit status 0; "((custom_count++))" returns
            # status 1 on the first increment (old value 0), which aborts
            # the script under this file's `set -euo pipefail`.
            ((custom_count += 1))
        done < "$PURGE_PATHS_CONFIG"
    fi
    echo ""
    if [[ $custom_count -gt 0 ]]; then
        echo -e "${GRAY}Using custom config with $custom_count path(s)${NC}"
    else
        echo -e "${GRAY}Using ${#DEFAULT_PURGE_SEARCH_PATHS[@]} default paths${NC}"
    fi
    echo ""
    echo -e "${YELLOW}Default Paths:${NC}"
    for path in "${DEFAULT_PURGE_SEARCH_PATHS[@]}"; do
        echo -e " ${GRAY}-${NC} ${path/#$HOME/~}"
    done
    echo ""
    echo -e "${YELLOW}Config File:${NC} $display_config"
    echo ""
    # Open in editor
    local editor="${EDITOR:-${VISUAL:-vim}}"
    echo -e "Opening in ${CYAN}$editor${NC}..."
    echo -e "${GRAY}Save and exit to apply changes. Leave empty to use defaults.${NC}"
    echo ""
    # Give the user a moment to read before the editor takes the screen.
    sleep 1
    # Open editor
    "$editor" "$PURGE_PATHS_CONFIG"
    # Reload and show updated status
    load_purge_config
    echo ""
    echo -e "${GREEN}${ICON_SUCCESS}${NC} Configuration updated"
    echo -e "${GRAY}Run 'mo purge' to clean with new paths${NC}"
    echo ""
}
# When executed directly (not sourced), launch the purge-paths manager.
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    manage_purge_paths
fi

View File

@@ -1,141 +0,0 @@
#!/bin/bash
# Update Manager
# Unified update execution for all update types
set -euo pipefail
# Format Homebrew update label for display.
# Reads BREW_OUTDATED_COUNT / BREW_FORMULA_OUTDATED_COUNT /
# BREW_CASK_OUTDATED_COUNT and prints " • Homebrew (N formula, M cask)"
# (or "(T updates)" when no breakdown is available). Prints nothing when
# there are no outdated packages.
format_brew_update_label() {
    local total="${BREW_OUTDATED_COUNT:-0}"
    if [[ -z "$total" || "$total" -le 0 ]]; then
        return
    fi
    local -a details=()
    local formulas="${BREW_FORMULA_OUTDATED_COUNT:-0}"
    local casks="${BREW_CASK_OUTDATED_COUNT:-0}"
    ((formulas > 0)) && details+=("${formulas} formula")
    ((casks > 0)) && details+=("${casks} cask")
    local detail_str="(${total} updates)"
    if ((${#details[@]} > 0)); then
        # FIX: join explicitly with ", ". The previous IFS=', ' trick did
        # not work — "${array[*]}" joins with only the FIRST character of
        # IFS, producing "2 formula,1 cask" instead of "2 formula, 1 cask".
        local joined="${details[0]}"
        local i
        for ((i = 1; i < ${#details[@]}; i++)); do
            joined+=", ${details[i]}"
        done
        detail_str="(${joined})"
    fi
    printf " • Homebrew %s" "$detail_str"
}
# Report whether Homebrew has outdated packages of the given kind.
# $1: "formula" (default) or "cask". Returns 0 when at least one package
# is outdated, 1 otherwise (including when brew is not installed).
brew_has_outdated() {
    local kind="${1:-formula}"
    command -v brew > /dev/null 2>&1 || return 1
    local -a args=(outdated --quiet)
    [[ "$kind" == "cask" ]] && args=(outdated --cask --quiet)
    # grep -q . succeeds only when brew printed at least one line.
    brew "${args[@]}" 2> /dev/null | grep -q .
}
# Ask user if they want to update.
# Builds the "AVAILABLE UPDATES" list from exported check results
# (BREW_OUTDATED_COUNT via format_brew_update_label, APPSTORE_UPDATE_COUNT,
# MACOS_UPDATE_AVAILABLE, MOLE_UPDATE_AVAILABLE). Only a Mole self-update
# is automated; everything else gets a manual hint.
# Returns: 0 if the user confirmed a Mole update, 1 otherwise.
ask_for_updates() {
    local has_updates=false
    local -a update_list=()
    local brew_entry
    brew_entry=$(format_brew_update_label || true)
    if [[ -n "$brew_entry" ]]; then
        has_updates=true
        update_list+=("$brew_entry")
    fi
    if [[ -n "${APPSTORE_UPDATE_COUNT:-}" && "${APPSTORE_UPDATE_COUNT:-0}" -gt 0 ]]; then
        has_updates=true
        update_list+=(" • App Store (${APPSTORE_UPDATE_COUNT} apps)")
    fi
    if [[ -n "${MACOS_UPDATE_AVAILABLE:-}" && "${MACOS_UPDATE_AVAILABLE}" == "true" ]]; then
        has_updates=true
        update_list+=(" • macOS system")
    fi
    if [[ -n "${MOLE_UPDATE_AVAILABLE:-}" && "${MOLE_UPDATE_AVAILABLE}" == "true" ]]; then
        has_updates=true
        update_list+=(" • Mole")
    fi
    if [[ "$has_updates" == "false" ]]; then
        return 1
    fi
    echo -e "${BLUE}AVAILABLE UPDATES${NC}"
    for item in "${update_list[@]}"; do
        echo -e "$item"
    done
    echo ""
    # If only Mole is relevant for automation, prompt just for Mole.
    if [[ "${MOLE_UPDATE_AVAILABLE:-}" == "true" ]]; then
        echo ""
        echo -ne "${YELLOW}Update Mole now?${NC} ${GRAY}Enter confirm / ESC cancel${NC}: "
        local key
        if ! key=$(read_key); then
            echo "skip"
            echo ""
            return 1
        fi
        if [[ "$key" == "ENTER" ]]; then
            echo "yes"
            echo ""
            return 0
        fi
    fi
    # Non-Mole updates (or a declined prompt): point at brew instead.
    echo ""
    echo -e "${YELLOW}💡 Run ${GREEN}brew upgrade${YELLOW} to update${NC}"
    return 1
}
# Perform all pending updates.
# Only Mole self-updates run here; Homebrew/App Store/macOS are manual
# (hints already shown by ask_for_updates).
# Returns: 0 if all attempted updates succeeded (or none were pending),
# 1 if the Mole update failed.
perform_updates() {
    local updated_count=0
    local total_count=0
    if [[ -n "${MOLE_UPDATE_AVAILABLE:-}" && "${MOLE_UPDATE_AVAILABLE}" == "true" ]]; then
        echo -e "${BLUE}Updating Mole...${NC}"
        # Prefer the sibling executable; fall back to mole on PATH.
        local mole_bin="${SCRIPT_DIR}/../../mole"
        [[ ! -f "$mole_bin" ]] && mole_bin=$(command -v mole 2> /dev/null || echo "")
        if [[ -x "$mole_bin" ]]; then
            if "$mole_bin" update 2>&1 | grep -qE "(Updated|latest version)"; then
                echo -e "${GREEN}${NC} Mole updated"
                reset_mole_cache
                # FIX: "+= 1" keeps exit status 0; "((updated_count++))"
                # returns status 1 here (old value 0) and would abort the
                # script under `set -e`.
                ((updated_count += 1))
            else
                echo -e "${RED}${NC} Mole update failed"
            fi
        else
            echo -e "${RED}${NC} Mole executable not found"
        fi
        echo ""
        total_count=1
    fi
    if [[ $total_count -eq 0 ]]; then
        echo -e "${GRAY}No updates to perform${NC}"
        return 0
    elif [[ $updated_count -eq $total_count ]]; then
        echo -e "${GREEN}All updates completed (${updated_count}/${total_count})${NC}"
        return 0
    else
        echo -e "${RED}Update failed (${updated_count}/${total_count})${NC}"
        return 1
    fi
}

View File

@@ -1,430 +0,0 @@
#!/bin/bash
# Whitelist management functionality
# Shows actual files that would be deleted by dry-run
set -euo pipefail
# Get script directory and source dependencies
_MOLE_MANAGE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$_MOLE_MANAGE_DIR/../core/common.sh"
source "$_MOLE_MANAGE_DIR/../ui/menu_simple.sh"
# Config file paths
readonly WHITELIST_CONFIG_CLEAN="$HOME/.config/mole/whitelist"
readonly WHITELIST_CONFIG_OPTIMIZE="$HOME/.config/mole/whitelist_optimize"
# Pre-rename location for the optimize whitelist; load_whitelist migrates
# it to WHITELIST_CONFIG_OPTIMIZE automatically.
readonly WHITELIST_CONFIG_OPTIMIZE_LEGACY="$HOME/.config/mole/whitelist_checks"
# Default whitelist patterns defined in lib/core/common.sh:
# - DEFAULT_WHITELIST_PATTERNS
# - FINDER_METADATA_SENTINEL
# Save whitelist patterns to config (defaults to "clean" for legacy callers).
# Usage: save_whitelist_patterns [clean|optimize] [pattern...]
# Rewrites the whole config file: a mode-specific header followed by one
# pattern per line, with duplicates (per patterns_equivalent) removed
# while preserving first-seen order.
save_whitelist_patterns() {
    local mode="clean"
    # Optional first argument selects the config; anything else is a pattern.
    if [[ $# -gt 0 ]]; then
        case "$1" in
            clean | optimize)
                mode="$1"
                shift
                ;;
        esac
    fi
    local -a patterns
    patterns=("$@")
    local config_file
    local header_text
    if [[ "$mode" == "optimize" ]]; then
        config_file="$WHITELIST_CONFIG_OPTIMIZE"
        header_text="# Mole Optimization Whitelist - These checks will be skipped during optimization"
    else
        config_file="$WHITELIST_CONFIG_CLEAN"
        header_text="# Mole Whitelist - Protected paths won't be deleted\n# Default protections: Playwright browsers, HuggingFace models, Maven repo, Ollama models, Surge Mac, R renv, Finder metadata\n# Add one pattern per line to keep items safe."
    fi
    ensure_user_file "$config_file"
    # echo -e expands the \n sequences embedded in the header text.
    echo -e "$header_text" > "$config_file"
    if [[ ${#patterns[@]} -gt 0 ]]; then
        # De-duplicate while preserving first-seen order.
        local -a unique_patterns=()
        for pattern in "${patterns[@]}"; do
            local duplicate="false"
            if [[ ${#unique_patterns[@]} -gt 0 ]]; then
                for existing in "${unique_patterns[@]}"; do
                    if patterns_equivalent "$pattern" "$existing"; then
                        duplicate="true"
                        break
                    fi
                done
            fi
            [[ "$duplicate" == "true" ]] && continue
            unique_patterns+=("$pattern")
        done
        if [[ ${#unique_patterns[@]} -gt 0 ]]; then
            printf '\n' >> "$config_file"
            for pattern in "${unique_patterns[@]}"; do
                echo "$pattern" >> "$config_file"
            done
        fi
    fi
}
# Get all cache items with their patterns.
# Emits one record per line on stdout: "display_name|pattern|category".
# The heredoc is quoted ('EOF'), so patterns carry a literal "$HOME" that
# consumers expand before use. The Finder-metadata entry is appended
# outside the heredoc because it references the FINDER_METADATA_SENTINEL
# constant from lib/core/common.sh.
get_all_cache_items() {
    # Format: "display_name|pattern|category"
    cat << 'EOF'
Apple Mail cache|$HOME/Library/Caches/com.apple.mail/*|system_cache
Gradle build cache (Android Studio, Gradle projects)|$HOME/.gradle/caches/*|ide_cache
Gradle daemon processes cache|$HOME/.gradle/daemon/*|ide_cache
Xcode DerivedData (build outputs, indexes)|$HOME/Library/Developer/Xcode/DerivedData/*|ide_cache
Xcode archives (built app packages)|$HOME/Library/Developer/Xcode/Archives/*|ide_cache
Xcode internal cache files|$HOME/Library/Caches/com.apple.dt.Xcode/*|ide_cache
Xcode iOS device support symbols|$HOME/Library/Developer/Xcode/iOS DeviceSupport/*/Symbols/System/Library/Caches/*|ide_cache
Maven local repository (Java dependencies)|$HOME/.m2/repository/*|ide_cache
JetBrains IDEs data (IntelliJ, PyCharm, WebStorm, GoLand)|$HOME/Library/Application Support/JetBrains/*|ide_cache
JetBrains IDEs cache|$HOME/Library/Caches/JetBrains/*|ide_cache
Android Studio cache and indexes|$HOME/Library/Caches/Google/AndroidStudio*/*|ide_cache
Android build cache|$HOME/.android/build-cache/*|ide_cache
VS Code runtime cache|$HOME/Library/Application Support/Code/Cache/*|ide_cache
VS Code extension and update cache|$HOME/Library/Application Support/Code/CachedData/*|ide_cache
VS Code system cache (Cursor, VSCodium)|$HOME/Library/Caches/com.microsoft.VSCode/*|ide_cache
Cursor editor cache|$HOME/Library/Caches/com.todesktop.230313mzl4w4u92/*|ide_cache
Bazel build cache|$HOME/.cache/bazel/*|compiler_cache
Go build cache and module cache|$HOME/Library/Caches/go-build/*|compiler_cache
Go module cache|$HOME/go/pkg/mod/cache/*|compiler_cache
Rust Cargo registry cache|$HOME/.cargo/registry/cache/*|compiler_cache
Rust documentation cache|$HOME/.rustup/toolchains/*/share/doc/*|compiler_cache
Rustup toolchain downloads|$HOME/.rustup/downloads/*|compiler_cache
ccache compiler cache|$HOME/.ccache/*|compiler_cache
sccache distributed compiler cache|$HOME/.cache/sccache/*|compiler_cache
SBT Scala build cache|$HOME/.sbt/*|compiler_cache
Ivy dependency cache|$HOME/.ivy2/cache/*|compiler_cache
Turbo monorepo build cache|$HOME/.turbo/*|compiler_cache
Next.js build cache|$HOME/.next/*|compiler_cache
Vite build cache|$HOME/.vite/*|compiler_cache
Parcel bundler cache|$HOME/.parcel-cache/*|compiler_cache
pre-commit hooks cache|$HOME/.cache/pre-commit/*|compiler_cache
Ruff Python linter cache|$HOME/.cache/ruff/*|compiler_cache
MyPy type checker cache|$HOME/.cache/mypy/*|compiler_cache
Pytest test cache|$HOME/.pytest_cache/*|compiler_cache
Flutter SDK cache|$HOME/.cache/flutter/*|compiler_cache
Swift Package Manager cache|$HOME/.cache/swift-package-manager/*|compiler_cache
Zig compiler cache|$HOME/.cache/zig/*|compiler_cache
Deno cache|$HOME/Library/Caches/deno/*|compiler_cache
CocoaPods cache (iOS dependencies)|$HOME/Library/Caches/CocoaPods/*|package_manager
npm package cache|$HOME/.npm/_cacache/*|package_manager
pip Python package cache|$HOME/.cache/pip/*|package_manager
uv Python package cache|$HOME/.cache/uv/*|package_manager
R renv global cache (virtual environments)|$HOME/Library/Caches/org.R-project.R/R/renv/*|package_manager
Homebrew downloaded packages|$HOME/Library/Caches/Homebrew/*|package_manager
Yarn package manager cache|$HOME/.cache/yarn/*|package_manager
pnpm package store|$HOME/.pnpm-store/*|package_manager
Composer PHP dependencies cache|$HOME/.composer/cache/*|package_manager
RubyGems cache|$HOME/.gem/cache/*|package_manager
Conda packages cache|$HOME/.conda/pkgs/*|package_manager
Anaconda packages cache|$HOME/anaconda3/pkgs/*|package_manager
PyTorch model cache|$HOME/.cache/torch/*|ai_ml_cache
TensorFlow model and dataset cache|$HOME/.cache/tensorflow/*|ai_ml_cache
HuggingFace models and datasets|$HOME/.cache/huggingface/*|ai_ml_cache
Playwright browser binaries|$HOME/Library/Caches/ms-playwright*|ai_ml_cache
Selenium WebDriver binaries|$HOME/.cache/selenium/*|ai_ml_cache
Ollama local AI models|$HOME/.ollama/models/*|ai_ml_cache
Weights & Biases ML experiments cache|$HOME/.cache/wandb/*|ai_ml_cache
Safari web browser cache|$HOME/Library/Caches/com.apple.Safari/*|browser_cache
Chrome browser cache|$HOME/Library/Caches/Google/Chrome/*|browser_cache
Firefox browser cache|$HOME/Library/Caches/Firefox/*|browser_cache
Brave browser cache|$HOME/Library/Caches/BraveSoftware/Brave-Browser/*|browser_cache
Surge proxy cache|$HOME/Library/Caches/com.nssurge.surge-mac/*|network_tools
Surge configuration and data|$HOME/Library/Application Support/com.nssurge.surge-mac/*|network_tools
Docker Desktop image cache|$HOME/Library/Containers/com.docker.docker/Data/*|container_cache
Podman container cache|$HOOME/.local/share/containers/cache/*|container_cache
Font cache|$HOME/Library/Caches/com.apple.FontRegistry/*|system_cache
Spotlight metadata cache|$HOME/Library/Caches/com.apple.spotlight/*|system_cache
CloudKit cache|$HOME/Library/Caches/CloudKit/*|system_cache
Trash|$HOME/.Trash|system_cache
EOF
    # Add FINDER_METADATA with constant reference
    echo "Finder metadata (.DS_Store)|$FINDER_METADATA_SENTINEL|system_cache"
}
# Get all optimize items with their patterns.
# Emits one record per line: "display_name|check_id|category", where
# check_id is the identifier matched against the optimize whitelist.
get_optimize_whitelist_items() {
    # Format: "display_name|pattern|category"
    cat << 'EOF'
macOS Firewall check|firewall|security_check
Gatekeeper check|gatekeeper|security_check
macOS system updates check|check_macos_updates|update_check
Mole updates check|check_mole_update|update_check
Homebrew health check (doctor)|check_brew_health|health_check
SIP status check|check_sip|security_check
FileVault status check|check_filevault|security_check
TouchID sudo check|check_touchid|config_check
Rosetta 2 check|check_rosetta|config_check
Git configuration check|check_git_config|config_check
Login items check|check_login_items|config_check
EOF
}
# Compare two whitelist patterns for equivalence.
# A leading "~" is expanded to $HOME on both sides, then the strings are
# compared exactly — glob expansion is never performed.
patterns_equivalent() {
    # FIX: the tilde must be escaped ("\~"). An unquoted "~" in the
    # pattern of ${var/pattern/string} itself undergoes tilde expansion,
    # so the original "${1/#~/$HOME}" never matched a literal leading
    # tilde. This matches the escaping already used by is_whitelisted.
    local first="${1/#\~/$HOME}"
    local second="${2/#\~/$HOME}"
    # Only exact string match, no glob expansion
    [[ "$first" == "$second" ]] && return 0
    return 1
}
# Load whitelist patterns for a mode into CURRENT_WHITELIST_PATTERNS.
# Usage: load_whitelist [clean|optimize]   (default: clean)
# Reads the mode's config file (falling back to the legacy optimize path,
# which is then auto-migrated), skips blank/comment lines, de-duplicates,
# and falls back to the built-in DEFAULT_* patterns when no config exists.
load_whitelist() {
    local mode="${1:-clean}"
    local -a patterns=()
    local config_file
    local legacy_file=""
    if [[ "$mode" == "optimize" ]]; then
        config_file="$WHITELIST_CONFIG_OPTIMIZE"
        legacy_file="$WHITELIST_CONFIG_OPTIMIZE_LEGACY"
    else
        config_file="$WHITELIST_CONFIG_CLEAN"
    fi
    # Fall back to the legacy optimize config when the new one is absent.
    local using_legacy="false"
    if [[ ! -f "$config_file" && -n "$legacy_file" && -f "$legacy_file" ]]; then
        config_file="$legacy_file"
        using_legacy="true"
    fi
    if [[ -f "$config_file" ]]; then
        while IFS= read -r line; do
            # Trim leading/trailing whitespace, skip blanks and comments.
            # shellcheck disable=SC2295
            line="${line#"${line%%[![:space:]]*}"}"
            # shellcheck disable=SC2295
            line="${line%"${line##*[![:space:]]}"}"
            [[ -z "$line" || "$line" =~ ^# ]] && continue
            patterns+=("$line")
        done < "$config_file"
    else
        # No config at all: use the built-in defaults for the mode.
        if [[ "$mode" == "clean" ]]; then
            patterns=("${DEFAULT_WHITELIST_PATTERNS[@]}")
        elif [[ "$mode" == "optimize" ]]; then
            patterns=("${DEFAULT_OPTIMIZE_WHITELIST_PATTERNS[@]}")
        fi
    fi
    if [[ ${#patterns[@]} -gt 0 ]]; then
        # De-duplicate while preserving first-seen order.
        local -a unique_patterns=()
        for pattern in "${patterns[@]}"; do
            local duplicate="false"
            if [[ ${#unique_patterns[@]} -gt 0 ]]; then
                for existing in "${unique_patterns[@]}"; do
                    if patterns_equivalent "$pattern" "$existing"; then
                        duplicate="true"
                        break
                    fi
                done
            fi
            [[ "$duplicate" == "true" ]] && continue
            unique_patterns+=("$pattern")
        done
        CURRENT_WHITELIST_PATTERNS=("${unique_patterns[@]}")
        # Migrate legacy optimize config to the new path automatically
        if [[ "$mode" == "optimize" && "$using_legacy" == "true" && "$config_file" != "$WHITELIST_CONFIG_OPTIMIZE" ]]; then
            save_whitelist_patterns "$mode" "${CURRENT_WHITELIST_PATTERNS[@]}"
        fi
    else
        CURRENT_WHITELIST_PATTERNS=()
    fi
}
# Return 0 when $1 (with a leading "~" expanded to $HOME) exactly matches
# an entry in CURRENT_WHITELIST_PATTERNS; 1 otherwise. Entries are
# expanded the same way; comparison is exact — no glob expansion, to
# avoid pattern-injection surprises.
is_whitelisted() {
    local candidate="${1/#\~/$HOME}"
    local entry
    if [[ ${#CURRENT_WHITELIST_PATTERNS[@]} -eq 0 ]]; then
        return 1
    fi
    for entry in "${CURRENT_WHITELIST_PATTERNS[@]}"; do
        if [[ "$candidate" == "${entry/#\~/$HOME}" ]]; then
            return 0
        fi
    done
    return 1
}
# Thin wrapper: run the category-based whitelist manager.
# $1: optional mode, "clean" (default) or "optimize".
manage_whitelist() {
    manage_whitelist_categories "${1:-clean}"
}
# Interactive whitelist manager (category view).
# $1: "clean" (caches to protect) or "optimize" (checks to skip).
# Shows a multi-select of predefined items with currently-whitelisted ones
# pre-selected and listed first, preserves user-added custom patterns, and
# writes the merged result back to the mode's config file.
# Returns 1 when the menu is cancelled; 0 after a successful save.
manage_whitelist_categories() {
    local mode="$1"
    # Load currently enabled patterns from both sources
    load_whitelist "$mode"
    # Build cache items list (parallel arrays: name / pattern / menu label)
    local -a cache_items=()
    local -a cache_patterns=()
    local -a menu_options=()
    local index=0
    # Choose source based on mode
    local items_source
    local menu_title
    local active_config_file
    if [[ "$mode" == "optimize" ]]; then
        items_source=$(get_optimize_whitelist_items)
        active_config_file="$WHITELIST_CONFIG_OPTIMIZE"
        local display_config="${active_config_file/#$HOME/~}"
        menu_title="Whitelist Manager Select system checks to ignore
${GRAY}Edit: ${display_config}${NC}"
    else
        items_source=$(get_all_cache_items)
        active_config_file="$WHITELIST_CONFIG_CLEAN"
        local display_config="${active_config_file/#$HOME/~}"
        menu_title="Whitelist Manager Select caches to protect
${GRAY}Edit: ${display_config}${NC}"
    fi
    while IFS='|' read -r display_name pattern _; do
        # Expand the literal "$HOME" the item records ship with.
        pattern="${pattern/\$HOME/$HOME}"
        cache_items+=("$display_name")
        cache_patterns+=("$pattern")
        menu_options+=("$display_name")
        ((index++)) || true
    done <<< "$items_source"
    # Identify custom patterns (not in predefined list)
    local -a custom_patterns=()
    if [[ ${#CURRENT_WHITELIST_PATTERNS[@]} -gt 0 ]]; then
        for current_pattern in "${CURRENT_WHITELIST_PATTERNS[@]}"; do
            local is_predefined=false
            for predefined_pattern in "${cache_patterns[@]}"; do
                if patterns_equivalent "$current_pattern" "$predefined_pattern"; then
                    is_predefined=true
                    break
                fi
            done
            if [[ "$is_predefined" == "false" ]]; then
                custom_patterns+=("$current_pattern")
            fi
        done
    fi
    # Prioritize already-selected items to appear first
    local -a selected_cache_items=()
    local -a selected_cache_patterns=()
    local -a selected_menu_options=()
    local -a remaining_cache_items=()
    local -a remaining_cache_patterns=()
    local -a remaining_menu_options=()
    for ((i = 0; i < ${#cache_patterns[@]}; i++)); do
        if is_whitelisted "${cache_patterns[i]}"; then
            selected_cache_items+=("${cache_items[i]}")
            selected_cache_patterns+=("${cache_patterns[i]}")
            selected_menu_options+=("${menu_options[i]}")
        else
            remaining_cache_items+=("${cache_items[i]}")
            remaining_cache_patterns+=("${cache_patterns[i]}")
            remaining_menu_options+=("${menu_options[i]}")
        fi
    done
    cache_items=()
    cache_patterns=()
    menu_options=()
    if [[ ${#selected_cache_items[@]} -gt 0 ]]; then
        cache_items=("${selected_cache_items[@]}")
        cache_patterns=("${selected_cache_patterns[@]}")
        menu_options=("${selected_menu_options[@]}")
    fi
    if [[ ${#remaining_cache_items[@]} -gt 0 ]]; then
        cache_items+=("${remaining_cache_items[@]}")
        cache_patterns+=("${remaining_cache_patterns[@]}")
        menu_options+=("${remaining_menu_options[@]}")
    fi
    # Pre-select the first N rows (the already-whitelisted block).
    if [[ ${#selected_cache_patterns[@]} -gt 0 ]]; then
        local -a preselected_indices=()
        for ((i = 0; i < ${#selected_cache_patterns[@]}; i++)); do
            preselected_indices+=("$i")
        done
        local IFS=','
        export MOLE_PRESELECTED_INDICES="${preselected_indices[*]}"
    else
        unset MOLE_PRESELECTED_INDICES
    fi
    MOLE_SELECTION_RESULT=""
    # FIX: capture the menu's exit status directly via "|| exit_code=$?".
    # The previous code ran `unset MOLE_PRESELECTED_INDICES` between the
    # menu call and `exit_code=$?`, so exit_code was always the (successful)
    # unset status and cancellation was never detected. This form is also
    # safe under `set -e` when the menu returns non-zero.
    local exit_code=0
    paginated_multi_select "$menu_title" "${menu_options[@]}" || exit_code=$?
    unset MOLE_PRESELECTED_INDICES
    # Normal exit or cancel
    if [[ $exit_code -ne 0 ]]; then
        return 1
    fi
    # Convert selected indices to patterns
    local -a selected_patterns=()
    if [[ -n "$MOLE_SELECTION_RESULT" ]]; then
        local -a selected_indices
        IFS=',' read -ra selected_indices <<< "$MOLE_SELECTION_RESULT"
        for idx in "${selected_indices[@]}"; do
            if [[ $idx -ge 0 && $idx -lt ${#cache_patterns[@]} ]]; then
                local pattern="${cache_patterns[$idx]}"
                # Convert back to portable format with ~
                pattern="${pattern/#$HOME/~}"
                selected_patterns+=("$pattern")
            fi
        done
    fi
    # Merge custom patterns with selected patterns
    local -a all_patterns=()
    if [[ ${#selected_patterns[@]} -gt 0 ]]; then
        all_patterns=("${selected_patterns[@]}")
    fi
    if [[ ${#custom_patterns[@]} -gt 0 ]]; then
        for custom_pattern in "${custom_patterns[@]}"; do
            all_patterns+=("$custom_pattern")
        done
    fi
    # Save to whitelist config (bash 3.2 + set -u safe)
    if [[ ${#all_patterns[@]} -gt 0 ]]; then
        save_whitelist_patterns "$mode" "${all_patterns[@]}"
    else
        save_whitelist_patterns "$mode"
    fi
    # Summary for the user.
    local total_protected=$((${#selected_patterns[@]} + ${#custom_patterns[@]}))
    local -a summary_lines=()
    summary_lines+=("Whitelist Updated")
    if [[ ${#custom_patterns[@]} -gt 0 ]]; then
        summary_lines+=("Protected ${#selected_patterns[@]} predefined + ${#custom_patterns[@]} custom patterns")
    else
        summary_lines+=("Protected ${total_protected} cache(s)")
    fi
    local display_config="${active_config_file/#$HOME/~}"
    summary_lines+=("Config: ${GRAY}${display_config}${NC}")
    print_summary_block "${summary_lines[@]}"
    printf '\n'
}
# Run whitelist management directly when this file is executed as a script
# (when sourced as a library, the caller invokes manage_whitelist itself).
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    manage_whitelist
fi

View File

@@ -1,53 +0,0 @@
#!/bin/bash
# System Configuration Maintenance Module.
# Fix broken preferences and login items.
set -euo pipefail
# Remove corrupted preference files.
#
# Scans ~/Library/Preferences (top level) and its ByHost subdirectory for
# .plist files that fail `plutil -lint`, removes them via safe_remove, and
# echoes the number of files removed. Apple system preferences
# (com.apple.*, .GlobalPreferences*, loginwindow.plist) are never touched.
fix_broken_preferences() {
    local prefs_dir="$HOME/Library/Preferences"
    [[ -d "$prefs_dir" ]] || return 0
    local broken_count=0
    while IFS= read -r plist_file; do
        [[ -f "$plist_file" ]] || continue
        local filename
        filename=$(basename "$plist_file")
        # Never remove core system preference files.
        case "$filename" in
            com.apple.* | .GlobalPreferences* | loginwindow.plist)
                continue
                ;;
        esac
        # Keep files that lint cleanly; remove the rest.
        plutil -lint "$plist_file" > /dev/null 2>&1 && continue
        safe_remove "$plist_file" true > /dev/null 2>&1 || true
        # `|| true`: ((x++)) exits 1 when x was 0, which would trip
        # `set -e` (same guard the codebase uses elsewhere).
        ((broken_count++)) || true
    done < <(command find "$prefs_dir" -maxdepth 1 -name "*.plist" -type f 2> /dev/null || true)
    # Check ByHost preferences.
    local byhost_dir="$prefs_dir/ByHost"
    if [[ -d "$byhost_dir" ]]; then
        while IFS= read -r plist_file; do
            [[ -f "$plist_file" ]] || continue
            local filename
            filename=$(basename "$plist_file")
            case "$filename" in
                com.apple.* | .GlobalPreferences*)
                    continue
                    ;;
            esac
            plutil -lint "$plist_file" > /dev/null 2>&1 && continue
            safe_remove "$plist_file" true > /dev/null 2>&1 || true
            ((broken_count++)) || true
        done < <(command find "$byhost_dir" -name "*.plist" -type f 2> /dev/null || true)
    fi
    echo "$broken_count"
}

View File

@@ -1,779 +0,0 @@
#!/bin/bash
# Optimization Tasks
set -euo pipefail
# Config constants (override via env).
# NOTE(review): the two Time Machine thinning constants are not referenced
# anywhere in this file; presumably consumed elsewhere — confirm before removal.
readonly MOLE_TM_THIN_TIMEOUT=180
readonly MOLE_TM_THIN_VALUE=9999999999
# Databases larger than this are skipped by opt_sqlite_vacuum.
readonly MOLE_SQLITE_MAX_SIZE=104857600 # 100MB
# Dry-run aware output.
# Prints one status line: dry-run marker when MOLE_DRY_RUN=1, success style otherwise.
opt_msg() {
    local text="$1"
    case "${MOLE_DRY_RUN:-0}" in
        1) echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $text" ;;
        *) echo -e " ${GREEN}${NC} $text" ;;
    esac
}
# Unload a launchd plist, optionally via sudo; a no-op under dry-run.
# Always returns 0 — unload failures are intentionally ignored.
run_launchctl_unload() {
    local plist_file="$1"
    local need_sudo="${2:-false}"
    # Dry-run: report success without touching launchctl.
    if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
        return 0
    fi
    if [[ "$need_sudo" == "true" ]]; then
        sudo launchctl unload "$plist_file" 2> /dev/null || true
    else
        launchctl unload "$plist_file" 2> /dev/null || true
    fi
}
# Detect whether the user's home directory tree needs a permissions repair.
# Returns 0 (repair needed) when $HOME is owned by a different user, or when
# any key user directory exists but is not writable; returns 1 otherwise.
# Uses BSD `stat -f %Su` (macOS); on failure the owner check is skipped.
needs_permissions_repair() {
    local home_owner
    home_owner=$(stat -f %Su "$HOME" 2> /dev/null || echo "")
    # Home owned by someone else -> repair needed.
    if [[ -n "$home_owner" && "$home_owner" != "$USER" ]]; then
        return 0
    fi
    local candidate
    for candidate in "$HOME" "$HOME/Library" "$HOME/Library/Preferences"; do
        # Existing but unwritable directory -> repair needed.
        if [[ -e "$candidate" && ! -w "$candidate" ]]; then
            return 0
        fi
    done
    return 1
}
# Check whether a Bluetooth input (HID) device appears to be in use.
# Returns 0 when the profiler report shows at least one connected device AND
# mentions a keyboard/trackpad/mouse/HID anywhere; 1 otherwise.
# NOTE(review): the HID grep runs against the whole report, not only the
# connected entries — a paired-but-disconnected keyboard plus any other
# connected device also yields 0. macOS-only (`system_profiler`).
has_bluetooth_hid_connected() {
    local bt_report
    bt_report=$(system_profiler SPBluetoothDataType 2> /dev/null || echo "")
    # Nothing connected at all -> nothing to protect.
    if ! echo "$bt_report" | grep -q "Connected: Yes"; then
        return 1
    fi
    if echo "$bt_report" | grep -Eiq "Keyboard|Trackpad|Mouse|HID"; then
        return 0
    fi
    return 1
}
# True (exit 0) when pmset reports the machine is on AC power.
is_ac_power() {
    local power_report
    power_report=$(pmset -g batt 2> /dev/null || true)
    [[ "$power_report" == *"AC Power"* ]]
}
# Report whether macOS memory pressure is elevated.
# Returns 0 when `memory_pressure -Q` mentions warning/critical,
# 1 when pressure is normal or the tool is unavailable.
is_memory_pressure_high() {
    # Tool ships with macOS; absence means we cannot (and need not) act.
    if ! command -v memory_pressure > /dev/null 2>&1; then
        return 1
    fi
    local mp_output
    mp_output=$(memory_pressure -Q 2> /dev/null || echo "")
    if echo "$mp_output" | grep -Eiq "warning|critical"; then
        return 0
    fi
    return 1
}
# Flush the macOS DNS cache (dscacheutil flush + mDNSResponder HUP).
# Side effect: sets the global MOLE_DNS_FLUSHED=1 on success (or in dry-run)
# so later steps can skip a second flush. Returns 1 if either sudo command fails.
flush_dns_cache() {
    if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then
        MOLE_DNS_FLUSHED=1
        return 0
    fi
    if sudo dscacheutil -flushcache 2> /dev/null && sudo killall -HUP mDNSResponder 2> /dev/null; then
        MOLE_DNS_FLUSHED=1
        return 0
    fi
    return 1
}
# Basic system maintenance.
# Flushes the DNS cache and reports Spotlight indexing status for the root
# volume; informational apart from the DNS flush.
opt_system_maintenance() {
    if flush_dns_cache; then
        opt_msg "DNS cache flushed"
    fi
    local spotlight_status
    spotlight_status=$(mdutil -s / 2> /dev/null || echo "")
    if echo "$spotlight_status" | grep -qi "Indexing disabled"; then
        echo -e " ${GRAY}${ICON_EMPTY}${NC} Spotlight indexing disabled"
    else
        opt_msg "Spotlight index verified"
    fi
}
# Refresh Finder caches (QuickLook/icon services).
# Rebuilds QuickLook thumbnails via qlmanage, then removes the on-disk
# QuickLook/icon-service caches (macOS regenerates them automatically).
# Exports OPTIMIZE_CACHE_CLEANED_KB with the total size removed (KB).
opt_cache_refresh() {
    local total_cache_size=0
    # Single source of truth for the cache paths (previously duplicated in
    # the debug branch and the removal loop; bash `local` is function-scoped,
    # so hoisting is behavior-identical).
    local -a cache_targets=(
        "$HOME/Library/Caches/com.apple.QuickLook.thumbnailcache"
        "$HOME/Library/Caches/com.apple.iconservices.store"
        "$HOME/Library/Caches/com.apple.iconservices"
    )
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
        debug_operation_start "Finder Cache Refresh" "Refresh QuickLook thumbnails and icon services"
        debug_operation_detail "Method" "Remove cache files and rebuild via qlmanage"
        debug_operation_detail "Expected outcome" "Faster Finder preview generation, fixed icon display issues"
        debug_risk_level "LOW" "Caches are automatically rebuilt"
        debug_operation_detail "Files to be removed" ""
        for target_path in "${cache_targets[@]}"; do
            if [[ -e "$target_path" ]]; then
                local size_kb
                size_kb=$(get_path_size_kb "$target_path" 2> /dev/null || echo "0")
                local size_human="unknown"
                if [[ "$size_kb" -gt 0 ]]; then
                    size_human=$(bytes_to_human "$((size_kb * 1024))")
                fi
                debug_file_action " Will remove" "$target_path" "$size_human" ""
            fi
        done
    fi
    if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then
        # Reset the QuickLook cache, then force a rebuild pass.
        qlmanage -r cache > /dev/null 2>&1 || true
        qlmanage -r > /dev/null 2>&1 || true
    fi
    for target_path in "${cache_targets[@]}"; do
        if [[ -e "$target_path" ]]; then
            if ! should_protect_path "$target_path"; then
                local size_kb
                size_kb=$(get_path_size_kb "$target_path" 2> /dev/null || echo "0")
                if [[ "$size_kb" =~ ^[0-9]+$ ]]; then
                    total_cache_size=$((total_cache_size + size_kb))
                fi
                safe_remove "$target_path" true > /dev/null 2>&1
            fi
        fi
    done
    export OPTIMIZE_CACHE_CLEANED_KB="${total_cache_size}"
    opt_msg "QuickLook thumbnails refreshed"
    opt_msg "Icon services cache rebuilt"
}
# Removed: opt_maintenance_scripts - macOS handles log rotation automatically via launchd
# Removed: opt_radio_refresh - Interrupts active user connections (WiFi, Bluetooth), degrading UX
# Old saved states cleanup.
# Deletes *.savedState folders under ~/Library/Saved Application State that
# are older than MOLE_SAVED_STATE_AGE_DAYS, honoring the protect whitelist.
opt_saved_state_cleanup() {
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
        debug_operation_start "App Saved State Cleanup" "Remove old application saved states"
        debug_operation_detail "Method" "Find and remove .savedState folders older than $MOLE_SAVED_STATE_AGE_DAYS days"
        debug_operation_detail "Location" "$HOME/Library/Saved Application State"
        debug_operation_detail "Expected outcome" "Reduced disk usage, apps start with clean state"
        debug_risk_level "LOW" "Old saved states, apps will create new ones"
    fi
    local state_dir="$HOME/Library/Saved Application State"
    if [[ -d "$state_dir" ]]; then
        # NUL-delimited read handles paths with spaces/newlines safely.
        while IFS= read -r -d '' state_path; do
            if should_protect_path "$state_path"; then
                continue
            fi
            safe_remove "$state_path" true > /dev/null 2>&1
        done < <(command find "$state_dir" -type d -name "*.savedState" -mtime "+$MOLE_SAVED_STATE_AGE_DAYS" -print0 2> /dev/null)
    fi
    opt_msg "App saved states optimized"
}
# Removed: opt_swap_cleanup - Direct virtual memory operations pose system crash risk
# Removed: opt_startup_cache - Modern macOS has no such mechanism
# Removed: opt_local_snapshots - Deletes user Time Machine recovery points, breaks backup continuity
# Repair corrupted user preference plists.
# Runs fix_broken_preferences (maintenance module) behind an inline spinner
# and exports OPTIMIZE_CONFIGS_REPAIRED with the repaired-file count.
opt_fix_broken_configs() {
    local spinner_started="false"
    if [[ -t 1 ]]; then
        MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking preferences..."
        spinner_started="true"
    fi
    # Declare separately from the assignment so `local` does not mask the
    # command's exit status (shellcheck SC2155).
    local broken_prefs
    broken_prefs=$(fix_broken_preferences)
    if [[ "$spinner_started" == "true" ]]; then
        stop_inline_spinner
    fi
    export OPTIMIZE_CONFIGS_REPAIRED="${broken_prefs}"
    if [[ $broken_prefs -gt 0 ]]; then
        opt_msg "Repaired $broken_prefs corrupted preference files"
    else
        opt_msg "All preference files valid"
    fi
}
# DNS cache refresh.
# Skips the flush when a prior step already set MOLE_DNS_FLUSHED=1;
# otherwise flushes via flush_dns_cache and reports the outcome.
opt_network_optimization() {
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
        debug_operation_start "Network Optimization" "Refresh DNS cache and restart mDNSResponder"
        debug_operation_detail "Method" "Flush DNS cache via dscacheutil and killall mDNSResponder"
        debug_operation_detail "Expected outcome" "Faster DNS resolution, fixed network connectivity issues"
        debug_risk_level "LOW" "DNS cache is automatically rebuilt"
    fi
    # opt_system_maintenance may have flushed earlier in the same run.
    if [[ "${MOLE_DNS_FLUSHED:-0}" == "1" ]]; then
        opt_msg "DNS cache already refreshed"
        opt_msg "mDNSResponder already restarted"
        return 0
    fi
    if flush_dns_cache; then
        opt_msg "DNS cache refreshed"
        opt_msg "mDNSResponder restarted"
    else
        echo -e " ${YELLOW}!${NC} Failed to refresh DNS cache"
    fi
}
# SQLite vacuum for Mail/Messages/Safari (safety checks applied).
# Skips when sqlite3 is missing, when an owning app is running, when a DB is
# protected/oversized/already compact, or when integrity_check fails.
# Exports OPTIMIZE_DATABASES_COUNT with the number of databases vacuumed.
# Fix: all `((var++))` counters get `|| true` — a post-increment from 0
# evaluates to 0 and returns status 1, which trips `set -euo pipefail`.
opt_sqlite_vacuum() {
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
        debug_operation_start "Database Optimization" "Vacuum SQLite databases for Mail, Safari, and Messages"
        debug_operation_detail "Method" "Run VACUUM command on databases after integrity check"
        debug_operation_detail "Safety checks" "Skip if apps are running, verify integrity first, 20s timeout"
        debug_operation_detail "Expected outcome" "Reduced database size, faster app performance"
        debug_risk_level "LOW" "Only optimizes databases, does not delete data"
    fi
    if ! command -v sqlite3 > /dev/null 2>&1; then
        echo -e " ${GRAY}-${NC} Database optimization already optimal (sqlite3 unavailable)"
        return 0
    fi
    # Never VACUUM a live database: bail out if an owning app is running.
    local -a busy_apps=()
    local -a check_apps=("Mail" "Safari" "Messages")
    local app
    for app in "${check_apps[@]}"; do
        if pgrep -x "$app" > /dev/null 2>&1; then
            busy_apps+=("$app")
        fi
    done
    if [[ ${#busy_apps[@]} -gt 0 ]]; then
        echo -e " ${YELLOW}!${NC} Close these apps before database optimization: ${busy_apps[*]}"
        return 0
    fi
    local spinner_started="false"
    if [[ "${MOLE_DRY_RUN:-0}" != "1" && -t 1 ]]; then
        MOLE_SPINNER_PREFIX=" " start_inline_spinner "Optimizing databases..."
        spinner_started="true"
    fi
    local -a db_paths=(
        "$HOME/Library/Mail/V*/MailData/Envelope Index*"
        "$HOME/Library/Messages/chat.db"
        "$HOME/Library/Safari/History.db"
        "$HOME/Library/Safari/TopSites.db"
    )
    local vacuumed=0
    local timed_out=0
    local failed=0
    local skipped=0
    for pattern in "${db_paths[@]}"; do
        while IFS= read -r db_file; do
            [[ ! -f "$db_file" ]] && continue
            # WAL/SHM sidecars are managed by SQLite itself.
            [[ "$db_file" == *"-wal" || "$db_file" == *"-shm" ]] && continue
            should_protect_path "$db_file" && continue
            if ! file "$db_file" 2> /dev/null | grep -q "SQLite"; then
                continue
            fi
            # Skip large DBs (>100MB).
            local file_size
            file_size=$(get_file_size "$db_file")
            if [[ "$file_size" -gt "$MOLE_SQLITE_MAX_SIZE" ]]; then
                ((skipped++)) || true
                continue
            fi
            # Skip if freelist is tiny (already compact).
            local page_info=""
            page_info=$(run_with_timeout 5 sqlite3 "$db_file" "PRAGMA page_count; PRAGMA freelist_count;" 2> /dev/null || echo "")
            local page_count=""
            local freelist_count=""
            page_count=$(echo "$page_info" | awk 'NR==1 {print $1}' 2> /dev/null || echo "")
            freelist_count=$(echo "$page_info" | awk 'NR==2 {print $1}' 2> /dev/null || echo "")
            if [[ "$page_count" =~ ^[0-9]+$ && "$freelist_count" =~ ^[0-9]+$ && "$page_count" -gt 0 ]]; then
                # Under 5% free pages: VACUUM would gain almost nothing.
                if ((freelist_count * 100 < page_count * 5)); then
                    ((skipped++)) || true
                    continue
                fi
            fi
            # Verify integrity before VACUUM.
            if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then
                local integrity_check=""
                set +e
                integrity_check=$(run_with_timeout 10 sqlite3 "$db_file" "PRAGMA integrity_check;" 2> /dev/null)
                local integrity_status=$?
                set -e
                if [[ $integrity_status -ne 0 ]] || ! echo "$integrity_check" | grep -q "ok"; then
                    ((skipped++)) || true
                    continue
                fi
            fi
            local exit_code=0
            if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then
                set +e
                run_with_timeout 20 sqlite3 "$db_file" "VACUUM;" 2> /dev/null
                exit_code=$?
                set -e
                if [[ $exit_code -eq 0 ]]; then
                    ((vacuumed++)) || true
                elif [[ $exit_code -eq 124 ]]; then
                    # 124 = run_with_timeout's timeout convention.
                    ((timed_out++)) || true
                else
                    ((failed++)) || true
                fi
            else
                ((vacuumed++)) || true
            fi
        done < <(compgen -G "$pattern" || true)
    done
    if [[ "$spinner_started" == "true" ]]; then
        stop_inline_spinner
    fi
    export OPTIMIZE_DATABASES_COUNT="${vacuumed}"
    if [[ $vacuumed -gt 0 ]]; then
        opt_msg "Optimized $vacuumed databases for Mail, Safari, Messages"
    elif [[ $timed_out -eq 0 && $failed -eq 0 ]]; then
        opt_msg "All databases already optimized"
    else
        echo -e " ${YELLOW}!${NC} Database optimization incomplete"
    fi
    if [[ $skipped -gt 0 ]]; then
        opt_msg "Already optimal for $skipped databases"
    fi
    if [[ $timed_out -gt 0 ]]; then
        echo -e " ${YELLOW}!${NC} Timed out on $timed_out databases"
    fi
    if [[ $failed -gt 0 ]]; then
        echo -e " ${YELLOW}!${NC} Failed on $failed databases"
    fi
}
# LaunchServices rebuild ("Open with" issues).
# Runs lsregister -r across local/user/system domains, falling back to
# local/user only if the full rebuild fails.
opt_launch_services_rebuild() {
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
        debug_operation_start "LaunchServices Rebuild" "Rebuild LaunchServices database"
        debug_operation_detail "Method" "Run lsregister -r on system, user, and local domains"
        debug_operation_detail "Purpose" "Fix \"Open with\" menu issues and file associations"
        debug_operation_detail "Expected outcome" "Correct app associations, fixed duplicate entries"
        debug_risk_level "LOW" "Database is automatically rebuilt"
    fi
    if [[ -t 1 ]]; then
        start_inline_spinner ""
    fi
    # lsregister lives inside CoreServices and is not on PATH.
    local lsregister="/System/Library/Frameworks/CoreServices.framework/Frameworks/LaunchServices.framework/Support/lsregister"
    if [[ -f "$lsregister" ]]; then
        local success=0
        if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then
            # Temporarily relax errexit: lsregister failure is handled below.
            set +e
            "$lsregister" -r -domain local -domain user -domain system > /dev/null 2>&1
            success=$?
            if [[ $success -ne 0 ]]; then
                # Retry without the system domain (may need extra privileges).
                "$lsregister" -r -domain local -domain user > /dev/null 2>&1
                success=$?
            fi
            set -e
        else
            success=0
        fi
        if [[ -t 1 ]]; then
            stop_inline_spinner
        fi
        if [[ $success -eq 0 ]]; then
            opt_msg "LaunchServices repaired"
            opt_msg "File associations refreshed"
        else
            echo -e " ${YELLOW}!${NC} Failed to rebuild LaunchServices"
        fi
    else
        if [[ -t 1 ]]; then
            stop_inline_spinner
        fi
        echo -e " ${YELLOW}!${NC} lsregister not found"
    fi
}
# Font cache rebuild.
# Clears the ATS font databases via `sudo atsutil databases -remove`;
# macOS rebuilds them automatically afterwards.
opt_font_cache_rebuild() {
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
        debug_operation_start "Font Cache Rebuild" "Clear and rebuild font cache"
        debug_operation_detail "Method" "Run atsutil databases -remove"
        debug_operation_detail "Expected outcome" "Fixed font display issues, removed corrupted font cache"
        debug_risk_level "LOW" "System automatically rebuilds font database"
    fi
    local success=false
    if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then
        if sudo atsutil databases -remove > /dev/null 2>&1; then
            success=true
        fi
    else
        # Dry-run always reports success without touching anything.
        success=true
    fi
    if [[ "$success" == "true" ]]; then
        opt_msg "Font cache cleared"
        opt_msg "System will rebuild font database automatically"
    else
        echo -e " ${YELLOW}!${NC} Failed to clear font cache"
    fi
}
# Removed high-risk optimizations:
# - opt_startup_items_cleanup: Risk of deleting legitimate app helpers
# - opt_dyld_cache_update: Low benefit, time-consuming, auto-managed by macOS
# - opt_system_services_refresh: Risk of data loss when killing system services
# Memory pressure relief.
# Runs `sudo purge` to release inactive memory, but only when
# is_memory_pressure_high reports warning/critical pressure.
opt_memory_pressure_relief() {
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
        debug_operation_start "Memory Pressure Relief" "Release inactive memory if pressure is high"
        debug_operation_detail "Method" "Run purge command to clear inactive memory"
        debug_operation_detail "Condition" "Only runs if memory pressure is warning/critical"
        debug_operation_detail "Expected outcome" "More available memory, improved responsiveness"
        debug_risk_level "LOW" "Safe system command, does not affect active processes"
    fi
    if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then
        # Nothing to do when pressure is normal.
        if ! is_memory_pressure_high; then
            opt_msg "Memory pressure already optimal"
            return 0
        fi
        if sudo purge > /dev/null 2>&1; then
            opt_msg "Inactive memory released"
            opt_msg "System responsiveness improved"
        else
            echo -e " ${YELLOW}!${NC} Failed to release memory pressure"
        fi
    else
        # Dry-run: report the success path without running purge.
        opt_msg "Inactive memory released"
        opt_msg "System responsiveness improved"
    fi
}
# Network stack reset (route + ARP).
# Probes the default route and DNS resolution first; only when one of them
# fails does it flush the routing table and the ARP cache (both via sudo).
opt_network_stack_optimize() {
    local route_flushed="false"
    local arp_flushed="false"
    if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then
        local route_ok=true
        local dns_ok=true
        if ! route -n get default > /dev/null 2>&1; then
            route_ok=false
        fi
        # DNS health probe via a lookup of example.com.
        if ! dscacheutil -q host -a name "example.com" > /dev/null 2>&1; then
            dns_ok=false
        fi
        if [[ "$route_ok" == "true" && "$dns_ok" == "true" ]]; then
            opt_msg "Network stack already optimal"
            return 0
        fi
        if sudo route -n flush > /dev/null 2>&1; then
            route_flushed="true"
        fi
        if sudo arp -a -d > /dev/null 2>&1; then
            arp_flushed="true"
        fi
    else
        # Dry-run: report both actions as done.
        route_flushed="true"
        arp_flushed="true"
    fi
    if [[ "$route_flushed" == "true" ]]; then
        opt_msg "Network routing table refreshed"
    fi
    if [[ "$arp_flushed" == "true" ]]; then
        opt_msg "ARP cache cleared"
    else
        # Partial success (route only) still counts as success.
        if [[ "$route_flushed" == "true" ]]; then
            return 0
        fi
        echo -e " ${YELLOW}!${NC} Failed to optimize network stack"
    fi
}
# User directory permissions repair.
# Runs `sudo diskutil resetUserPermissions / <uid>` only when
# needs_permissions_repair detects an ownership/writability problem.
opt_disk_permissions_repair() {
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
        debug_operation_start "Disk Permissions Repair" "Reset user directory permissions"
        debug_operation_detail "Method" "Run diskutil resetUserPermissions on user home directory"
        debug_operation_detail "Condition" "Only runs if permissions issues are detected"
        debug_operation_detail "Expected outcome" "Fixed file access issues, correct ownership"
        debug_risk_level "MEDIUM" "Requires sudo, modifies permissions"
    fi
    local user_id
    user_id=$(id -u)
    if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then
        if ! needs_permissions_repair; then
            opt_msg "User directory permissions already optimal"
            return 0
        fi
        if [[ -t 1 ]]; then
            start_inline_spinner "Repairing disk permissions..."
        fi
        local success=false
        if sudo diskutil resetUserPermissions / "$user_id" > /dev/null 2>&1; then
            success=true
        fi
        if [[ -t 1 ]]; then
            stop_inline_spinner
        fi
        if [[ "$success" == "true" ]]; then
            opt_msg "User directory permissions repaired"
            opt_msg "File access issues resolved"
        else
            echo -e " ${YELLOW}!${NC} Failed to repair permissions (may not be needed)"
        fi
    else
        # Dry-run: report the success path without touching permissions.
        opt_msg "User directory permissions repaired"
        opt_msg "File access issues resolved"
    fi
}
# Bluetooth reset (skip if HID/audio active).
# Restarts bluetoothd (TERM, then KILL if still alive after 1s), but refuses
# to do so while a Bluetooth keyboard/mouse is connected, while the default
# audio output is Bluetooth, or while a media app is running with any
# Bluetooth device connected (heuristic for active BT audio).
opt_bluetooth_reset() {
    if [[ "${MO_DEBUG:-}" == "1" ]]; then
        debug_operation_start "Bluetooth Reset" "Restart Bluetooth daemon"
        debug_operation_detail "Method" "Kill bluetoothd daemon (auto-restarts)"
        debug_operation_detail "Safety" "Skips if active Bluetooth keyboard/mouse/audio detected"
        debug_operation_detail "Expected outcome" "Fixed Bluetooth connectivity issues"
        debug_risk_level "LOW" "Daemon auto-restarts, connections auto-reconnect"
    fi
    local spinner_started="false"
    if [[ -t 1 ]]; then
        MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking Bluetooth..."
        spinner_started="true"
    fi
    if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then
        # Never restart the daemon out from under a BT keyboard/mouse.
        if has_bluetooth_hid_connected; then
            if [[ "$spinner_started" == "true" ]]; then
                stop_inline_spinner
            fi
            opt_msg "Bluetooth already optimal"
            return 0
        fi
        local bt_audio_active=false
        local audio_info
        audio_info=$(system_profiler SPAudioDataType 2> /dev/null || echo "")
        # Extract the block describing the default output device.
        local default_output
        default_output=$(echo "$audio_info" | awk '/Default Output Device: Yes/,/^$/' 2> /dev/null || echo "")
        if echo "$default_output" | grep -qi "Transport:.*Bluetooth"; then
            bt_audio_active=true
        fi
        if [[ "$bt_audio_active" == "false" ]]; then
            # Fallback heuristic: any connected BT device + a running media app.
            if system_profiler SPBluetoothDataType 2> /dev/null | grep -q "Connected: Yes"; then
                local -a media_apps=("Music" "Spotify" "VLC" "QuickTime Player" "TV" "Podcasts" "Safari" "Google Chrome" "Chrome" "Firefox" "Arc" "IINA" "mpv")
                for app in "${media_apps[@]}"; do
                    if pgrep -x "$app" > /dev/null 2>&1; then
                        bt_audio_active=true
                        break
                    fi
                done
            fi
        fi
        if [[ "$bt_audio_active" == "true" ]]; then
            if [[ "$spinner_started" == "true" ]]; then
                stop_inline_spinner
            fi
            opt_msg "Bluetooth already optimal"
            return 0
        fi
        if sudo pkill -TERM bluetoothd > /dev/null 2>&1; then
            # Give the daemon a second to exit gracefully before force-killing.
            sleep 1
            if pgrep -x bluetoothd > /dev/null 2>&1; then
                sudo pkill -KILL bluetoothd > /dev/null 2>&1 || true
            fi
            if [[ "$spinner_started" == "true" ]]; then
                stop_inline_spinner
            fi
            opt_msg "Bluetooth module restarted"
            opt_msg "Connectivity issues resolved"
        else
            # pkill found no bluetoothd process to signal.
            if [[ "$spinner_started" == "true" ]]; then
                stop_inline_spinner
            fi
            opt_msg "Bluetooth already optimal"
        fi
    else
        # Dry-run: report the restart without signaling anything.
        if [[ "$spinner_started" == "true" ]]; then
            stop_inline_spinner
        fi
        opt_msg "Bluetooth module restarted"
        opt_msg "Connectivity issues resolved"
    fi
}
# Spotlight index check/rebuild (only if slow).
# Probes mdfind twice; if both probes exceed 3s and the machine is on AC
# power, kicks off a full index rebuild with `sudo mdutil -E /`.
# Fix: `((slow_count++)) || true` — post-increment from 0 returns status 1
# and would trip `set -euo pipefail`.
opt_spotlight_index_optimize() {
    local spotlight_status
    spotlight_status=$(mdutil -s / 2> /dev/null || echo "")
    if echo "$spotlight_status" | grep -qi "Indexing disabled"; then
        echo -e " ${GRAY}${ICON_EMPTY}${NC} Spotlight indexing is disabled"
        return 0
    fi
    if echo "$spotlight_status" | grep -qi "Indexing enabled" && ! echo "$spotlight_status" | grep -qi "Indexing and searching disabled"; then
        local slow_count=0
        local test_start test_end test_duration
        # Two timed search probes; both must be slow to trigger a rebuild.
        for _ in 1 2; do
            test_start=$(get_epoch_seconds)
            mdfind "kMDItemFSName == 'Applications'" > /dev/null 2>&1 || true
            test_end=$(get_epoch_seconds)
            test_duration=$((test_end - test_start))
            if [[ $test_duration -gt 3 ]]; then
                ((slow_count++)) || true
            fi
            sleep 1
        done
        if [[ $slow_count -ge 2 ]]; then
            # Rebuild is expensive; never start it on battery.
            if ! is_ac_power; then
                opt_msg "Spotlight index already optimal"
                return 0
            fi
            if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then
                echo -e " ${BLUE}${NC} Spotlight search is slow, rebuilding index (may take 1-2 hours)"
                if sudo mdutil -E / > /dev/null 2>&1; then
                    opt_msg "Spotlight index rebuild started"
                    echo -e " ${GRAY}Indexing will continue in background${NC}"
                else
                    echo -e " ${YELLOW}!${NC} Failed to rebuild Spotlight index"
                fi
            else
                opt_msg "Spotlight index rebuild started"
            fi
        else
            opt_msg "Spotlight index already optimal"
        fi
    else
        opt_msg "Spotlight index verified"
    fi
}
# Dock cache refresh.
# Removes Dock *.db cache files, touches the Dock plist, and restarts the
# Dock process (macOS relaunches it automatically).
opt_dock_refresh() {
    local dock_support="$HOME/Library/Application Support/Dock"
    local refreshed=false
    if [[ -d "$dock_support" ]]; then
        while IFS= read -r db_file; do
            if [[ -f "$db_file" ]]; then
                safe_remove "$db_file" true > /dev/null 2>&1 && refreshed=true
            fi
        done < <(find "$dock_support" -name "*.db" -type f 2> /dev/null || true)
    fi
    # Touch the plist so cfprefsd notices the change.
    local dock_plist="$HOME/Library/Preferences/com.apple.dock.plist"
    if [[ -f "$dock_plist" ]]; then
        touch "$dock_plist" 2> /dev/null || true
    fi
    if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then
        killall Dock 2> /dev/null || true
    fi
    if [[ "$refreshed" == "true" ]]; then
        opt_msg "Dock cache cleared"
    fi
    opt_msg "Dock refreshed"
}
# Dispatch optimization by action name.
# $1 = action identifier; $2 (optional) is accepted for call-site
# compatibility but currently unused (the previous `local path` binding was
# dead code, shellcheck SC2034). Returns 1 on an unknown action.
execute_optimization() {
    local action="$1"
    case "$action" in
        system_maintenance) opt_system_maintenance ;;
        cache_refresh) opt_cache_refresh ;;
        saved_state_cleanup) opt_saved_state_cleanup ;;
        fix_broken_configs) opt_fix_broken_configs ;;
        network_optimization) opt_network_optimization ;;
        sqlite_vacuum) opt_sqlite_vacuum ;;
        launch_services_rebuild) opt_launch_services_rebuild ;;
        font_cache_rebuild) opt_font_cache_rebuild ;;
        dock_refresh) opt_dock_refresh ;;
        memory_pressure_relief) opt_memory_pressure_relief ;;
        network_stack_optimize) opt_network_stack_optimize ;;
        disk_permissions_repair) opt_disk_permissions_repair ;;
        bluetooth_reset) opt_bluetooth_reset ;;
        spotlight_index_optimize) opt_spotlight_index_optimize ;;
        *)
            echo -e "${YELLOW}${ICON_ERROR}${NC} Unknown action: $action"
            return 1
            ;;
    esac
}

View File

@@ -1,192 +0,0 @@
#!/bin/bash
# App selection functionality
set -euo pipefail
# Note: get_display_width() is now defined in lib/core/ui.sh
# Format app info for display
# Renders one menu row: "<name, padded> <size, 9 cols> | <last used>".
# $1 display name, $2 human-readable size, $3 last-used string,
# $4 terminal width (optional), $5 pre-computed name column width (optional,
# lets callers avoid recomputing widths inside loops).
format_app_display() {
    local display_name="$1" size="$2" last_used="$3"
    # Use common function from ui.sh to format last used time
    local compact_last_used
    compact_last_used=$(format_last_used_summary "$last_used")
    # Format size
    local size_str="Unknown"
    [[ "$size" != "0" && "$size" != "" && "$size" != "Unknown" ]] && size_str="$size"
    # Calculate available width for app name based on terminal width
    # Accept pre-calculated max_name_width (5th param) to avoid recalculation in loops
    local terminal_width="${4:-$(tput cols 2> /dev/null || echo 80)}"
    local max_name_width="${5:-}"
    local available_width
    if [[ -n "$max_name_width" ]]; then
        # Use pre-calculated width from caller
        available_width=$max_name_width
    else
        # Fallback: calculate it (slower, but works for standalone calls)
        # Fixed elements: " ○ " (4) + " " (1) + size (9) + " | " (3) + max_last (7) = 24
        local fixed_width=24
        available_width=$((terminal_width - fixed_width))
        # Dynamic minimum for better spacing on wide terminals
        local min_width=18
        if [[ $terminal_width -ge 120 ]]; then
            min_width=48
        elif [[ $terminal_width -ge 100 ]]; then
            min_width=38
        elif [[ $terminal_width -ge 80 ]]; then
            min_width=25
        fi
        [[ $available_width -lt $min_width ]] && available_width=$min_width
        [[ $available_width -gt 60 ]] && available_width=60
    fi
    # Truncate long names if needed (based on display width, not char count)
    local truncated_name
    truncated_name=$(truncate_by_display_width "$display_name" "$available_width")
    # Get actual display width after truncation
    local current_display_width
    current_display_width=$(get_display_width "$truncated_name")
    # Calculate padding needed
    # Formula: char_count + (available_width - display_width) = padding to add
    # (printf pads by character count, while wide glyphs occupy two columns,
    # so the %-*s width must be corrected by the difference.)
    local char_count=${#truncated_name}
    local padding_needed=$((available_width - current_display_width))
    local printf_width=$((char_count + padding_needed))
    # Use dynamic column width with corrected padding
    printf "%-*s %9s | %s" "$printf_width" "$truncated_name" "$size_str" "$compact_last_used"
}
# Global variable to store selection result (bash 3.2 compatible)
# Holds a comma-separated list of selected menu indices after
# paginated_multi_select returns; empty string means nothing selected.
MOLE_SELECTION_RESULT=""
# Main app selection function
# Builds formatted menu rows from the caller-provided apps_data array,
# runs the paginated multi-select menu, and fills the caller's
# selected_apps array with the chosen entries.
# Returns: 0 on selection, 1 on cancel/empty, 10 on refresh request.
# Fixes: SC2155 (local+assignment masking exit status) on terminal_width /
# name_width, and errexit-safe `((idx++)) || true` (post-increment from 0
# returns status 1), matching the convention used elsewhere in the project.
# shellcheck disable=SC2154 # apps_data is set by caller
select_apps_for_uninstall() {
    if [[ ${#apps_data[@]} -eq 0 ]]; then
        log_warning "No applications available for uninstallation"
        return 1
    fi
    # Build menu options
    # Show loading for large lists (formatting can be slow due to width calculations)
    local app_count=${#apps_data[@]}
    local terminal_width
    terminal_width=$(tput cols 2> /dev/null || echo 80)
    if [[ $app_count -gt 100 ]]; then
        if [[ -t 2 ]]; then
            printf "\rPreparing %d applications... " "$app_count" >&2
        fi
    fi
    # Pre-scan to get actual max name width
    local max_name_width=0
    local name_width
    for app_data in "${apps_data[@]}"; do
        IFS='|' read -r _ _ display_name _ _ _ _ <<< "$app_data"
        name_width=$(get_display_width "$display_name")
        [[ $name_width -gt $max_name_width ]] && max_name_width=$name_width
    done
    # Constrain based on terminal width: fixed=24, min varies by terminal width, max=60
    local fixed_width=24
    local available=$((terminal_width - fixed_width))
    # Dynamic minimum: wider terminals get larger minimum for better spacing
    local min_width=18
    if [[ $terminal_width -ge 120 ]]; then
        min_width=48 # Wide terminals: very generous spacing
    elif [[ $terminal_width -ge 100 ]]; then
        min_width=38 # Medium-wide terminals: generous spacing
    elif [[ $terminal_width -ge 80 ]]; then
        min_width=25 # Standard terminals
    fi
    [[ $max_name_width -lt $min_width ]] && max_name_width=$min_width
    [[ $available -lt $max_name_width ]] && max_name_width=$available
    [[ $max_name_width -gt 60 ]] && max_name_width=60
    local -a menu_options=()
    # Prepare metadata (comma-separated) for sorting/filtering inside the menu
    local epochs_csv=""
    local sizekb_csv=""
    local idx=0
    for app_data in "${apps_data[@]}"; do
        # Keep extended field 7 (size_kb) if present
        IFS='|' read -r epoch _ display_name _ size last_used size_kb <<< "$app_data"
        menu_options+=("$(format_app_display "$display_name" "$size" "$last_used" "$terminal_width" "$max_name_width")")
        # Build csv lists (avoid trailing commas)
        if [[ $idx -eq 0 ]]; then
            epochs_csv="${epoch:-0}"
            sizekb_csv="${size_kb:-0}"
        else
            epochs_csv+=",${epoch:-0}"
            sizekb_csv+=",${size_kb:-0}"
        fi
        ((idx++)) || true
    done
    # Clear loading message
    if [[ $app_count -gt 100 ]]; then
        if [[ -t 2 ]]; then
            printf "\r\033[K" >&2
        fi
    fi
    # Expose metadata for the paginated menu (optional inputs)
    # - MOLE_MENU_META_EPOCHS: numeric last_used_epoch per item
    # - MOLE_MENU_META_SIZEKB: numeric size in KB per item
    # The menu will gracefully fallback if these are unset or malformed.
    export MOLE_MENU_META_EPOCHS="$epochs_csv"
    export MOLE_MENU_META_SIZEKB="$sizekb_csv"
    # Optional: allow default sort override via env (date|name|size)
    # export MOLE_MENU_SORT_DEFAULT="${MOLE_MENU_SORT_DEFAULT:-date}"
    # Use paginated menu - result will be stored in MOLE_SELECTION_RESULT
    # Note: paginated_multi_select enters alternate screen and handles clearing
    MOLE_SELECTION_RESULT=""
    paginated_multi_select "Select Apps to Remove" "${menu_options[@]}"
    local exit_code=$?
    # Clean env leakage for safety
    unset MOLE_MENU_META_EPOCHS MOLE_MENU_META_SIZEKB
    # leave MOLE_MENU_SORT_DEFAULT untouched if user set it globally
    # Refresh signal handling
    if [[ $exit_code -eq 10 ]]; then
        return 10
    fi
    if [[ $exit_code -ne 0 ]]; then
        return 1
    fi
    if [[ -z "$MOLE_SELECTION_RESULT" ]]; then
        echo "No apps selected"
        return 1
    fi
    # Build selected apps array (global variable in bin/uninstall.sh)
    selected_apps=()
    # Parse indices and build selected apps array
    IFS=',' read -r -a indices_array <<< "$MOLE_SELECTION_RESULT"
    for idx in "${indices_array[@]}"; do
        if [[ "$idx" =~ ^[0-9]+$ ]] && [[ $idx -ge 0 ]] && [[ $idx -lt ${#apps_data[@]} ]]; then
            selected_apps+=("${apps_data[idx]}")
        fi
    done
    return 0
}
# Export function for external use
# Guard: this file is a library; refuse direct execution.
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    echo "This is a library file. Source it from other scripts." >&2
    exit 1
fi

View File

@@ -1,911 +0,0 @@
#!/bin/bash
# Paginated menu with arrow key navigation
set -euo pipefail
# Terminal control functions
# Switch the terminal to the alternate screen buffer.
# Silently a no-op when stdout is not a TTY or tput is unavailable.
enter_alt_screen() {
    [[ -t 1 ]] && command -v tput > /dev/null 2>&1 && tput smcup 2> /dev/null
    return 0
}
# Return from the alternate screen buffer to the normal one.
# Silently a no-op when stdout is not a TTY or tput is unavailable.
leave_alt_screen() {
    [[ -t 1 ]] && command -v tput > /dev/null 2>&1 && tput rmcup 2> /dev/null
    return 0
}
# Get terminal height with fallback
_pm_get_terminal_height() {
# Echo the terminal's row count. Queries stty first, then tput, and
# finally falls back to the classic default of 24 rows.
local height=0
# Try stty size first (most reliable, real-time)
# Use </dev/tty to ensure we read from terminal even if stdin is redirected
if [[ -t 0 ]] || [[ -t 2 ]]; then
height=$(stty size < /dev/tty 2> /dev/null | awk '{print $1}')
fi
# Fallback to tput; also covers stty producing empty output (e.g. /dev/tty
# inaccessible) or a non-positive value.
if [[ -z "$height" || $height -le 0 ]]; then
if command -v tput > /dev/null 2>&1; then
height=$(tput lines 2> /dev/null || echo "24")
else
height=24
fi
fi
echo "$height"
}
# Calculate dynamic items per page based on terminal height
# How many list rows fit on screen: terminal height minus 5 reserved rows
# (header + two blanks + footer that may wrap to 2 lines), clamped to 1..50.
_pm_calculate_items_per_page() {
    local rows usable
    rows=$(_pm_get_terminal_height)
    usable=$((rows - 5))
    if ((usable < 1)); then
        usable=1
    elif ((usable > 50)); then
        usable=50
    fi
    echo "$usable"
}
# Parse CSV into newline list (Bash 3.2)
# Print each comma-separated field of $1 on its own line (Bash 3.2 safe).
# Prints nothing for an empty/missing argument.
#
# Fix: the previous implementation iterated over the unquoted expansion of
# the CSV, which subjected every field to pathname expansion — a field such
# as "*" would glob the current directory. Splitting with `read -ra` keeps
# fields verbatim and keeps the IFS change scoped to the read itself.
_pm_parse_csv_to_array() {
    local csv="${1:-}"
    if [[ -z "$csv" ]]; then
        return 0
    fi
    local -a fields=()
    IFS=',' read -ra fields <<< "$csv"
    local field
    for field in "${fields[@]}"; do
        printf "%s\n" "$field"
    done
}
# Main paginated multi-select menu function
paginated_multi_select() {
local title="$1"
shift
local -a items=("$@")
local external_alt_screen=false
if [[ "${MOLE_MANAGED_ALT_SCREEN:-}" == "1" || "${MOLE_MANAGED_ALT_SCREEN:-}" == "true" ]]; then
external_alt_screen=true
fi
# Validation
if [[ ${#items[@]} -eq 0 ]]; then
echo "No items provided" >&2
return 1
fi
local total_items=${#items[@]}
local items_per_page=$(_pm_calculate_items_per_page)
local cursor_pos=0
local top_index=0
local filter_query=""
local filter_mode="false" # filter mode toggle
local sort_mode="${MOLE_MENU_SORT_MODE:-${MOLE_MENU_SORT_DEFAULT:-date}}" # date|name|size
local sort_reverse="${MOLE_MENU_SORT_REVERSE:-false}"
# Live query vs applied query
local applied_query=""
local searching="false"
# Metadata (optional)
# epochs[i] -> last_used_epoch (numeric) for item i
# sizekb[i] -> size in KB (numeric) for item i
local -a epochs=()
local -a sizekb=()
local has_metadata="false"
if [[ -n "${MOLE_MENU_META_EPOCHS:-}" ]]; then
while IFS= read -r v; do epochs+=("${v:-0}"); done < <(_pm_parse_csv_to_array "$MOLE_MENU_META_EPOCHS")
has_metadata="true"
fi
if [[ -n "${MOLE_MENU_META_SIZEKB:-}" ]]; then
while IFS= read -r v; do sizekb+=("${v:-0}"); done < <(_pm_parse_csv_to_array "$MOLE_MENU_META_SIZEKB")
has_metadata="true"
fi
# If no metadata, force name sorting and disable sorting controls
if [[ "$has_metadata" == "false" && "$sort_mode" != "name" ]]; then
sort_mode="name"
fi
# Index mappings
local -a orig_indices=()
local -a view_indices=()
local i
for ((i = 0; i < total_items; i++)); do
orig_indices[i]=$i
view_indices[i]=$i
done
# Escape for shell globbing without upsetting highlighters
# Backslash-escape glob metacharacters (\ * ? [ ]) in $1 so the result can
# be embedded safely inside a shell pattern. Printed without a newline.
_pm_escape_glob() {
    local src="${1-}" escaped="" ch
    local pos len=${#src}
    for ((pos = 0; pos < len; pos++)); do
        ch="${src:pos:1}"
        case "$ch" in
            $'\\' | '*' | '?' | '[' | ']') escaped+="\\${ch}" ;;
            *) escaped+="$ch" ;;
        esac
    done
    printf '%s' "$escaped"
}
# Case-insensitive fuzzy match (substring search)
_pm_match() {
# Return 0 when $2 occurs (case-insensitively) anywhere in $1.
# The query is glob-escaped first so user input is matched literally.
local hay="$1" q="$2"
q="$(_pm_escape_glob "$q")"
local pat="*${q}*"
shopt -s nocasematch
local ok=1
# shellcheck disable=SC2254 # intentional glob match with a computed pattern
case "$hay" in
$pat) ok=0 ;;
esac
# NOTE(review): this unconditionally clears nocasematch even if a caller had
# it enabled — harmless inside this menu, but confirm before reusing.
shopt -u nocasematch
return $ok
}
local -a selected=()
local selected_count=0 # Cache selection count to avoid O(n) loops on every draw
# Initialize selection array
for ((i = 0; i < total_items; i++)); do
selected[i]=false
done
if [[ -n "${MOLE_PRESELECTED_INDICES:-}" ]]; then
local cleaned_preselect="${MOLE_PRESELECTED_INDICES//[[:space:]]/}"
local -a initial_indices=()
IFS=',' read -ra initial_indices <<< "$cleaned_preselect"
for idx in "${initial_indices[@]}"; do
if [[ "$idx" =~ ^[0-9]+$ && $idx -ge 0 && $idx -lt $total_items ]]; then
# Only count if not already selected (handles duplicates)
if [[ ${selected[idx]} != true ]]; then
selected[idx]=true
((selected_count++))
fi
fi
done
fi
# Preserve original TTY settings so we can restore them reliably
local original_stty=""
if [[ -t 0 ]] && command -v stty > /dev/null 2>&1; then
original_stty=$(stty -g 2> /dev/null || echo "")
fi
restore_terminal() {
# Re-show the cursor and restore the saved TTY modes; fall back to
# `stty sane` (then to re-enabling echo/icanon) when the original settings
# were not captured. Leaves the alternate screen only when this menu —
# not an outer caller — entered it.
show_cursor
if [[ -n "${original_stty-}" ]]; then
stty "${original_stty}" 2> /dev/null || stty sane 2> /dev/null || stty echo icanon 2> /dev/null || true
else
stty sane 2> /dev/null || stty echo icanon 2> /dev/null || true
fi
if [[ "${external_alt_screen:-false}" == false ]]; then
leave_alt_screen
fi
}
# Cleanup function
cleanup() {
# Idempotent teardown: clear traps first so cleanup cannot re-enter, persist
# the user's sort preferences for the next menu invocation, restore the
# terminal, and drop the raw-character input override used by filter mode.
trap - EXIT INT TERM
export MOLE_MENU_SORT_MODE="$sort_mode"
export MOLE_MENU_SORT_REVERSE="$sort_reverse"
restore_terminal
unset MOLE_READ_KEY_FORCE_CHAR
}
# Interrupt handler
# shellcheck disable=SC2329
handle_interrupt() {
cleanup
exit 130 # Standard exit code for Ctrl+C
}
trap cleanup EXIT
trap handle_interrupt INT TERM
# Setup terminal - preserve interrupt character
stty -echo -icanon intr ^C 2> /dev/null || true
if [[ $external_alt_screen == false ]]; then
enter_alt_screen
# Clear screen once on entry to alt screen
printf "\033[2J\033[H" >&2
else
printf "\033[H" >&2
fi
hide_cursor
# Helper functions
# shellcheck disable=SC2329
print_line() { printf "\r\033[2K%s\n" "$1" >&2; }
# Print footer lines wrapping only at separators
_print_wrapped_controls() {
# Print footer control segments to stderr, joined by the separator and
# wrapped to the terminal width only at separator boundaries so a segment
# is never split mid-word.
# $1: separator (may contain ANSI codes)  $2..: footer segments
local sep="$1"
shift
local -a segs=("$@")
local cols="${COLUMNS:-}"
[[ -z "$cols" ]] && cols=$(tput cols 2> /dev/null || echo 80)
[[ "$cols" =~ ^[0-9]+$ ]] || cols=80
# Display width of $1 with ANSI escape sequences stripped out.
_strip_ansi_len() {
local text="$1"
local stripped
stripped=$(printf "%s" "$text" | LC_ALL=C awk '{gsub(/\033\[[0-9;]*[A-Za-z]/,""); print}' || true)
[[ -z "$stripped" ]] && stripped="$text"
printf "%d" "${#stripped}"
}
local line="" s candidate
local clear_line=$'\r\033[2K'
# Greedy wrap: accumulate segments until adding the next would exceed the
# width, then flush the current line and start a new one with that segment.
for s in "${segs[@]}"; do
if [[ -z "$line" ]]; then
candidate="$s"
else
candidate="$line${sep}${s}"
fi
local candidate_len
candidate_len=$(_strip_ansi_len "$candidate")
[[ -z "$candidate_len" ]] && candidate_len=0
if ((candidate_len > cols)); then
printf "%s%s\n" "$clear_line" "$line" >&2
line="$s"
else
line="$candidate"
fi
done
# Flush the final accumulated line.
printf "%s%s\n" "$clear_line" "$line" >&2
}
# Rebuild the view_indices applying filter and sort
rebuild_view() {
    # Recompute view_indices (the ordered, filtered subset of item indices)
    # from the active query and sort mode, then clamp the scroll window and
    # cursor back into the visible range.
    #
    # Filter: in filter mode the live query applies; otherwise the last
    # applied query. An empty query shows everything. (The previous version
    # duplicated an identical filtering loop in both branches; they differed
    # only in which query variable was consulted.)
    local effective_query=""
    if [[ "$filter_mode" == "true" ]]; then
        effective_query="$filter_query"
    else
        effective_query="$applied_query"
    fi
    local -a filtered=()
    if [[ -z "$effective_query" ]]; then
        filtered=("${orig_indices[@]}")
    else
        local idx
        for ((idx = 0; idx < total_items; idx++)); do
            if _pm_match "${items[idx]}" "$effective_query"; then
                filtered+=("$idx")
            fi
        done
    fi
    # Sort (skip if no metadata)
    if [[ "$has_metadata" == "false" ]]; then
        # No metadata: keep filtered order as-is
        view_indices=("${filtered[@]}")
    elif [[ ${#filtered[@]} -eq 0 ]]; then
        view_indices=()
    else
        # Build the flag set for sort(1) on the key column
        local sort_key
        if [[ "$sort_mode" == "date" ]]; then
            # Date: ascending by default (oldest first)
            sort_key="-k1,1n"
            [[ "$sort_reverse" == "true" ]] && sort_key="-k1,1nr"
        elif [[ "$sort_mode" == "size" ]]; then
            # Size: descending by default (largest first)
            sort_key="-k1,1nr"
            [[ "$sort_reverse" == "true" ]] && sort_key="-k1,1n"
        else
            # Name: ascending by default (A to Z), case-folded
            sort_key="-k1,1f"
            [[ "$sort_reverse" == "true" ]] && sort_key="-k1,1fr"
        fi
        # Write "key<TAB>index" pairs to a temp file and let sort(1) order them
        local tmpfile
        tmpfile=$(mktemp 2> /dev/null) || tmpfile=""
        if [[ -n "$tmpfile" ]]; then
            local k id
            for id in "${filtered[@]}"; do
                case "$sort_mode" in
                    date) k="${epochs[id]:-0}" ;;
                    size) k="${sizekb[id]:-0}" ;;
                    name | *) k="${items[id]}|${id}" ;;
                esac
                printf "%s\t%s\n" "$k" "$id" >> "$tmpfile"
            done
            view_indices=()
            while IFS=$'\t' read -r _key _id; do
                [[ -z "$_id" ]] && continue
                view_indices+=("$_id")
            done < <(LC_ALL=C sort -t $'\t' $sort_key -- "$tmpfile" 2> /dev/null)
            rm -f "$tmpfile"
        else
            # mktemp failed: fall back to the unsorted filtered list
            view_indices=("${filtered[@]}")
        fi
    fi
    # Clamp scroll offset and cursor into the visible range
    local visible_count=${#view_indices[@]}
    local max_top
    if [[ $visible_count -gt $items_per_page ]]; then
        max_top=$((visible_count - items_per_page))
    else
        max_top=0
    fi
    [[ $top_index -gt $max_top ]] && top_index=$max_top
    local current_visible=$((visible_count - top_index))
    [[ $current_visible -gt $items_per_page ]] && current_visible=$items_per_page
    if [[ $cursor_pos -ge $current_visible ]]; then
        cursor_pos=$((current_visible > 0 ? current_visible - 1 : 0))
    fi
    [[ $cursor_pos -lt 0 ]] && cursor_pos=0
}
# Initial view (default sort)
rebuild_view
render_item() {
# Render one visible menu row (checkbox + label) to stderr, highlighted
# when it is the row under the cursor. Draws nothing for rows past the end
# of the filtered view.
# $1: visible row index (0..items_per_page-1 in current window)
# $2: is_current flag
local vrow=$1 is_current=$2
local idx=$((top_index + vrow))
local real="${view_indices[idx]:--1}"
[[ $real -lt 0 ]] && return
local checkbox="$ICON_EMPTY"
[[ ${selected[real]} == true ]] && checkbox="$ICON_SOLID"
if [[ $is_current == true ]]; then
printf "\r\033[2K${CYAN}${ICON_ARROW} %s %s${NC}\n" "$checkbox" "${items[real]}" >&2
else
printf "\r\033[2K %s %s\n" "$checkbox" "${items[real]}" >&2
fi
}
# Draw the complete menu
draw_menu() {
# Recalculate items_per_page dynamically to handle window resize
items_per_page=$(_pm_calculate_items_per_page)
printf "\033[H" >&2
local clear_line="\r\033[2K"
# Use cached selection count (maintained incrementally on toggle)
# No need to loop through all items anymore!
# Header only
printf "${clear_line}${PURPLE_BOLD}%s${NC} ${GRAY}%d/%d selected${NC}\n" "${title}" "$selected_count" "$total_items" >&2
# Visible slice
local visible_total=${#view_indices[@]}
if [[ $visible_total -eq 0 ]]; then
if [[ "$filter_mode" == "true" ]]; then
# While editing: do not show "No items available"
for ((i = 0; i < items_per_page; i++)); do
printf "${clear_line}\n" >&2
done
printf "${clear_line}${GRAY}Type to filter | Delete | Enter Confirm | ESC Cancel${NC}\n" >&2
printf "${clear_line}" >&2
return
else
if [[ "$searching" == "true" ]]; then
printf "${clear_line}Searching…\n" >&2
for ((i = 0; i < items_per_page; i++)); do
printf "${clear_line}\n" >&2
done
printf "${clear_line}${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN} | Space | Enter | / Filter | Q Exit${NC}\n" >&2
printf "${clear_line}" >&2
return
else
# Post-search: truly empty list
printf "${clear_line}No items available\n" >&2
for ((i = 0; i < items_per_page; i++)); do
printf "${clear_line}\n" >&2
done
printf "${clear_line}${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN} | Space | Enter | / Filter | Q Exit${NC}\n" >&2
printf "${clear_line}" >&2
return
fi
fi
fi
local visible_count=$((visible_total - top_index))
[[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
[[ $visible_count -le 0 ]] && visible_count=1
if [[ $cursor_pos -ge $visible_count ]]; then
cursor_pos=$((visible_count - 1))
[[ $cursor_pos -lt 0 ]] && cursor_pos=0
fi
printf "${clear_line}\n" >&2
# Items for current window
local start_idx=$top_index
local end_idx=$((top_index + items_per_page - 1))
[[ $end_idx -ge $visible_total ]] && end_idx=$((visible_total - 1))
for ((i = start_idx; i <= end_idx; i++)); do
[[ $i -lt 0 ]] && continue
local is_current=false
[[ $((i - start_idx)) -eq $cursor_pos ]] && is_current=true
render_item $((i - start_idx)) $is_current
done
# Fill empty slots to clear previous content
local items_shown=$((end_idx - start_idx + 1))
[[ $items_shown -lt 0 ]] && items_shown=0
for ((i = items_shown; i < items_per_page; i++)); do
printf "${clear_line}\n" >&2
done
printf "${clear_line}\n" >&2
# Build sort and filter status
local sort_label=""
case "$sort_mode" in
date) sort_label="Date" ;;
name) sort_label="Name" ;;
size) sort_label="Size" ;;
esac
local sort_status="${sort_label}"
local filter_status=""
if [[ "$filter_mode" == "true" ]]; then
filter_status="${filter_query:-_}"
elif [[ -n "$applied_query" ]]; then
filter_status="${applied_query}"
else
filter_status="—"
fi
# Footer: single line with controls
local sep=" ${GRAY}|${NC} "
# Helper to calculate display length without ANSI codes
_calc_len() {
local text="$1"
local stripped
stripped=$(printf "%s" "$text" | LC_ALL=C awk '{gsub(/\033\[[0-9;]*[A-Za-z]/,""); print}')
printf "%d" "${#stripped}"
}
# Common menu items
local nav="${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN}${NC}"
local space_select="${GRAY}Space Select${NC}"
local space="${GRAY}Space${NC}"
local enter="${GRAY}Enter${NC}"
local exit="${GRAY}Q Exit${NC}"
if [[ "$filter_mode" == "true" ]]; then
# Filter mode: simple controls without sort
local -a _segs_filter=(
"${GRAY}Search: ${filter_status}${NC}"
"${GRAY}Delete${NC}"
"${GRAY}Enter Confirm${NC}"
"${GRAY}ESC Cancel${NC}"
)
_print_wrapped_controls "$sep" "${_segs_filter[@]}"
else
# Normal mode - prepare dynamic items
local reverse_arrow="↑"
[[ "$sort_reverse" == "true" ]] && reverse_arrow="↓"
local filter_text="/ Search"
[[ -n "$applied_query" ]] && filter_text="/ Clear"
local refresh="${GRAY}R Refresh${NC}"
local search="${GRAY}${filter_text}${NC}"
local sort_ctrl="${GRAY}S ${sort_status}${NC}"
local order_ctrl="${GRAY}O ${reverse_arrow}${NC}"
if [[ "$has_metadata" == "true" ]]; then
if [[ -n "$applied_query" ]]; then
# Filtering active: hide sort controls
local -a _segs_all=("$nav" "$space" "$enter" "$refresh" "$search" "$exit")
_print_wrapped_controls "$sep" "${_segs_all[@]}"
else
# Normal: show full controls with dynamic reduction
local term_width="${COLUMNS:-}"
[[ -z "$term_width" ]] && term_width=$(tput cols 2> /dev/null || echo 80)
[[ "$term_width" =~ ^[0-9]+$ ]] || term_width=80
# Level 0: Full controls
local -a _segs=("$nav" "$space_select" "$enter" "$refresh" "$search" "$sort_ctrl" "$order_ctrl" "$exit")
# Calculate width
local total_len=0 seg_count=${#_segs[@]}
for i in "${!_segs[@]}"; do
total_len=$((total_len + $(_calc_len "${_segs[i]}")))
[[ $i -lt $((seg_count - 1)) ]] && total_len=$((total_len + 3))
done
# Level 1: Remove "Space Select"
if [[ $total_len -gt $term_width ]]; then
_segs=("$nav" "$enter" "$refresh" "$search" "$sort_ctrl" "$order_ctrl" "$exit")
total_len=0
seg_count=${#_segs[@]}
for i in "${!_segs[@]}"; do
total_len=$((total_len + $(_calc_len "${_segs[i]}")))
[[ $i -lt $((seg_count - 1)) ]] && total_len=$((total_len + 3))
done
# Level 2: Remove "S ${sort_status}"
if [[ $total_len -gt $term_width ]]; then
_segs=("$nav" "$enter" "$refresh" "$search" "$order_ctrl" "$exit")
fi
fi
_print_wrapped_controls "$sep" "${_segs[@]}"
fi
else
# Without metadata: basic controls
local -a _segs_simple=("$nav" "$space_select" "$enter" "$refresh" "$search" "$exit")
_print_wrapped_controls "$sep" "${_segs_simple[@]}"
fi
fi
printf "${clear_line}" >&2
}
# Track previous cursor position for incremental rendering
local prev_cursor_pos=$cursor_pos
local prev_top_index=$top_index
local need_full_redraw=true
# Main interaction loop
while true; do
if [[ "$need_full_redraw" == "true" ]]; then
draw_menu
need_full_redraw=false
# Update tracking variables after full redraw
prev_cursor_pos=$cursor_pos
prev_top_index=$top_index
fi
local key
key=$(read_key)
case "$key" in
"QUIT")
if [[ "$filter_mode" == "true" ]]; then
filter_mode="false"
unset MOLE_READ_KEY_FORCE_CHAR
filter_query=""
applied_query=""
top_index=0
cursor_pos=0
rebuild_view
need_full_redraw=true
continue
fi
cleanup
return 1
;;
"UP")
if [[ ${#view_indices[@]} -eq 0 ]]; then
:
elif [[ $cursor_pos -gt 0 ]]; then
# Simple cursor move - only redraw affected rows
local old_cursor=$cursor_pos
((cursor_pos--))
local new_cursor=$cursor_pos
# Calculate terminal row positions (+3: row 1=header, row 2=blank, row 3=first item)
local old_row=$((old_cursor + 3))
local new_row=$((new_cursor + 3))
# Quick redraw: update only the two affected rows
printf "\033[%d;1H" "$old_row" >&2
render_item "$old_cursor" false
printf "\033[%d;1H" "$new_row" >&2
render_item "$new_cursor" true
# CRITICAL: Move cursor to footer to avoid visual artifacts
printf "\033[%d;1H" "$((items_per_page + 4))" >&2
prev_cursor_pos=$cursor_pos
continue # Skip full redraw
elif [[ $top_index -gt 0 ]]; then
((top_index--))
prev_cursor_pos=$cursor_pos
prev_top_index=$top_index
need_full_redraw=true # Scrolling requires full redraw
fi
;;
"DOWN")
if [[ ${#view_indices[@]} -eq 0 ]]; then
:
else
local absolute_index=$((top_index + cursor_pos))
local last_index=$((${#view_indices[@]} - 1))
if [[ $absolute_index -lt $last_index ]]; then
local visible_count=$((${#view_indices[@]} - top_index))
[[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
if [[ $cursor_pos -lt $((visible_count - 1)) ]]; then
# Simple cursor move - only redraw affected rows
local old_cursor=$cursor_pos
((cursor_pos++))
local new_cursor=$cursor_pos
# Calculate terminal row positions (+3: row 1=header, row 2=blank, row 3=first item)
local old_row=$((old_cursor + 3))
local new_row=$((new_cursor + 3))
# Quick redraw: update only the two affected rows
printf "\033[%d;1H" "$old_row" >&2
render_item "$old_cursor" false
printf "\033[%d;1H" "$new_row" >&2
render_item "$new_cursor" true
# CRITICAL: Move cursor to footer to avoid visual artifacts
printf "\033[%d;1H" "$((items_per_page + 4))" >&2
prev_cursor_pos=$cursor_pos
continue # Skip full redraw
elif [[ $((top_index + visible_count)) -lt ${#view_indices[@]} ]]; then
((top_index++))
visible_count=$((${#view_indices[@]} - top_index))
[[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
if [[ $cursor_pos -ge $visible_count ]]; then
cursor_pos=$((visible_count - 1))
fi
prev_cursor_pos=$cursor_pos
prev_top_index=$top_index
need_full_redraw=true # Scrolling requires full redraw
fi
fi
fi
;;
"SPACE")
local idx=$((top_index + cursor_pos))
if [[ $idx -lt ${#view_indices[@]} ]]; then
local real="${view_indices[idx]}"
if [[ ${selected[real]} == true ]]; then
selected[real]=false
((selected_count--))
else
selected[real]=true
((selected_count++))
fi
# Incremental update: only redraw header (for count) and current row
# Header is at row 1
printf "\033[1;1H\033[2K${PURPLE_BOLD}%s${NC} ${GRAY}%d/%d selected${NC}\n" "${title}" "$selected_count" "$total_items" >&2
# Redraw current item row (+3: row 1=header, row 2=blank, row 3=first item)
local item_row=$((cursor_pos + 3))
printf "\033[%d;1H" "$item_row" >&2
render_item "$cursor_pos" true
# Move cursor to footer to avoid visual artifacts (items + header + 2 blanks)
printf "\033[%d;1H" "$((items_per_page + 4))" >&2
continue # Skip full redraw
fi
;;
"RETRY")
# 'R' toggles reverse order (only if metadata available)
if [[ "$has_metadata" == "true" ]]; then
if [[ "$sort_reverse" == "true" ]]; then
sort_reverse="false"
else
sort_reverse="true"
fi
rebuild_view
need_full_redraw=true
fi
;;
"CHAR:s" | "CHAR:S")
if [[ "$filter_mode" == "true" ]]; then
local ch="${key#CHAR:}"
filter_query+="$ch"
need_full_redraw=true
elif [[ "$has_metadata" == "true" ]]; then
# Cycle sort mode (only if metadata available)
case "$sort_mode" in
date) sort_mode="name" ;;
name) sort_mode="size" ;;
size) sort_mode="date" ;;
esac
rebuild_view
need_full_redraw=true
fi
;;
"FILTER")
# / key: toggle between filter and return
if [[ -n "$applied_query" ]]; then
# Already filtering, clear and return to full list
applied_query=""
filter_query=""
top_index=0
cursor_pos=0
rebuild_view
need_full_redraw=true
else
# Enter filter mode
filter_mode="true"
export MOLE_READ_KEY_FORCE_CHAR=1
filter_query=""
top_index=0
cursor_pos=0
rebuild_view
need_full_redraw=true
fi
;;
"CHAR:j")
if [[ "$filter_mode" != "true" ]]; then
# Down navigation
if [[ ${#view_indices[@]} -gt 0 ]]; then
local absolute_index=$((top_index + cursor_pos))
local last_index=$((${#view_indices[@]} - 1))
if [[ $absolute_index -lt $last_index ]]; then
local visible_count=$((${#view_indices[@]} - top_index))
[[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
if [[ $cursor_pos -lt $((visible_count - 1)) ]]; then
((cursor_pos++))
elif [[ $((top_index + visible_count)) -lt ${#view_indices[@]} ]]; then
((top_index++))
fi
fi
fi
else
filter_query+="j"
fi
;;
"CHAR:k")
if [[ "$filter_mode" != "true" ]]; then
# Up navigation
if [[ ${#view_indices[@]} -gt 0 ]]; then
if [[ $cursor_pos -gt 0 ]]; then
((cursor_pos--))
elif [[ $top_index -gt 0 ]]; then
((top_index--))
fi
fi
else
filter_query+="k"
fi
;;
"CHAR:f" | "CHAR:F")
if [[ "$filter_mode" == "true" ]]; then
filter_query+="${key#CHAR:}"
fi
# F is currently unbound in normal mode to avoid conflict with Refresh (R)
;;
"CHAR:r" | "CHAR:R")
if [[ "$filter_mode" == "true" ]]; then
filter_query+="${key#CHAR:}"
else
# Trigger Refresh signal (Unified with Analyze)
cleanup
return 10
fi
;;
"CHAR:o" | "CHAR:O")
if [[ "$filter_mode" == "true" ]]; then
filter_query+="${key#CHAR:}"
elif [[ "$has_metadata" == "true" ]]; then
# O toggles reverse order (Unified Sort Order)
if [[ "$sort_reverse" == "true" ]]; then
sort_reverse="false"
else
sort_reverse="true"
fi
rebuild_view
need_full_redraw=true
fi
;;
"DELETE")
# Backspace filter
if [[ "$filter_mode" == "true" && -n "$filter_query" ]]; then
filter_query="${filter_query%?}"
need_full_redraw=true
fi
;;
CHAR:*)
if [[ "$filter_mode" == "true" ]]; then
local ch="${key#CHAR:}"
# avoid accidental leading spaces
if [[ -n "$filter_query" || "$ch" != " " ]]; then
filter_query+="$ch"
need_full_redraw=true
fi
fi
;;
"ENTER")
if [[ "$filter_mode" == "true" ]]; then
applied_query="$filter_query"
filter_mode="false"
unset MOLE_READ_KEY_FORCE_CHAR
top_index=0
cursor_pos=0
searching="true"
draw_menu # paint "searching..."
drain_pending_input # drop any extra keypresses (e.g., double-Enter)
rebuild_view
searching="false"
draw_menu
continue
fi
# In normal mode: smart Enter behavior
# 1. Check if any items are already selected
local has_selection=false
for ((i = 0; i < total_items; i++)); do
if [[ ${selected[i]} == true ]]; then
has_selection=true
break
fi
done
# 2. If nothing selected, auto-select current item
if [[ $has_selection == false ]]; then
local idx=$((top_index + cursor_pos))
if [[ $idx -lt ${#view_indices[@]} ]]; then
local real="${view_indices[idx]}"
selected[real]=true
((selected_count++))
fi
fi
# 3. Confirm and exit with current selections
local -a selected_indices=()
for ((i = 0; i < total_items; i++)); do
if [[ ${selected[i]} == true ]]; then
selected_indices+=("$i")
fi
done
local final_result=""
if [[ ${#selected_indices[@]} -gt 0 ]]; then
local IFS=','
final_result="${selected_indices[*]}"
fi
trap - EXIT INT TERM
MOLE_SELECTION_RESULT="$final_result"
export MOLE_MENU_SORT_MODE="$sort_mode"
export MOLE_MENU_SORT_REVERSE="$sort_reverse"
restore_terminal
return 0
;;
esac
# Drain any accumulated input after processing (e.g., mouse wheel events)
# This prevents buffered events from causing jumps, without blocking keyboard input
drain_pending_input
done
}
# Export function for external use
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
echo "This is a library file. Source it from other scripts." >&2
exit 1
fi

View File

@@ -1,318 +0,0 @@
#!/bin/bash
# Paginated menu with arrow key navigation
set -euo pipefail
# Terminal control functions
# Enter the terminal's alternate screen buffer (best effort; never fails).
enter_alt_screen() {
    tput smcup 2> /dev/null || true
}
# Leave the alternate screen buffer (best effort; never fails).
leave_alt_screen() {
    tput rmcup 2> /dev/null || true
}
# Get terminal height with fallback
_ms_get_terminal_height() {
# Echo the terminal's row count. Queries stty first, then tput, and
# finally falls back to the classic default of 24 rows.
local height=0
# Try stty size first (most reliable, real-time)
# Use </dev/tty to ensure we read from terminal even if stdin is redirected
if [[ -t 0 ]] || [[ -t 2 ]]; then
height=$(stty size < /dev/tty 2> /dev/null | awk '{print $1}')
fi
# Fallback to tput; also covers stty producing empty output (e.g. /dev/tty
# inaccessible) or a non-positive value.
if [[ -z "$height" || $height -le 0 ]]; then
if command -v tput > /dev/null 2>&1; then
height=$(tput lines 2> /dev/null || echo "24")
else
height=24
fi
fi
echo "$height"
}
# Calculate dynamic items per page based on terminal height
# Rows available for list items: terminal height minus 6 reserved lines
# (header, spacing, footer, trailing clear line — 6 prevents the header
# from being overwritten), clamped to the range 1..50.
_ms_calculate_items_per_page() {
    local rows usable
    rows=$(_ms_get_terminal_height)
    usable=$((rows - 6))
    if ((usable < 1)); then
        usable=1
    elif ((usable > 50)); then
        usable=50
    fi
    echo "$usable"
}
# Main paginated multi-select menu function
paginated_multi_select() {
local title="$1"
shift
local -a items=("$@")
local external_alt_screen=false
if [[ "${MOLE_MANAGED_ALT_SCREEN:-}" == "1" || "${MOLE_MANAGED_ALT_SCREEN:-}" == "true" ]]; then
external_alt_screen=true
fi
# Validation
if [[ ${#items[@]} -eq 0 ]]; then
echo "No items provided" >&2
return 1
fi
local total_items=${#items[@]}
local items_per_page=$(_ms_calculate_items_per_page)
local cursor_pos=0
local top_index=0
local -a selected=()
# Initialize selection array
for ((i = 0; i < total_items; i++)); do
selected[i]=false
done
if [[ -n "${MOLE_PRESELECTED_INDICES:-}" ]]; then
local cleaned_preselect="${MOLE_PRESELECTED_INDICES//[[:space:]]/}"
local -a initial_indices=()
IFS=',' read -ra initial_indices <<< "$cleaned_preselect"
for idx in "${initial_indices[@]}"; do
if [[ "$idx" =~ ^[0-9]+$ && $idx -ge 0 && $idx -lt $total_items ]]; then
selected[idx]=true
fi
done
fi
# Preserve original TTY settings so we can restore them reliably
local original_stty=""
if [[ -t 0 ]] && command -v stty > /dev/null 2>&1; then
original_stty=$(stty -g 2> /dev/null || echo "")
fi
restore_terminal() {
show_cursor
if [[ -n "${original_stty-}" ]]; then
stty "${original_stty}" 2> /dev/null || stty sane 2> /dev/null || stty echo icanon 2> /dev/null || true
else
stty sane 2> /dev/null || stty echo icanon 2> /dev/null || true
fi
if [[ "${external_alt_screen:-false}" == false ]]; then
leave_alt_screen
fi
}
# Cleanup function
cleanup() {
trap - EXIT INT TERM
restore_terminal
}
# Interrupt handler
# shellcheck disable=SC2329
handle_interrupt() {
cleanup
exit 130 # Standard exit code for Ctrl+C
}
trap cleanup EXIT
trap handle_interrupt INT TERM
# Setup terminal - preserve interrupt character
stty -echo -icanon intr ^C 2> /dev/null || true
if [[ $external_alt_screen == false ]]; then
enter_alt_screen
# Clear screen once on entry to alt screen
printf "\033[2J\033[H" >&2
else
printf "\033[H" >&2
fi
hide_cursor
# Helper functions
# shellcheck disable=SC2329
print_line() { printf "\r\033[2K%s\n" "$1" >&2; }
render_item() {
# Render one menu row (checkbox + label) to stderr, highlighted when it is
# the row under the cursor.
# $1: absolute item index  $2: "true" when this row holds the cursor
local idx=$1 is_current=$2
local checkbox="$ICON_EMPTY"
[[ ${selected[idx]} == true ]] && checkbox="$ICON_SOLID"
if [[ $is_current == true ]]; then
printf "\r\033[2K${CYAN}${ICON_ARROW} %s %s${NC}\n" "$checkbox" "${items[idx]}" >&2
else
printf "\r\033[2K %s %s\n" "$checkbox" "${items[idx]}" >&2
fi
}
# Draw the complete menu
draw_menu() {
# Recalculate items_per_page dynamically to handle window resize
items_per_page=$(_ms_calculate_items_per_page)
# Move to home position without clearing (reduces flicker)
printf "\033[H" >&2
# Clear each line as we go instead of clearing entire screen
local clear_line="\r\033[2K"
# Count selections for header display
local selected_count=0
for ((i = 0; i < total_items; i++)); do
[[ ${selected[i]} == true ]] && ((selected_count++))
done
# Header
printf "${clear_line}${PURPLE_BOLD}%s${NC} ${GRAY}%d/%d selected${NC}\n" "${title}" "$selected_count" "$total_items" >&2
if [[ $total_items -eq 0 ]]; then
printf "${clear_line}${GRAY}No items available${NC}\n" >&2
printf "${clear_line}\n" >&2
printf "${clear_line}${GRAY}Q${NC} Quit\n" >&2
printf "${clear_line}" >&2
return
fi
if [[ $top_index -gt $((total_items - 1)) ]]; then
if [[ $total_items -gt $items_per_page ]]; then
top_index=$((total_items - items_per_page))
else
top_index=0
fi
fi
local visible_count=$((total_items - top_index))
[[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
[[ $visible_count -le 0 ]] && visible_count=1
if [[ $cursor_pos -ge $visible_count ]]; then
cursor_pos=$((visible_count - 1))
[[ $cursor_pos -lt 0 ]] && cursor_pos=0
fi
printf "${clear_line}\n" >&2
# Items for current window
local start_idx=$top_index
local end_idx=$((top_index + items_per_page - 1))
[[ $end_idx -ge $total_items ]] && end_idx=$((total_items - 1))
for ((i = start_idx; i <= end_idx; i++)); do
[[ $i -lt 0 ]] && continue
local is_current=false
[[ $((i - start_idx)) -eq $cursor_pos ]] && is_current=true
render_item $i $is_current
done
# Fill empty slots to clear previous content
local items_shown=$((end_idx - start_idx + 1))
[[ $items_shown -lt 0 ]] && items_shown=0
for ((i = items_shown; i < items_per_page; i++)); do
printf "${clear_line}\n" >&2
done
# Clear any remaining lines at bottom
printf "${clear_line}\n" >&2
printf "${clear_line}${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN} | Space | Enter | Q Exit${NC}\n" >&2
# Clear one more line to ensure no artifacts
printf "${clear_line}" >&2
}
# Main interaction loop
while true; do
draw_menu
local key=$(read_key)
case "$key" in
"QUIT")
cleanup
return 1
;;
"UP")
if [[ $total_items -eq 0 ]]; then
:
elif [[ $cursor_pos -gt 0 ]]; then
((cursor_pos--))
elif [[ $top_index -gt 0 ]]; then
((top_index--))
fi
;;
"DOWN")
if [[ $total_items -eq 0 ]]; then
:
else
local absolute_index=$((top_index + cursor_pos))
if [[ $absolute_index -lt $((total_items - 1)) ]]; then
local visible_count=$((total_items - top_index))
[[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
if [[ $cursor_pos -lt $((visible_count - 1)) ]]; then
((cursor_pos++))
elif [[ $((top_index + visible_count)) -lt $total_items ]]; then
((top_index++))
visible_count=$((total_items - top_index))
[[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page
if [[ $cursor_pos -ge $visible_count ]]; then
cursor_pos=$((visible_count - 1))
fi
fi
fi
fi
;;
"SPACE")
local idx=$((top_index + cursor_pos))
if [[ $idx -lt $total_items ]]; then
if [[ ${selected[idx]} == true ]]; then
selected[idx]=false
else
selected[idx]=true
fi
fi
;;
"ALL")
for ((i = 0; i < total_items; i++)); do
selected[i]=true
done
;;
"NONE")
for ((i = 0; i < total_items; i++)); do
selected[i]=false
done
;;
"ENTER")
# Store result in global variable instead of returning via stdout
local -a selected_indices=()
for ((i = 0; i < total_items; i++)); do
if [[ ${selected[i]} == true ]]; then
selected_indices+=("$i")
fi
done
# Allow empty selection - don't auto-select cursor position
# This fixes the bug where unselecting all items would still select the last cursor position
local final_result=""
if [[ ${#selected_indices[@]} -gt 0 ]]; then
local IFS=','
final_result="${selected_indices[*]}"
fi
# Remove the trap to avoid cleanup on normal exit
trap - EXIT INT TERM
# Store result in global variable
MOLE_SELECTION_RESULT="$final_result"
# Manually cleanup terminal before returning
restore_terminal
return 0
;;
esac
done
}
# Export function for external use
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
echo "This is a library file. Source it from other scripts." >&2
exit 1
fi

View File

@@ -1,492 +0,0 @@
#!/bin/bash
set -euo pipefail
# Ensure common.sh is loaded.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
[[ -z "${MOLE_COMMON_LOADED:-}" ]] && source "$SCRIPT_DIR/lib/core/common.sh"
# Batch uninstall with a single confirmation.
# User data detection patterns (prompt user to backup if found).
readonly SENSITIVE_DATA_PATTERNS=(
    "\.warp"                               # Warp terminal configs/themes
    "/\.config/"                           # Standard Unix config directory
    "/themes/"                             # Theme customizations
    "/settings/"                           # Settings directories
    "/Application Support/[^/]+/User Data" # Chrome/Electron user data
    "/Preferences/[^/]+\.plist"            # User preference files
    "/Documents/"                          # User documents
    "/\.ssh/"                              # SSH keys and configs (critical)
    "/\.gnupg/"                            # GPG keys (critical)
)
# Join patterns into a single alternation regex for grep -E.
SENSITIVE_DATA_REGEX=$(
    IFS='|'
    echo "${SENSITIVE_DATA_PATTERNS[*]}"
)
# Decode a base64-encoded, newline-separated file list and validate it.
#   $1 - base64 payload (encoded with trailing newlines stripped)
#   $2 - app name, used only in log messages
# Prints the decoded list on stdout, or an empty string on any failure.
# Always returns 0 so callers running under `set -e` are never aborted.
decode_file_list() {
    local encoded="$1"
    local app_name="$2"
    local decoded
    # macOS uses -D, GNU uses -d. Always return 0 for set -e safety.
    if ! decoded=$(printf '%s' "$encoded" | base64 -D 2> /dev/null); then
        if ! decoded=$(printf '%s' "$encoded" | base64 -d 2> /dev/null); then
            log_error "Failed to decode file list for $app_name" >&2
            echo ""
            return 0 # Return success with empty string
        fi
    fi
    # Defense in depth: reject payloads containing NUL bytes.
    if [[ "$decoded" =~ $'\0' ]]; then
        log_warning "File list for $app_name contains null bytes, rejecting" >&2
        echo ""
        return 0 # Return success with empty string
    fi
    # Every non-empty entry must be an absolute path; anything else poisons the list.
    while IFS= read -r line; do
        if [[ -n "$line" && ! "$line" =~ ^/ ]]; then
            log_warning "Invalid path in file list for $app_name: $line" >&2
            echo ""
            return 0 # Return success with empty string
        fi
    done <<< "$decoded"
    echo "$decoded"
    return 0
}
# Note: find_app_files() and calculate_total_size() are in lib/core/common.sh.
# Unload launchd jobs whose plist name starts with the app's bundle id.
#   $1 - bundle id (skipped when empty or "unknown")
#   $2 - "true" when system-level files exist; enables sudo unload of
#        /Library/LaunchAgents and /Library/LaunchDaemons
# All unload failures are ignored (job may not be loaded).
stop_launch_services() {
    local bundle_id="$1"
    local has_system_files="${2:-false}"
    [[ -z "$bundle_id" || "$bundle_id" == "unknown" ]] && return 0
    # User-level agents never need sudo.
    if [[ -d ~/Library/LaunchAgents ]]; then
        while IFS= read -r -d '' plist; do
            launchctl unload "$plist" 2> /dev/null || true
        done < <(find ~/Library/LaunchAgents -maxdepth 1 -name "${bundle_id}*.plist" -print0 2> /dev/null)
    fi
    if [[ "$has_system_files" == "true" ]]; then
        if [[ -d /Library/LaunchAgents ]]; then
            while IFS= read -r -d '' plist; do
                sudo launchctl unload "$plist" 2> /dev/null || true
            done < <(find /Library/LaunchAgents -maxdepth 1 -name "${bundle_id}*.plist" -print0 2> /dev/null)
        fi
        if [[ -d /Library/LaunchDaemons ]]; then
            while IFS= read -r -d '' plist; do
                sudo launchctl unload "$plist" 2> /dev/null || true
            done < <(find /Library/LaunchDaemons -maxdepth 1 -name "${bundle_id}*.plist" -print0 2> /dev/null)
        fi
    fi
}
# Remove each path from a newline-separated list; prints the removal count.
#   $1 - newline-separated absolute paths
#   $2 - "true" to remove with sudo (default false)
# Symlinks are rm'd directly (never followed); other entries go through
# safe_remove/safe_sudo_remove. Missing paths are skipped silently.
remove_file_list() {
    local file_list="$1"
    local use_sudo="${2:-false}"
    local count=0
    while IFS= read -r file; do
        [[ -n "$file" && -e "$file" ]] || continue
        if [[ -L "$file" ]]; then
            # NOTE: ((count++)) exits non-zero when count was 0; the trailing
            # `|| true` keeps `set -e` happy while still incrementing.
            if [[ "$use_sudo" == "true" ]]; then
                sudo rm "$file" 2> /dev/null && ((count++)) || true
            else
                rm "$file" 2> /dev/null && ((count++)) || true
            fi
        else
            if [[ "$use_sudo" == "true" ]]; then
                safe_sudo_remove "$file" && ((count++)) || true
            else
                safe_remove "$file" true && ((count++)) || true
            fi
        fi
    done <<< "$file_list"
    echo "$count"
}
# Batch-uninstall every app in the global `selected_apps` array with one
# confirmation. Phases: (1) pre-scan each app for running processes, sudo
# needs, and total size; (2) list files and confirm; (3) acquire sudo if
# needed; (4) stop launchd jobs, kill, remove app + related/system files;
# (5) print a summary, clean Dock entries, and invalidate the scan cache.
# Updates caller-scope counters: files_cleaned, total_items, total_size_cleaned.
# selected_apps entries are pipe-delimited: ?|app_path|app_name|bundle_id|?|?
batch_uninstall_applications() {
    local total_size_freed=0
    # shellcheck disable=SC2154
    if [[ ${#selected_apps[@]} -eq 0 ]]; then
        log_warning "No applications selected for uninstallation"
        return 0
    fi
    # Pre-scan: running apps, sudo needs, size.
    local -a running_apps=()
    local -a sudo_apps=()
    local total_estimated_size=0
    local -a app_details=()
    if [[ -t 1 ]]; then start_inline_spinner "Scanning files..."; fi
    for selected_app in "${selected_apps[@]}"; do
        [[ -z "$selected_app" ]] && continue
        IFS='|' read -r _ app_path app_name bundle_id _ _ <<< "$selected_app"
        # Check running app by bundle executable if available.
        local exec_name=""
        if [[ -e "$app_path/Contents/Info.plist" ]]; then
            exec_name=$(defaults read "$app_path/Contents/Info.plist" CFBundleExecutable 2> /dev/null || echo "")
        fi
        local check_pattern="${exec_name:-$app_name}"
        if pgrep -x "$check_pattern" > /dev/null 2>&1; then
            running_apps+=("$app_name")
        fi
        # Sudo needed if bundle owner/dir is not writable or system files exist.
        local needs_sudo=false
        local app_owner=$(get_file_owner "$app_path")
        local current_user=$(whoami)
        if [[ ! -w "$(dirname "$app_path")" ]] ||
            [[ "$app_owner" == "root" ]] ||
            [[ -n "$app_owner" && "$app_owner" != "$current_user" ]]; then
            needs_sudo=true
        fi
        # Size estimate includes related and system files.
        local app_size_kb=$(get_path_size_kb "$app_path")
        local related_files=$(find_app_files "$bundle_id" "$app_name")
        local related_size_kb=$(calculate_total_size "$related_files")
        # system_files is a newline-separated string, not an array.
        # shellcheck disable=SC2178,SC2128
        local system_files=$(find_app_system_files "$bundle_id" "$app_name")
        # shellcheck disable=SC2128
        local system_size_kb=$(calculate_total_size "$system_files")
        local total_kb=$((app_size_kb + related_size_kb + system_size_kb))
        ((total_estimated_size += total_kb))
        # shellcheck disable=SC2128
        if [[ -n "$system_files" ]]; then
            needs_sudo=true
        fi
        if [[ "$needs_sudo" == "true" ]]; then
            sudo_apps+=("$app_name")
        fi
        # Check for sensitive user data once.
        local has_sensitive_data="false"
        if [[ -n "$related_files" ]] && echo "$related_files" | grep -qE "$SENSITIVE_DATA_REGEX"; then
            has_sensitive_data="true"
        fi
        # Store details for later use (base64 keeps lists on one line).
        local encoded_files
        encoded_files=$(printf '%s' "$related_files" | base64 | tr -d '\n')
        local encoded_system_files
        encoded_system_files=$(printf '%s' "$system_files" | base64 | tr -d '\n')
        app_details+=("$app_name|$app_path|$bundle_id|$total_kb|$encoded_files|$encoded_system_files|$has_sensitive_data|$needs_sudo")
    done
    if [[ -t 1 ]]; then stop_inline_spinner; fi
    local size_display=$(bytes_to_human "$((total_estimated_size * 1024))")
    echo ""
    echo -e "${PURPLE_BOLD}Files to be removed:${NC}"
    echo ""
    # Warn if user data is detected.
    local has_user_data=false
    for detail in "${app_details[@]}"; do
        IFS='|' read -r _ _ _ _ _ _ has_sensitive_data <<< "$detail"
        if [[ "$has_sensitive_data" == "true" ]]; then
            has_user_data=true
            break
        fi
    done
    if [[ "$has_user_data" == "true" ]]; then
        echo -e "${YELLOW}${ICON_WARNING}${NC} ${YELLOW}Note: Some apps contain user configurations/themes${NC}"
        echo ""
    fi
    for detail in "${app_details[@]}"; do
        IFS='|' read -r app_name app_path bundle_id total_kb encoded_files encoded_system_files has_sensitive_data needs_sudo_flag <<< "$detail"
        local related_files=$(decode_file_list "$encoded_files" "$app_name")
        local system_files=$(decode_file_list "$encoded_system_files" "$app_name")
        local app_size_display=$(bytes_to_human "$((total_kb * 1024))")
        echo -e "${BLUE}${ICON_CONFIRM}${NC} ${app_name} ${GRAY}(${app_size_display})${NC}"
        echo -e " ${GREEN}${ICON_SUCCESS}${NC} ${app_path/$HOME/~}"
        # Show related files (limit to 5).
        local file_count=0
        local max_files=5
        while IFS= read -r file; do
            if [[ -n "$file" && -e "$file" ]]; then
                if [[ $file_count -lt $max_files ]]; then
                    echo -e " ${GREEN}${ICON_SUCCESS}${NC} ${file/$HOME/~}"
                fi
                ((file_count++))
            fi
        done <<< "$related_files"
        # Show system files (limit to 5).
        local sys_file_count=0
        while IFS= read -r file; do
            if [[ -n "$file" && -e "$file" ]]; then
                if [[ $sys_file_count -lt $max_files ]]; then
                    echo -e " ${BLUE}${ICON_SOLID}${NC} System: $file"
                fi
                ((sys_file_count++))
            fi
        done <<< "$system_files"
        local total_hidden=$((file_count > max_files ? file_count - max_files : 0))
        ((total_hidden += sys_file_count > max_files ? sys_file_count - max_files : 0))
        if [[ $total_hidden -gt 0 ]]; then
            echo -e " ${GRAY} ... and ${total_hidden} more files${NC}"
        fi
    done
    # Confirmation before requesting sudo.
    local app_total=${#selected_apps[@]}
    local app_text="app"
    [[ $app_total -gt 1 ]] && app_text="apps"
    echo ""
    local removal_note="Remove ${app_total} ${app_text}"
    [[ -n "$size_display" ]] && removal_note+=" (${size_display})"
    if [[ ${#running_apps[@]} -gt 0 ]]; then
        removal_note+=" ${YELLOW}[Running]${NC}"
    fi
    echo -ne "${PURPLE}${ICON_ARROW}${NC} ${removal_note} ${GREEN}Enter${NC} confirm, ${GRAY}ESC${NC} cancel: "
    drain_pending_input # Clean up any pending input before confirmation
    IFS= read -r -s -n1 key || key=""
    drain_pending_input # Clean up any escape sequence remnants
    case "$key" in
        $'\e' | q | Q)
            echo ""
            echo ""
            return 0
            ;;
        "" | $'\n' | $'\r' | y | Y)
            printf "\r\033[K" # Clear the prompt line
            ;;
        *)
            echo ""
            echo ""
            return 0
            ;;
    esac
    # Request sudo if needed.
    if [[ ${#sudo_apps[@]} -gt 0 ]]; then
        if ! sudo -n true 2> /dev/null; then
            if ! request_sudo_access "Admin required for system apps: ${sudo_apps[*]}"; then
                echo ""
                log_error "Admin access denied"
                return 1
            fi
        fi
        # Keep sudo alive during uninstall.
        parent_pid=$$
        (while true; do
            if ! kill -0 "$parent_pid" 2> /dev/null; then
                exit 0
            fi
            sudo -n true
            sleep 60
        done 2> /dev/null) &
        sudo_keepalive_pid=$!
    fi
    if [[ -t 1 ]]; then start_inline_spinner "Uninstalling apps..."; fi
    # Perform uninstallations (silent mode, show results at end).
    if [[ -t 1 ]]; then stop_inline_spinner; fi
    local success_count=0 failed_count=0
    local -a failed_items=()
    local -a success_items=()
    for detail in "${app_details[@]}"; do
        IFS='|' read -r app_name app_path bundle_id total_kb encoded_files encoded_system_files has_sensitive_data needs_sudo <<< "$detail"
        local related_files=$(decode_file_list "$encoded_files" "$app_name")
        local system_files=$(decode_file_list "$encoded_system_files" "$app_name")
        local reason=""
        # Stop Launch Agents/Daemons before removal.
        local has_system_files="false"
        [[ -n "$system_files" ]] && has_system_files="true"
        stop_launch_services "$bundle_id" "$has_system_files"
        if ! force_kill_app "$app_name" "$app_path"; then
            reason="still running"
        fi
        # Remove the application only if not running.
        if [[ -z "$reason" ]]; then
            if [[ "$needs_sudo" == true ]]; then
                if ! safe_sudo_remove "$app_path"; then
                    local app_owner=$(get_file_owner "$app_path")
                    local current_user=$(whoami)
                    if [[ -n "$app_owner" && "$app_owner" != "$current_user" && "$app_owner" != "root" ]]; then
                        reason="owned by $app_owner"
                    else
                        reason="permission denied"
                    fi
                fi
            else
                safe_remove "$app_path" true || reason="remove failed"
            fi
        fi
        # Remove related files if app removal succeeded.
        if [[ -z "$reason" ]]; then
            remove_file_list "$related_files" "false" > /dev/null
            remove_file_list "$system_files" "true" > /dev/null
            # Clean up macOS defaults (preference domains).
            if [[ -n "$bundle_id" && "$bundle_id" != "unknown" ]]; then
                if defaults read "$bundle_id" &> /dev/null; then
                    defaults delete "$bundle_id" 2> /dev/null || true
                fi
                # ByHost preferences (machine-specific).
                if [[ -d ~/Library/Preferences/ByHost ]]; then
                    find ~/Library/Preferences/ByHost -maxdepth 1 -name "${bundle_id}.*.plist" -delete 2> /dev/null || true
                fi
            fi
            ((total_size_freed += total_kb))
            ((success_count++))
            ((files_cleaned++))
            ((total_items++))
            success_items+=("$app_name")
        else
            ((failed_count++))
            failed_items+=("$app_name:$reason")
        fi
    done
    # Summary
    local freed_display
    freed_display=$(bytes_to_human "$((total_size_freed * 1024))")
    local summary_status="success"
    local -a summary_details=()
    if [[ $success_count -gt 0 ]]; then
        local success_list="${success_items[*]}"
        local success_text="app"
        [[ $success_count -gt 1 ]] && success_text="apps"
        local success_line="Removed ${success_count} ${success_text}"
        if [[ -n "$freed_display" ]]; then
            success_line+=", freed ${GREEN}${freed_display}${NC}"
        fi
        # Format app list with max 3 per line.
        if [[ -n "$success_list" ]]; then
            local idx=0
            local is_first_line=true
            local current_line=""
            for app_name in "${success_items[@]}"; do
                local display_item="${GREEN}${app_name}${NC}"
                if ((idx % 3 == 0)); then
                    if [[ -n "$current_line" ]]; then
                        summary_details+=("$current_line")
                    fi
                    if [[ "$is_first_line" == true ]]; then
                        current_line="${success_line}: $display_item"
                        is_first_line=false
                    else
                        current_line="$display_item"
                    fi
                else
                    current_line="$current_line, $display_item"
                fi
                ((idx++))
            done
            if [[ -n "$current_line" ]]; then
                summary_details+=("$current_line")
            fi
        else
            summary_details+=("$success_line")
        fi
    fi
    if [[ $failed_count -gt 0 ]]; then
        summary_status="warn"
        local failed_names=()
        for item in "${failed_items[@]}"; do
            local name=${item%%:*}
            failed_names+=("$name")
        done
        local failed_list="${failed_names[*]}"
        local reason_summary="could not be removed"
        if [[ $failed_count -eq 1 ]]; then
            local first_reason=${failed_items[0]#*:}
            case "$first_reason" in
                still*running*) reason_summary="is still running" ;;
                remove*failed*) reason_summary="could not be removed" ;;
                permission*denied*) reason_summary="permission denied" ;;
                owned*by*) reason_summary="$first_reason (try with sudo)" ;;
                *) reason_summary="$first_reason" ;;
            esac
        fi
        summary_details+=("Failed: ${RED}${failed_list}${NC} ${reason_summary}")
    fi
    if [[ $success_count -eq 0 && $failed_count -eq 0 ]]; then
        summary_status="info"
        summary_details+=("No applications were uninstalled.")
    fi
    local title="Uninstall complete"
    if [[ "$summary_status" == "warn" ]]; then
        title="Uninstall incomplete"
    fi
    print_summary_block "$title" "${summary_details[@]}"
    printf '\n'
    # Clean up Dock entries for uninstalled apps.
    if [[ $success_count -gt 0 ]]; then
        local -a removed_paths=()
        for detail in "${app_details[@]}"; do
            IFS='|' read -r app_name app_path _ _ _ _ <<< "$detail"
            for success_name in "${success_items[@]}"; do
                if [[ "$success_name" == "$app_name" ]]; then
                    removed_paths+=("$app_path")
                    break
                fi
            done
        done
        if [[ ${#removed_paths[@]} -gt 0 ]]; then
            remove_apps_from_dock "${removed_paths[@]}" 2> /dev/null || true
        fi
    fi
    # Clean up sudo keepalive if it was started.
    if [[ -n "${sudo_keepalive_pid:-}" ]]; then
        kill "$sudo_keepalive_pid" 2> /dev/null || true
        wait "$sudo_keepalive_pid" 2> /dev/null || true
        sudo_keepalive_pid=""
    fi
    # Invalidate cache if any apps were successfully uninstalled.
    if [[ $success_count -gt 0 ]]; then
        local cache_file="$HOME/.cache/mole/app_scan_cache"
        rm -f "$cache_file" 2> /dev/null || true
    fi
    ((total_size_cleaned += total_size_freed))
    unset failed_items
}

7
mo
View File

@@ -1,7 +0,0 @@
#!/bin/bash
# `mo` — minimal wrapper that hands every argument to the sibling `mole` script.
set -euo pipefail
self_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
exec "$self_dir/mole" "$@"

787
mole
View File

@@ -1,787 +0,0 @@
#!/bin/bash
# Mole - Main CLI entrypoint.
# Routes subcommands and interactive menu.
# Handles update/remove flows.
set -euo pipefail
# Resolve the directory this script lives in so libraries load regardless of CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/lib/core/common.sh"
source "$SCRIPT_DIR/lib/core/commands.sh"
# Clean up any temp files on every exit path.
trap cleanup_temp_files EXIT INT TERM
# Version and update helpers
VERSION="1.20.0"
MOLE_TAGLINE="Deep clean and optimize your Mac."
# True (exit 0) when the sudo PAM stack already loads the Touch ID module.
is_touchid_configured() {
    local pam_file
    pam_file="/etc/pam.d/sudo"
    if [[ -f "$pam_file" ]]; then
        grep -q "pam_tid.so" "$pam_file" 2> /dev/null
    else
        return 1
    fi
}
# Fetch the latest VERSION string by scraping the raw `mole` script on GitHub.
# Prints an empty string on any network/parse failure (errors are silenced).
get_latest_version() {
    curl -fsSL --connect-timeout 2 --max-time 3 -H "Cache-Control: no-cache" \
        "https://raw.githubusercontent.com/tw93/mole/main/mole" 2> /dev/null |
        grep '^VERSION=' | head -1 | sed 's/VERSION="\(.*\)"/\1/'
}
# Query the GitHub Releases API for the newest tag name.
# Strips a leading "v"/"V" so the result compares cleanly against $VERSION.
# Prints an empty string on any failure.
get_latest_version_from_github() {
    local version
    version=$(curl -fsSL --connect-timeout 2 --max-time 3 \
        "https://api.github.com/repos/tw93/mole/releases/latest" 2> /dev/null |
        grep '"tag_name"' | head -1 | sed -E 's/.*"([^"]+)".*/\1/')
    version="${version#v}"
    version="${version#V}"
    echo "$version"
}
# Detect whether the running `mole` was installed via Homebrew.
# Returns 0 for Homebrew, 1 for manual installs (or when undetectable).
# Three heuristics, in order: symlink into a Cellar, well-known brew bin
# paths with a Cellar present, and brew-prefix match confirmed by `brew list`.
is_homebrew_install() {
    local mole_path
    mole_path=$(command -v mole 2> /dev/null) || return 1
    # Case 1: mole is a symlink into Homebrew's Cellar.
    if [[ -L "$mole_path" ]] && readlink "$mole_path" | grep -q "Cellar/mole"; then
        if command -v brew > /dev/null 2>&1; then
            brew list --formula 2> /dev/null | grep -q "^mole$" && return 0
        else
            return 1
        fi
    fi
    # Case 2: mole sits in a standard brew bin dir and a Cellar exists.
    if [[ -f "$mole_path" ]]; then
        case "$mole_path" in
            /opt/homebrew/bin/mole | /usr/local/bin/mole)
                if [[ -d /opt/homebrew/Cellar/mole ]] || [[ -d /usr/local/Cellar/mole ]]; then
                    if command -v brew > /dev/null 2>&1; then
                        brew list --formula 2> /dev/null | grep -q "^mole$" && return 0
                    else
                        return 0 # Cellar exists, probably Homebrew install
                    fi
                fi
                ;;
        esac
    fi
    # Case 3: mole lives under the active brew prefix.
    if command -v brew > /dev/null 2>&1; then
        local brew_prefix
        brew_prefix=$(brew --prefix 2> /dev/null)
        if [[ -n "$brew_prefix" && "$mole_path" == "$brew_prefix/bin/mole" && -d "$brew_prefix/Cellar/mole" ]]; then
            brew list --formula 2> /dev/null | grep -q "^mole$" && return 0
        fi
    fi
    return 1
}
# Kick off a detached background check for a newer release; the result is
# written to a cache file that show_update_notification() reads later.
# Never blocks the caller.
check_for_updates() {
    local msg_cache="$HOME/.cache/mole/update_message"
    ensure_user_dir "$(dirname "$msg_cache")"
    ensure_user_file "$msg_cache"
    (
        local latest
        latest=$(get_latest_version_from_github)
        # Fall back to scraping the raw script when the API is unavailable.
        if [[ -z "$latest" ]]; then
            latest=$(get_latest_version)
        fi
        # Only announce when latest is strictly newer (sort -V version compare).
        if [[ -n "$latest" && "$VERSION" != "$latest" && "$(printf '%s\n' "$VERSION" "$latest" | sort -V | head -1)" == "$VERSION" ]]; then
            printf "\nUpdate available: %s → %s, run %smo update%s\n\n" "$VERSION" "$latest" "$GREEN" "$NC" > "$msg_cache"
        else
            echo -n > "$msg_cache" # truncate: no pending notification
        fi
    ) &
    disown 2> /dev/null || true
}
# Print the cached update-available message written by check_for_updates(),
# followed by a blank line. Silent no-op when the cache is absent or empty.
show_update_notification() {
    local cache_file="$HOME/.cache/mole/update_message"
    [[ -f "$cache_file" && -s "$cache_file" ]] || return 0
    cat "$cache_file"
    echo
}
# UI helpers
# Print the colored ASCII brand banner with project URL and tagline.
# The heredoc is unquoted so color variables expand; `\\` renders one backslash.
show_brand_banner() {
    cat << EOF
${GREEN} __ __ _ ${NC}
${GREEN}| \/ | ___ | | ___ ${NC}
${GREEN}| |\/| |/ _ \| |/ _ \\${NC}
${GREEN}| | | | (_) | | __/${NC} ${BLUE}https://github.com/tw93/mole${NC}
${GREEN}|_| |_|\___/|_|\___|${NC} ${GREEN}${MOLE_TAGLINE}${NC}
EOF
}
# One-time intro animation: draws the mole ASCII art line by line, pauses,
# then clears the screen. Skipped entirely when stdout is not a TTY.
animate_mole_intro() {
    if [[ ! -t 1 ]]; then
        return
    fi
    clear_screen
    printf '\n'
    hide_cursor
    local -a mole_lines=()
    # Quoted heredoc: art is taken literally (no expansion).
    while IFS= read -r line; do
        mole_lines+=("$line")
    done << 'EOF'
/\_/\
____/ o o \
/~____ =o= /
(______)__m_m)
/ \
__/ /\ \__
/__/ \__\_
EOF
    local idx
    # First 4 lines are the body (purple); the rest is the ground (green).
    local body_cutoff=4
    local body_color="${PURPLE}"
    local ground_color="${GREEN}"
    for idx in "${!mole_lines[@]}"; do
        if ((idx < body_cutoff)); then
            printf "%s\n" "${body_color}${mole_lines[$idx]}${NC}"
        else
            printf "%s\n" "${ground_color}${mole_lines[$idx]}${NC}"
        fi
        sleep 0.1
    done
    printf '\n'
    sleep 0.5
    printf '\033[2J\033[H' # clear screen and home the cursor
    show_cursor
}
# Print Mole's version plus a short system report (macOS version, arch,
# kernel, SIP state, free disk, install method, shell).
show_version() {
    local os_ver
    if command -v sw_vers > /dev/null; then
        os_ver=$(sw_vers -productVersion)
    else
        os_ver="Unknown"
    fi
    local arch
    arch=$(uname -m)
    local kernel
    kernel=$(uname -r)
    local sip_status
    if command -v csrutil > /dev/null; then
        sip_status=$(csrutil status 2> /dev/null | grep -o "enabled\|disabled" || echo "Unknown")
        # Capitalize first letter (locale-safe via LC_ALL=C).
        sip_status="$(LC_ALL=C tr '[:lower:]' '[:upper:]' <<< "${sip_status:0:1}")${sip_status:1}"
    else
        sip_status="Unknown"
    fi
    local disk_free
    disk_free=$(df -h / 2> /dev/null | awk 'NR==2 {print $4}' || echo "Unknown")
    local install_method="Manual"
    if is_homebrew_install; then
        install_method="Homebrew"
    fi
    printf '\nMole version %s\n' "$VERSION"
    printf 'macOS: %s\n' "$os_ver"
    printf 'Architecture: %s\n' "$arch"
    printf 'Kernel: %s\n' "$kernel"
    printf 'SIP: %s\n' "$sip_status"
    printf 'Disk Free: %s\n' "$disk_free"
    printf 'Install: %s\n' "$install_method"
    printf 'Shell: %s\n\n' "${SHELL:-Unknown}"
}
# Print the banner plus a command/option reference.
# Iterates MOLE_COMMANDS ("name:description" entries, defined in commands.sh).
show_help() {
    show_brand_banner
    echo
    printf "%s%s%s\n" "$BLUE" "COMMANDS" "$NC"
    printf " %s%-28s%s %s\n" "$GREEN" "mo" "$NC" "Main menu"
    for entry in "${MOLE_COMMANDS[@]}"; do
        local name="${entry%%:*}"
        local desc="${entry#*:}"
        local display="mo $name"
        # help/version are surfaced as flags rather than subcommands.
        [[ "$name" == "help" ]] && display="mo --help"
        [[ "$name" == "version" ]] && display="mo --version"
        printf " %s%-28s%s %s\n" "$GREEN" "$display" "$NC" "$desc"
    done
    echo
    printf " %s%-28s%s %s\n" "$GREEN" "mo clean --dry-run" "$NC" "Preview cleanup"
    printf " %s%-28s%s %s\n" "$GREEN" "mo clean --whitelist" "$NC" "Manage protected caches"
    printf " %s%-28s%s %s\n" "$GREEN" "mo optimize --dry-run" "$NC" "Preview optimization"
    printf " %s%-28s%s %s\n" "$GREEN" "mo optimize --whitelist" "$NC" "Manage protected items"
    printf " %s%-28s%s %s\n" "$GREEN" "mo purge --paths" "$NC" "Configure scan directories"
    echo
    printf "%s%s%s\n" "$BLUE" "OPTIONS" "$NC"
    printf " %s%-28s%s %s\n" "$GREEN" "--debug" "$NC" "Show detailed operation logs"
    echo
}
# Update flow (Homebrew or installer).
# Self-update flow. Homebrew installs delegate to `brew`; manual installs
# download install.sh and re-run it against the current install dir.
# Exits the process in every path (success or failure).
update_mole() {
    # NOTE(review): update_interrupted is set by the trap but never read —
    # the trap's real work is the echo + exit 130.
    local update_interrupted=false
    trap 'update_interrupted=true; echo ""; exit 130' INT TERM
    if is_homebrew_install; then
        update_via_homebrew "$VERSION"
        exit 0
    fi
    local latest
    latest=$(get_latest_version_from_github)
    [[ -z "$latest" ]] && latest=$(get_latest_version)
    if [[ -z "$latest" ]]; then
        log_error "Unable to check for updates. Check network connection."
        echo -e "${YELLOW}Tip:${NC} Check if you can access GitHub (https://github.com)"
        echo -e "${YELLOW}Tip:${NC} Try again with: ${GRAY}mo update${NC}"
        exit 1
    fi
    if [[ "$VERSION" == "$latest" ]]; then
        echo ""
        echo -e "${GREEN}${ICON_SUCCESS}${NC} Already on latest version (${VERSION})"
        echo ""
        exit 0
    fi
    if [[ -t 1 ]]; then
        start_inline_spinner "Downloading latest version..."
    else
        echo "Downloading latest version..."
    fi
    local installer_url="https://raw.githubusercontent.com/tw93/mole/main/install.sh"
    local tmp_installer
    tmp_installer="$(mktemp_file)" || {
        log_error "Update failed"
        exit 1
    }
    # Download the installer; map common curl exit codes to actionable tips.
    local download_error=""
    if command -v curl > /dev/null 2>&1; then
        download_error=$(curl -fsSL --connect-timeout 10 --max-time 60 "$installer_url" -o "$tmp_installer" 2>&1) || {
            local curl_exit=$?
            if [[ -t 1 ]]; then stop_inline_spinner; fi
            rm -f "$tmp_installer"
            log_error "Update failed (curl error: $curl_exit)"
            case $curl_exit in
                6) echo -e "${YELLOW}Tip:${NC} Could not resolve host. Check DNS or network connection." ;;
                7) echo -e "${YELLOW}Tip:${NC} Failed to connect. Check network or proxy settings." ;;
                22) echo -e "${YELLOW}Tip:${NC} HTTP 404 Not Found. The installer may have moved." ;;
                28) echo -e "${YELLOW}Tip:${NC} Connection timed out. Try again or check firewall." ;;
                *) echo -e "${YELLOW}Tip:${NC} Check network connection and try again." ;;
            esac
            echo -e "${YELLOW}Tip:${NC} URL: $installer_url"
            exit 1
        }
    elif command -v wget > /dev/null 2>&1; then
        download_error=$(wget --timeout=10 --tries=3 -qO "$tmp_installer" "$installer_url" 2>&1) || {
            if [[ -t 1 ]]; then stop_inline_spinner; fi
            rm -f "$tmp_installer"
            log_error "Update failed (wget error)"
            echo -e "${YELLOW}Tip:${NC} Check network connection and try again."
            echo -e "${YELLOW}Tip:${NC} URL: $installer_url"
            exit 1
        }
    else
        if [[ -t 1 ]]; then stop_inline_spinner; fi
        rm -f "$tmp_installer"
        log_error "curl or wget required"
        echo -e "${YELLOW}Tip:${NC} Install curl with: ${GRAY}brew install curl${NC}"
        exit 1
    fi
    if [[ -t 1 ]]; then stop_inline_spinner; fi
    chmod +x "$tmp_installer"
    # Install into the directory holding the current `mole`; sudo if unwritable.
    local mole_path
    mole_path="$(command -v mole 2> /dev/null || echo "$0")"
    local install_dir
    install_dir="$(cd "$(dirname "$mole_path")" && pwd)"
    local requires_sudo="false"
    if [[ ! -w "$install_dir" ]]; then
        requires_sudo="true"
    elif [[ -e "$install_dir/mole" && ! -w "$install_dir/mole" ]]; then
        requires_sudo="true"
    fi
    if [[ "$requires_sudo" == "true" ]]; then
        if ! request_sudo_access "Mole update requires admin access"; then
            log_error "Update aborted (admin access denied)"
            rm -f "$tmp_installer"
            exit 1
        fi
    fi
    if [[ -t 1 ]]; then
        start_inline_spinner "Installing update..."
    else
        echo "Installing update..."
    fi
    # Relay the installer's output; if it didn't report a result itself,
    # print our own "Updated to latest version" line with the new version.
    process_install_output() {
        local output="$1"
        if [[ -t 1 ]]; then stop_inline_spinner; fi
        local filtered_output
        filtered_output=$(printf '%s\n' "$output" | sed '/^$/d')
        if [[ -n "$filtered_output" ]]; then
            printf '\n%s\n' "$filtered_output"
        fi
        if ! printf '%s\n' "$output" | grep -Eq "Updated to latest version|Already on latest version"; then
            local new_version
            new_version=$("$mole_path" --version 2> /dev/null | awk 'NR==1 && NF {print $NF}' || echo "")
            printf '\n%s\n\n' "${GREEN}${ICON_SUCCESS}${NC} Updated to latest version (${new_version:-unknown})"
        else
            printf '\n'
        fi
    }
    local install_output
    local update_tag="V${latest#V}"
    local config_dir="${MOLE_CONFIG_DIR:-$SCRIPT_DIR}"
    if [[ ! -f "$config_dir/lib/core/common.sh" ]]; then
        config_dir="$HOME/.config/mole"
    fi
    # Try the --update path first; fall back to a full install on failure.
    if install_output=$(MOLE_VERSION="$update_tag" "$tmp_installer" --prefix "$install_dir" --config "$config_dir" --update 2>&1); then
        process_install_output "$install_output"
    else
        if install_output=$(MOLE_VERSION="$update_tag" "$tmp_installer" --prefix "$install_dir" --config "$config_dir" 2>&1); then
            process_install_output "$install_output"
        else
            if [[ -t 1 ]]; then stop_inline_spinner; fi
            rm -f "$tmp_installer"
            log_error "Update failed"
            echo "$install_output" | tail -10 >&2 # Show last 10 lines of error
            exit 1
        fi
    fi
    rm -f "$tmp_installer"
    rm -f "$HOME/.cache/mole/update_message"
}
# Full removal flow: detects Homebrew, manual `mole` binaries, and `mo`
# aliases; confirms once; then removes binaries plus ~/.config/mole and
# ~/.cache/mole. Always exits the process.
remove_mole() {
    if [[ -t 1 ]]; then
        start_inline_spinner "Detecting Mole installations..."
    else
        echo "Detecting installations..."
    fi
    local is_homebrew=false
    local brew_cmd=""
    local brew_has_mole="false"
    local -a manual_installs=()
    local -a alias_installs=()
    # Find a usable brew even when it's not on PATH.
    if command -v brew > /dev/null 2>&1; then
        brew_cmd="brew"
    elif [[ -x "/opt/homebrew/bin/brew" ]]; then
        brew_cmd="/opt/homebrew/bin/brew"
    elif [[ -x "/usr/local/bin/brew" ]]; then
        brew_cmd="/usr/local/bin/brew"
    fi
    if [[ -n "$brew_cmd" ]]; then
        if "$brew_cmd" list --formula 2> /dev/null | grep -q "^mole$"; then
            brew_has_mole="true"
        fi
    fi
    if [[ "$brew_has_mole" == "true" ]] || is_homebrew_install; then
        is_homebrew=true
    fi
    # Collect manual installs: whatever `command -v` finds (unless it is a
    # Cellar symlink) plus well-known fallback locations.
    local found_mole
    found_mole=$(command -v mole 2> /dev/null || true)
    if [[ -n "$found_mole" && -f "$found_mole" ]]; then
        if [[ ! -L "$found_mole" ]] || ! readlink "$found_mole" | grep -q "Cellar/mole"; then
            manual_installs+=("$found_mole")
        fi
    fi
    local -a fallback_paths=(
        "/usr/local/bin/mole"
        "$HOME/.local/bin/mole"
        "/opt/local/bin/mole"
    )
    for path in "${fallback_paths[@]}"; do
        if [[ -f "$path" && "$path" != "$found_mole" ]]; then
            if [[ ! -L "$path" ]] || ! readlink "$path" | grep -q "Cellar/mole"; then
                manual_installs+=("$path")
            fi
        fi
    done
    # Same treatment for the `mo` alias wrapper.
    local found_mo
    found_mo=$(command -v mo 2> /dev/null || true)
    if [[ -n "$found_mo" && -f "$found_mo" ]]; then
        alias_installs+=("$found_mo")
    fi
    local -a alias_fallback=(
        "/usr/local/bin/mo"
        "$HOME/.local/bin/mo"
        "/opt/local/bin/mo"
    )
    for alias in "${alias_fallback[@]}"; do
        if [[ -f "$alias" && "$alias" != "$found_mo" ]]; then
            alias_installs+=("$alias")
        fi
    done
    if [[ -t 1 ]]; then
        stop_inline_spinner
    fi
    printf '\n'
    local manual_count=${#manual_installs[@]}
    local alias_count=${#alias_installs[@]}
    if [[ "$is_homebrew" == "false" && ${manual_count:-0} -eq 0 && ${alias_count:-0} -eq 0 ]]; then
        printf '%s\n\n' "${YELLOW}No Mole installation detected${NC}"
        exit 0
    fi
    # Single confirmation listing everything that will be deleted.
    echo -e "${YELLOW}Remove Mole${NC} - will delete the following:"
    if [[ "$is_homebrew" == "true" ]]; then
        echo " - Mole via Homebrew"
    fi
    for install in ${manual_installs[@]+"${manual_installs[@]}"} ${alias_installs[@]+"${alias_installs[@]}"}; do
        echo " - $install"
    done
    echo " - ~/.config/mole"
    echo " - ~/.cache/mole"
    echo -ne "${PURPLE}${ICON_ARROW}${NC} Press ${GREEN}Enter${NC} to confirm, ${GRAY}ESC${NC} to cancel: "
    IFS= read -r -s -n1 key || key=""
    drain_pending_input # Clean up any escape sequence remnants
    case "$key" in
        $'\e')
            exit 0
            ;;
        "" | $'\n' | $'\r')
            printf "\r\033[K" # Clear the prompt line
            ;;
        *)
            exit 0
            ;;
    esac
    local has_error=false
    if [[ "$is_homebrew" == "true" ]]; then
        if [[ -z "$brew_cmd" ]]; then
            log_error "Homebrew command not found. Please ensure Homebrew is installed and in your PATH."
            log_warning "You may need to manually run: brew uninstall --force mole"
            exit 1
        fi
        log_admin "Attempting to uninstall Mole via Homebrew..."
        local brew_uninstall_output
        if ! brew_uninstall_output=$("$brew_cmd" uninstall --force mole 2>&1); then
            has_error=true
            log_error "Homebrew uninstallation failed:"
            printf "%s\n" "$brew_uninstall_output" | sed "s/^/${RED} | ${NC}/" >&2
            log_warning "Please manually run: ${YELLOW}brew uninstall --force mole${NC}"
            echo "" # Add a blank line for readability
        else
            log_success "Mole uninstalled via Homebrew."
        fi
    fi
    # Remove manual binaries, escalating to sudo when the parent dir is read-only.
    if [[ ${manual_count:-0} -gt 0 ]]; then
        for install in "${manual_installs[@]}"; do
            if [[ -f "$install" ]]; then
                if [[ ! -w "$(dirname "$install")" ]]; then
                    if ! sudo rm -f "$install" 2> /dev/null; then
                        has_error=true
                    fi
                else
                    if ! rm -f "$install" 2> /dev/null; then
                        has_error=true
                    fi
                fi
            fi
        done
    fi
    if [[ ${alias_count:-0} -gt 0 ]]; then
        for alias in "${alias_installs[@]}"; do
            if [[ -f "$alias" ]]; then
                if [[ ! -w "$(dirname "$alias")" ]]; then
                    if ! sudo rm -f "$alias" 2> /dev/null; then
                        has_error=true
                    fi
                else
                    if ! rm -f "$alias" 2> /dev/null; then
                        has_error=true
                    fi
                fi
            fi
        done
    fi
    # Cache/config removal is best-effort; failures don't affect the verdict.
    if [[ -d "$HOME/.cache/mole" ]]; then
        rm -rf "$HOME/.cache/mole" 2> /dev/null || true
    fi
    if [[ -d "$HOME/.config/mole" ]]; then
        rm -rf "$HOME/.config/mole" 2> /dev/null || true
    fi
    local final_message
    if [[ "$has_error" == "true" ]]; then
        final_message="${YELLOW}${ICON_ERROR} Mole uninstalled with some errors, thank you for using Mole!${NC}"
    else
        final_message="${GREEN}${ICON_SUCCESS} Mole uninstalled successfully, thank you for using Mole!${NC}"
    fi
    printf '\n%s\n\n' "$final_message"
    exit 0
}
# Menu UI
# Redraw the main menu in place (cursor homed, each line cleared) to avoid
# flicker. Uses cached banner/update-message globals when set.
#   $1 - currently highlighted option (1..5), default 1
#   $2 - legacy "full draw" flag, unused
show_main_menu() {
    local selected="${1:-1}"
    local _full_draw="${2:-true}" # Kept for compatibility (unused)
    local banner="${MAIN_MENU_BANNER:-}"
    local update_message="${MAIN_MENU_UPDATE_MESSAGE:-}"
    if [[ -z "$banner" ]]; then
        banner="$(show_brand_banner)"
        MAIN_MENU_BANNER="$banner"
    fi
    printf '\033[H' # home cursor; lines below overwrite previous frame
    local line=""
    printf '\r\033[2K\n'
    while IFS= read -r line || [[ -n "$line" ]]; do
        printf '\r\033[2K%s\n' "$line"
    done <<< "$banner"
    if [[ -n "$update_message" ]]; then
        while IFS= read -r line || [[ -n "$line" ]]; do
            printf '\r\033[2K%s\n' "$line"
        done <<< "$update_message"
    fi
    printf '\r\033[2K\n'
    printf '\r\033[2K%s\n' "$(show_menu_option 1 "Clean Free up disk space" "$([[ $selected -eq 1 ]] && echo true || echo false)")"
    printf '\r\033[2K%s\n' "$(show_menu_option 2 "Uninstall Remove apps completely" "$([[ $selected -eq 2 ]] && echo true || echo false)")"
    printf '\r\033[2K%s\n' "$(show_menu_option 3 "Optimize Check and maintain system" "$([[ $selected -eq 3 ]] && echo true || echo false)")"
    printf '\r\033[2K%s\n' "$(show_menu_option 4 "Analyze Explore disk usage" "$([[ $selected -eq 4 ]] && echo true || echo false)")"
    printf '\r\033[2K%s\n' "$(show_menu_option 5 "Status Monitor system health" "$([[ $selected -eq 5 ]] && echo true || echo false)")"
    # Control hints only make sense for an interactive stdin.
    if [[ -t 0 ]]; then
        printf '\r\033[2K\n'
        local controls="${GRAY}↑↓ | Enter | M More | "
        # Offer TouchID setup until configured, then offer Update instead.
        if ! is_touchid_configured; then
            controls="${controls}T TouchID"
        else
            controls="${controls}U Update"
        fi
        controls="${controls} | Q Quit${NC}"
        printf '\r\033[2K%s\n' "$controls"
        printf '\r\033[2K\n'
    fi
    printf '\033[J' # clear any leftover lines from a taller previous frame
}
# Interactive main-menu event loop: plays the one-time intro (per TTY),
# caches the banner/update message, then reads keys and dispatches to the
# bin/*.sh tools via exec (replacing this process).
interactive_main_menu() {
    # Show the intro animation only once per terminal device.
    if [[ -t 1 ]]; then
        local tty_name
        tty_name=$(tty 2> /dev/null || echo "")
        if [[ -n "$tty_name" ]]; then
            local flag_file
            local cache_dir="$HOME/.cache/mole"
            ensure_user_dir "$cache_dir"
            # Sanitize the tty path into a filesystem-safe flag filename.
            flag_file="$cache_dir/intro_$(echo "$tty_name" | LC_ALL=C tr -c '[:alnum:]_' '_')"
            if [[ ! -f "$flag_file" ]]; then
                animate_mole_intro
                ensure_user_file "$flag_file"
            fi
        fi
    fi
    local current_option=1
    local first_draw=true
    local brand_banner=""
    local msg_cache="$HOME/.cache/mole/update_message"
    local update_message=""
    brand_banner="$(show_brand_banner)"
    MAIN_MENU_BANNER="$brand_banner"
    if [[ -f "$msg_cache" && -s "$msg_cache" ]]; then
        update_message="$(cat "$msg_cache" 2> /dev/null || echo "")"
    fi
    MAIN_MENU_UPDATE_MESSAGE="$update_message"
    # Restore the cursor before exiting on Ctrl-C.
    cleanup_and_exit() {
        show_cursor
        exit 0
    }
    trap cleanup_and_exit INT
    hide_cursor
    while true; do
        show_main_menu $current_option "$first_draw"
        if [[ "$first_draw" == "true" ]]; then
            first_draw=false
        fi
        local key
        if ! key=$(read_key); then
            continue
        fi
        case "$key" in
            "UP") ((current_option > 1)) && ((current_option--)) ;;
            "DOWN") ((current_option < 5)) && ((current_option++)) ;;
            "ENTER")
                show_cursor
                case $current_option in
                    1) exec "$SCRIPT_DIR/bin/clean.sh" ;;
                    2) exec "$SCRIPT_DIR/bin/uninstall.sh" ;;
                    3) exec "$SCRIPT_DIR/bin/optimize.sh" ;;
                    4) exec "$SCRIPT_DIR/bin/analyze.sh" ;;
                    5) exec "$SCRIPT_DIR/bin/status.sh" ;;
                esac
                ;;
            "CHAR:1")
                show_cursor
                exec "$SCRIPT_DIR/bin/clean.sh"
                ;;
            "CHAR:2")
                show_cursor
                exec "$SCRIPT_DIR/bin/uninstall.sh"
                ;;
            "CHAR:3")
                show_cursor
                exec "$SCRIPT_DIR/bin/optimize.sh"
                ;;
            "CHAR:4")
                show_cursor
                exec "$SCRIPT_DIR/bin/analyze.sh"
                ;;
            "CHAR:5")
                show_cursor
                exec "$SCRIPT_DIR/bin/status.sh"
                ;;
            "MORE")
                show_cursor
                clear
                show_help
                exit 0
                ;;
            "VERSION")
                show_cursor
                clear
                show_version
                exit 0
                ;;
            "TOUCHID")
                show_cursor
                exec "$SCRIPT_DIR/bin/touchid.sh"
                ;;
            "UPDATE")
                show_cursor
                clear
                update_mole
                exit 0
                ;;
            "QUIT") cleanup_and_exit ;;
        esac
        drain_pending_input
    done
}
# CLI dispatch
# CLI dispatcher: strips global flags (--debug), routes the first positional
# argument to the matching bin/*.sh tool, and falls back to the interactive
# menu when no command is given.
main() {
    local -a args=()
    for arg in "$@"; do
        case "$arg" in
            --debug)
                export MO_DEBUG=1 # picked up by downstream scripts
                ;;
            *)
                args+=("$arg")
                ;;
        esac
    done
    case "${args[0]:-""}" in
        "optimize")
            exec "$SCRIPT_DIR/bin/optimize.sh" "${args[@]:1}"
            ;;
        "clean")
            exec "$SCRIPT_DIR/bin/clean.sh" "${args[@]:1}"
            ;;
        "uninstall")
            exec "$SCRIPT_DIR/bin/uninstall.sh" "${args[@]:1}"
            ;;
        "analyze")
            exec "$SCRIPT_DIR/bin/analyze.sh" "${args[@]:1}"
            ;;
        "status")
            exec "$SCRIPT_DIR/bin/status.sh" "${args[@]:1}"
            ;;
        "purge")
            exec "$SCRIPT_DIR/bin/purge.sh" "${args[@]:1}"
            ;;
        "installer")
            exec "$SCRIPT_DIR/bin/installer.sh" "${args[@]:1}"
            ;;
        "touchid")
            exec "$SCRIPT_DIR/bin/touchid.sh" "${args[@]:1}"
            ;;
        "completion")
            exec "$SCRIPT_DIR/bin/completion.sh" "${args[@]:1}"
            ;;
        "update")
            update_mole
            exit 0
            ;;
        "remove")
            remove_mole
            ;;
        "help" | "--help" | "-h")
            show_help
            exit 0
            ;;
        "version" | "--version" | "-V")
            show_version
            exit 0
            ;;
        "")
            # No subcommand: fire off the async update check, then show the menu.
            check_for_updates
            interactive_main_menu
            ;;
        *)
            echo "Unknown command: ${args[0]}"
            echo "Use 'mole --help' for usage information."
            exit 1
            ;;
    esac
}
main "$@"

View File

@@ -1,221 +0,0 @@
#!/bin/bash
# Code quality checks for Mole.
# Auto-formats code, then runs lint and syntax checks.
# Fail fast: abort on errors, unset variables, and pipeline failures.
set -euo pipefail
# Resolve repository paths relative to this script's own location so the
# script works regardless of the caller's working directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# MODE: "all" (format + check), "format" (format only), "check" (no format).
MODE="all"
# Print CLI usage to stdout (quoted 'EOF' delimiter: no expansion inside).
usage() {
cat << 'EOF'
Usage: ./scripts/check.sh [--format|--no-format]
Options:
--format Apply formatting fixes only (shfmt, gofmt)
--no-format Skip formatting and run checks only
--help Show this help
EOF
}
# Parse command-line options into MODE; reject anything unrecognized.
while [[ $# -gt 0 ]]; do
case "$1" in
--format)
MODE="format"
shift
;;
--no-format)
MODE="check"
shift
;;
--help | -h)
usage
exit 0
;;
*)
echo "Unknown option: $1"
usage
exit 1
;;
esac
done
# All checks run from the repository root.
cd "$PROJECT_ROOT"
# ANSI color codes and status icons used by every report line below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
readonly ICON_SUCCESS="✓"
readonly ICON_ERROR="☻"
readonly ICON_WARNING="●"
readonly ICON_LIST="•"
echo -e "${BLUE}=== Mole Check (${MODE}) ===${NC}\n"
# Newline-separated list of shell scripts to format/lint.
# NOTE(review): a newline-delimited list breaks if a path itself contains
# a newline; acceptable here since repo paths are controlled.
SHELL_FILES=$(find . -type f \( -name "*.sh" -o -name "mole" \) \
-not -path "./.git/*" \
-not -path "*/node_modules/*" \
-not -path "*/tests/tmp-*/*" \
-not -path "*/.*" \
2> /dev/null)
# --format mode: apply shfmt/goimports/gofmt fixes, then exit without
# running any of the lint or syntax checks.
if [[ "$MODE" == "format" ]]; then
    echo -e "${YELLOW}Formatting shell scripts...${NC}"
    if command -v shfmt > /dev/null 2>&1; then
        # Convert the newline-separated file list to NUL delimiters so
        # paths containing spaces survive the trip through xargs (plain
        # `echo | xargs` word-splits such paths into broken arguments).
        printf '%s\n' "$SHELL_FILES" | tr '\n' '\0' | xargs -0 shfmt -i 4 -ci -sr -w
        echo -e "${GREEN}${ICON_SUCCESS} Shell formatting complete${NC}\n"
    else
        # In explicit format mode a missing formatter is a hard error.
        echo -e "${RED}${ICON_ERROR} shfmt not installed${NC}"
        exit 1
    fi
    # Prefer goimports (also regroups imports); fall back to plain gofmt.
    if command -v goimports > /dev/null 2>&1; then
        echo -e "${YELLOW}Formatting Go code (goimports)...${NC}"
        goimports -w -local github.com/tw93/Mole ./cmd
        echo -e "${GREEN}${ICON_SUCCESS} Go formatting complete${NC}\n"
    elif command -v go > /dev/null 2>&1; then
        echo -e "${YELLOW}Formatting Go code (gofmt)...${NC}"
        gofmt -w ./cmd
        echo -e "${GREEN}${ICON_SUCCESS} Go formatting complete${NC}\n"
    else
        echo -e "${YELLOW}${ICON_WARNING} go not installed, skipping gofmt${NC}\n"
    fi
    echo -e "${GREEN}=== Format Completed ===${NC}"
    exit 0
fi
# Default ("all") mode: apply formatting first, then fall through to the
# checks below. Skipped entirely when --no-format was given.
if [[ "$MODE" != "check" ]]; then
    echo -e "${YELLOW}1. Formatting shell scripts...${NC}"
    if command -v shfmt > /dev/null 2>&1; then
        # NUL-delimit the newline-separated file list so paths containing
        # spaces are passed to shfmt intact (plain `echo | xargs` splits them).
        printf '%s\n' "$SHELL_FILES" | tr '\n' '\0' | xargs -0 shfmt -i 4 -ci -sr -w
        echo -e "${GREEN}${ICON_SUCCESS} Shell formatting applied${NC}\n"
    else
        # Unlike --format mode, a missing shfmt is only a warning here.
        echo -e "${YELLOW}${ICON_WARNING} shfmt not installed, skipping${NC}\n"
    fi
    # Prefer goimports; silently skip Go formatting when Go is absent.
    if command -v goimports > /dev/null 2>&1; then
        echo -e "${YELLOW}2. Formatting Go code (goimports)...${NC}"
        goimports -w -local github.com/tw93/Mole ./cmd
        echo -e "${GREEN}${ICON_SUCCESS} Go formatting applied${NC}\n"
    elif command -v go > /dev/null 2>&1; then
        echo -e "${YELLOW}2. Formatting Go code (gofmt)...${NC}"
        gofmt -w ./cmd
        echo -e "${GREEN}${ICON_SUCCESS} Go formatting applied${NC}\n"
    fi
fi
# Section 3: Go linting. golangci-lint is preferred; go vet is the
# fallback; no Go toolchain at all downgrades to a warning.
echo -e "${YELLOW}3. Running Go linters...${NC}"
if command -v golangci-lint > /dev/null 2>&1; then
# Validate the lint config itself before running, so a bad config is
# reported distinctly from lint findings.
if ! golangci-lint config verify; then
echo -e "${RED}${ICON_ERROR} golangci-lint config invalid${NC}\n"
exit 1
fi
if golangci-lint run ./cmd/...; then
echo -e "${GREEN}${ICON_SUCCESS} golangci-lint passed${NC}\n"
else
echo -e "${RED}${ICON_ERROR} golangci-lint failed${NC}\n"
exit 1
fi
elif command -v go > /dev/null 2>&1; then
echo -e "${YELLOW}${ICON_WARNING} golangci-lint not installed, falling back to go vet${NC}"
if go vet ./cmd/...; then
echo -e "${GREEN}${ICON_SUCCESS} go vet passed${NC}\n"
else
echo -e "${RED}${ICON_ERROR} go vet failed${NC}\n"
exit 1
fi
else
echo -e "${YELLOW}${ICON_WARNING} Go not installed, skipping Go checks${NC}\n"
fi
# Section 4: ShellCheck lint across all first-party shell sources.
echo -e "${YELLOW}4. Running ShellCheck...${NC}"
if command -v shellcheck > /dev/null 2>&1; then
if shellcheck mole bin/*.sh lib/*/*.sh scripts/*.sh; then
echo -e "${GREEN}${ICON_SUCCESS} ShellCheck passed${NC}\n"
else
echo -e "${RED}${ICON_ERROR} ShellCheck failed${NC}\n"
exit 1
fi
else
echo -e "${YELLOW}${ICON_WARNING} shellcheck not installed, skipping${NC}\n"
fi
# Section 5: bash -n parse check of every script.
echo -e "${YELLOW}5. Running syntax check...${NC}"
if ! bash -n mole; then
echo -e "${RED}${ICON_ERROR} Syntax check failed (mole)${NC}\n"
exit 1
fi
for script in bin/*.sh; do
if ! bash -n "$script"; then
echo -e "${RED}${ICON_ERROR} Syntax check failed ($script)${NC}\n"
exit 1
fi
done
# The `exit 1` below runs inside the pipeline's while-subshell, so it
# only exits that subshell; the overall abort works because the pipeline
# then returns non-zero and `set -e` (top of file) terminates the script.
find lib -name "*.sh" | while read -r script; do
if ! bash -n "$script"; then
echo -e "${RED}${ICON_ERROR} Syntax check failed ($script)${NC}\n"
exit 1
fi
done
echo -e "${GREEN}${ICON_SUCCESS} Syntax check passed${NC}\n"
# Section 6: heuristic greps that verify known optimizations are still
# present in the codebase. Each probe bumps TOTAL_CHECKS; a hit also
# bumps OPTIMIZATION_SCORE. Missing optimizations only warn — they do
# not fail the check run.
echo -e "${YELLOW}6. Checking optimizations...${NC}"
OPTIMIZATION_SCORE=0
TOTAL_CHECKS=0
# BUGFIX: counters use VAR=$((VAR + 1)) instead of ((VAR++)). Under
# `set -e`, ((VAR++)) returns exit status 1 when the pre-increment value
# is 0 (the arithmetic expression evaluates to 0), which aborted the
# script on the very first counter bump.
TOTAL_CHECKS=$((TOTAL_CHECKS + 1))
if grep -q "read -r -s -n 1 -t 1" lib/core/ui.sh; then
    echo -e "${GREEN}  ${ICON_SUCCESS} Keyboard timeout configured${NC}"
    OPTIMIZATION_SCORE=$((OPTIMIZATION_SCORE + 1))
else
    echo -e "${YELLOW}  ${ICON_WARNING} Keyboard timeout may be misconfigured${NC}"
fi
TOTAL_CHECKS=$((TOTAL_CHECKS + 1))
# grep -c exits non-zero when there are no matches; `|| true` keeps
# set -e happy and the ${VAR:-0} fallback guards an empty result.
DRAIN_PASSES=$(grep -c "while IFS= read -r -s -n 1" lib/core/ui.sh 2> /dev/null || true)
DRAIN_PASSES=${DRAIN_PASSES:-0}
if [[ $DRAIN_PASSES -eq 1 ]]; then
    echo -e "${GREEN}  ${ICON_SUCCESS} drain_pending_input optimized${NC}"
    OPTIMIZATION_SCORE=$((OPTIMIZATION_SCORE + 1))
else
    echo -e "${YELLOW}  ${ICON_WARNING} drain_pending_input has multiple passes${NC}"
fi
TOTAL_CHECKS=$((TOTAL_CHECKS + 1))
if grep -q "rotate_log_once" lib/core/log.sh; then
    echo -e "${GREEN}  ${ICON_SUCCESS} Log rotation optimized${NC}"
    OPTIMIZATION_SCORE=$((OPTIMIZATION_SCORE + 1))
else
    echo -e "${YELLOW}  ${ICON_WARNING} Log rotation not optimized${NC}"
fi
TOTAL_CHECKS=$((TOTAL_CHECKS + 1))
# Inverted probe: success means the redundant cache metadata is GONE.
if ! grep -q "cache_meta\|cache_dir_mtime" bin/uninstall.sh; then
    echo -e "${GREEN}  ${ICON_SUCCESS} Cache validation simplified${NC}"
    OPTIMIZATION_SCORE=$((OPTIMIZATION_SCORE + 1))
else
    echo -e "${YELLOW}  ${ICON_WARNING} Cache still uses redundant metadata${NC}"
fi
TOTAL_CHECKS=$((TOTAL_CHECKS + 1))
if grep -q "Consecutive slashes" bin/clean.sh; then
    echo -e "${GREEN}  ${ICON_SUCCESS} Path validation enhanced${NC}"
    OPTIMIZATION_SCORE=$((OPTIMIZATION_SCORE + 1))
else
    echo -e "${YELLOW}  ${ICON_WARNING} Path validation not enhanced${NC}"
fi
echo -e "${BLUE}  Optimization score: $OPTIMIZATION_SCORE/$TOTAL_CHECKS${NC}\n"
echo -e "${GREEN}=== Checks Completed ===${NC}"
if [[ $OPTIMIZATION_SCORE -eq $TOTAL_CHECKS ]]; then
    echo -e "${GREEN}${ICON_SUCCESS} All optimizations applied${NC}"
else
    echo -e "${YELLOW}${ICON_WARNING} Some optimizations missing${NC}"
fi

View File

@@ -1,424 +0,0 @@
#!/bin/bash
# Create Raycast script commands and Alfred keywords for Mole (clean + uninstall).
# Fail fast on errors, unset variables, and pipeline failures.
set -euo pipefail
# ANSI colors and icons shared by the log_* helpers below.
BLUE='\033[0;34m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'
ICON_STEP="➜"
ICON_SUCCESS="✓"
ICON_WARN="!"
ICON_ERR="✗"
# One-line colored log helpers: icon + message to stdout.
log_step() { echo -e "${BLUE}${ICON_STEP}${NC} $1"; }
log_success() { echo -e "${GREEN}${ICON_SUCCESS}${NC} $1"; }
log_warn() { echo -e "${YELLOW}${ICON_WARN}${NC} $1"; }
log_error() { echo -e "${RED}${ICON_ERR}${NC} $1"; }
log_header() { echo -e "\n${BLUE}==== $1 ====${NC}\n"; }
# True when stdout is a terminal and the controlling TTY is readable.
is_interactive() { [[ -t 1 && -r /dev/tty ]]; }
# Wait for the user to press Enter when a TTY is available; in
# non-interactive runs just print the prompt text and continue.
prompt_enter() {
    local message="$1"
    if ! is_interactive; then
        echo "$message"
        return
    fi
    # `|| true` keeps `set -e` from aborting on EOF/interrupted reads.
    read -r -p "$message" < /dev/tty || true
}
# Locate the Mole binary, preferring the short `mo` alias over `mole`.
# Prints the resolved path on stdout; exits the script if neither exists.
detect_mo() {
    local candidate
    for candidate in mo mole; do
        if command -v "$candidate" > /dev/null 2>&1; then
            command -v "$candidate"
            return
        fi
    done
    log_error "Mole not found. Install it first via Homebrew or ./install.sh."
    exit 1
}
# Generate one executable Raycast script command at $target that runs
# "<mo_bin> <subcommand>". The generated script tries, in order: run
# inline when a usable TERM is set, otherwise open a terminal emulator
# (auto-detected or forced via MO_LAUNCHER_APP) and run the command there.
# NOTE: the heredoc below uses an unquoted EOF delimiter — ${title},
# ${raw_cmd}, ${cmd_escaped}, ${mo_bin}, ${subcommand} expand NOW, while
# every \$ / \${...} is emitted literally into the generated script.
write_raycast_script() {
local target="$1"
local title="$2"
local mo_bin="$3"
local subcommand="$4"
# raw_cmd is the quoted command line; cmd_escaped backslash-escapes
# backslashes and double quotes for embedding inside AppleScript strings.
local raw_cmd="\"${mo_bin}\" ${subcommand}"
local cmd_escaped="${raw_cmd//\\/\\\\}"
cmd_escaped="${cmd_escaped//\"/\\\"}"
cat > "$target" << EOF
#!/bin/bash
# Required parameters:
# @raycast.schemaVersion 1
# @raycast.title ${title}
# @raycast.mode fullOutput
# @raycast.packageName Mole
# Optional parameters:
# @raycast.icon 🐹
set -euo pipefail
echo "🐹 Running ${title}..."
echo ""
CMD="${raw_cmd}"
CMD_ESCAPED="${cmd_escaped}"
has_app() {
local name="\$1"
[[ -d "/Applications/\${name}.app" || -d "\$HOME/Applications/\${name}.app" ]]
}
has_bin() {
command -v "\$1" >/dev/null 2>&1
}
launcher_available() {
local app="\$1"
case "\$app" in
Terminal) return 0 ;;
iTerm|iTerm2) has_app "iTerm" || has_app "iTerm2" ;;
Alacritty) has_app "Alacritty" ;;
Kitty) has_bin "kitty" || has_app "kitty" ;;
WezTerm) has_bin "wezterm" || has_app "WezTerm" ;;
Ghostty) has_bin "ghostty" || has_app "Ghostty" ;;
Hyper) has_app "Hyper" ;;
WindTerm) has_app "WindTerm" ;;
Warp) has_app "Warp" ;;
*)
return 1 ;;
esac
}
detect_launcher_app() {
if [[ -n "\${MO_LAUNCHER_APP:-}" ]]; then
echo "\${MO_LAUNCHER_APP}"
return
fi
local candidates=(Warp Ghostty Alacritty Kitty WezTerm WindTerm Hyper iTerm2 iTerm Terminal)
local app
for app in "\${candidates[@]}"; do
if launcher_available "\$app"; then
echo "\$app"
return
fi
done
echo "Terminal"
}
launch_with_app() {
local app="\$1"
case "\$app" in
Terminal)
if command -v osascript >/dev/null 2>&1; then
osascript <<'APPLESCRIPT'
set targetCommand to "${cmd_escaped}"
tell application "Terminal"
activate
do script targetCommand
end tell
APPLESCRIPT
return 0
fi
;;
iTerm|iTerm2)
if command -v osascript >/dev/null 2>&1; then
osascript <<'APPLESCRIPT'
set targetCommand to "${cmd_escaped}"
tell application "iTerm2"
activate
try
tell current window
tell current session
write text targetCommand
end tell
end tell
on error
create window with default profile
tell current window
tell current session
write text targetCommand
end tell
end tell
end try
end tell
APPLESCRIPT
return 0
fi
;;
Alacritty)
if launcher_available "Alacritty" && command -v open >/dev/null 2>&1; then
open -na "Alacritty" --args -e /bin/zsh -lc "${raw_cmd}"
return \$?
fi
;;
Kitty)
if has_bin "kitty"; then
kitty --hold /bin/zsh -lc "${raw_cmd}"
return \$?
elif [[ -x "/Applications/kitty.app/Contents/MacOS/kitty" ]]; then
"/Applications/kitty.app/Contents/MacOS/kitty" --hold /bin/zsh -lc "${raw_cmd}"
return \$?
fi
;;
WezTerm)
if has_bin "wezterm"; then
wezterm start -- /bin/zsh -lc "${raw_cmd}"
return \$?
elif [[ -x "/Applications/WezTerm.app/Contents/MacOS/wezterm" ]]; then
"/Applications/WezTerm.app/Contents/MacOS/wezterm" start -- /bin/zsh -lc "${raw_cmd}"
return \$?
fi
;;
Ghostty)
if has_bin "ghostty"; then
ghostty --command "/bin/zsh" -- -lc "${raw_cmd}"
return \$?
elif [[ -x "/Applications/Ghostty.app/Contents/MacOS/ghostty" ]]; then
"/Applications/Ghostty.app/Contents/MacOS/ghostty" --command "/bin/zsh" -- -lc "${raw_cmd}"
return \$?
fi
;;
Hyper)
if launcher_available "Hyper" && command -v open >/dev/null 2>&1; then
open -na "Hyper" --args /bin/zsh -lc "${raw_cmd}"
return \$?
fi
;;
WindTerm)
if launcher_available "WindTerm" && command -v open >/dev/null 2>&1; then
open -na "WindTerm" --args /bin/zsh -lc "${raw_cmd}"
return \$?
fi
;;
Warp)
if launcher_available "Warp" && command -v open >/dev/null 2>&1; then
open -na "Warp" --args /bin/zsh -lc "${raw_cmd}"
return \$?
fi
;;
esac
return 1
}
if [[ -n "\${TERM:-}" && "\${TERM}" != "dumb" ]]; then
"${mo_bin}" ${subcommand}
exit \$?
fi
TERM_APP="\$(detect_launcher_app)"
if launch_with_app "\$TERM_APP"; then
exit 0
fi
if [[ "\$TERM_APP" != "Terminal" ]]; then
echo "Could not control \$TERM_APP, falling back to Terminal..."
if launch_with_app "Terminal"; then
exit 0
fi
fi
echo "TERM environment variable not set and no launcher succeeded."
echo "Run this manually:"
echo " ${raw_cmd}"
exit 1
EOF
# Raycast only lists executable scripts.
chmod +x "$target"
}
# Install the five Mole Raycast script commands into Raycast's default
# script-commands directory, then guide the user through wiring Raycast up.
create_raycast_commands() {
    local mo_bin="$1"
    local default_dir="$HOME/Library/Application Support/Raycast/script-commands"
    local dir="$default_dir"
    log_step "Installing Raycast commands..."
    mkdir -p "$dir"
    # One generated script per subcommand; the Raycast title and the Mole
    # subcommand share the same name.
    local cmd
    for cmd in clean uninstall optimize analyze status; do
        write_raycast_script "$dir/mole-$cmd.sh" "$cmd" "$mo_bin" "$cmd"
    done
    log_success "Scripts ready in: $dir"
    log_header "Raycast Configuration"
    # Deep-link into Raycast's extension settings when possible.
    if command -v open > /dev/null 2>&1; then
        if open "raycast://extensions/raycast/raycast-settings/extensions" > /dev/null 2>&1; then
            log_step "Raycast settings opened."
        else
            log_warn "Could not auto-open Raycast."
        fi
    else
        log_warn "open command not available; please open Raycast manually."
    fi
    echo "If Raycast asks to add a Script Directory, use:"
    echo " $dir"
    if ! is_interactive; then
        log_warn "Non-interactive mode; skip Raycast reload. Please run 'Reload Script Directories' in Raycast."
        return
    fi
    log_header "Finalizing Setup"
    prompt_enter "Press [Enter] to reload script directories in Raycast..."
    # Ask Raycast to rescan its script directories so the new commands appear.
    if command -v open > /dev/null 2>&1 && open "raycast://extensions/raycast/raycast/reload-script-directories" > /dev/null 2>&1; then
        log_step "Raycast script directories reloaded."
    else
        log_warn "Could not auto-reload Raycast script directories."
    fi
    log_success "Raycast setup complete!"
}
# Print a UUID in 8-4-4-4-12 hex format.
# Prefers uuidgen; falls back to openssl, then to /dev/urandom, so the
# function still works on systems missing either tool (the original
# fallback failed outright without openssl).
uuid() {
    if command -v uuidgen > /dev/null 2>&1; then
        uuidgen
        return
    fi
    # Declare and assign separately so a failing command substitution is
    # not masked by `local`'s own exit status (ShellCheck SC2155).
    local hex
    if command -v openssl > /dev/null 2>&1; then
        hex=$(openssl rand -hex 16)
    else
        # 16 random bytes -> 32 hex chars; od output is stripped of
        # whitespace to leave a bare hex string.
        hex=$(head -c 16 /dev/urandom | od -An -tx1 | tr -d ' \n')
    fi
    echo "${hex:0:8}-${hex:8:4}-${hex:12:4}-${hex:16:4}-${hex:20:12}"
}
# Install one Alfred workflow per Mole subcommand (keyword -> shell script
# action) into the user's Alfred preferences bundle. Silently does nothing
# when Alfred's workflows directory is absent.
create_alfred_workflow() {
local mo_bin="$1"
# ALFRED_PREFS_DIR lets callers point at a non-default preferences bundle.
local prefs_dir="${ALFRED_PREFS_DIR:-$HOME/Library/Application Support/Alfred/Alfred.alfredpreferences}"
local workflows_dir="$prefs_dir/workflows"
if [[ ! -d "$workflows_dir" ]]; then
return
fi
log_step "Installing Alfred workflows..."
# Pipe-separated records: bundle-id|display name|keyword|subtitle|command.
local workflows=(
"fun.tw93.mole.clean|Mole clean|clean|Run Mole clean|\"${mo_bin}\" clean"
"fun.tw93.mole.uninstall|Mole uninstall|uninstall|Uninstall apps via Mole|\"${mo_bin}\" uninstall"
"fun.tw93.mole.optimize|Mole optimize|optimize|System health & optimization|\"${mo_bin}\" optimize"
"fun.tw93.mole.analyze|Mole analyze|analyze|Disk space analysis|\"${mo_bin}\" analyze"
"fun.tw93.mole.status|Mole status|status|Live system dashboard|\"${mo_bin}\" status"
)
for entry in "${workflows[@]}"; do
IFS="|" read -r bundle name keyword subtitle command <<< "$entry"
# Alfred expects workflow dirs named user.workflow.<lowercase-uuid>.
# NOTE(review): `local x=$(cmd)` masks the command's exit status (SC2155);
# harmless here since uuid/tr are effectively infallible, but worth fixing.
local workflow_uid="user.workflow.$(uuid | LC_ALL=C tr '[:upper:]' '[:lower:]')"
local input_uid
local action_uid
input_uid="$(uuid)"
action_uid="$(uuid)"
local dir="$workflows_dir/$workflow_uid"
mkdir -p "$dir"
# Unquoted EOF delimiter: ${bundle}/${name}/${keyword}/${subtitle}/
# ${command} and the two UIDs are expanded into the generated plist,
# which wires the keyword input object to the script action object.
cat > "$dir/info.plist" << EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>bundleid</key>
<string>${bundle}</string>
<key>createdby</key>
<string>Mole</string>
<key>name</key>
<string>${name}</string>
<key>objects</key>
<array>
<dict>
<key>config</key>
<dict>
<key>argumenttype</key>
<integer>2</integer>
<key>keyword</key>
<string>${keyword}</string>
<key>subtext</key>
<string>${subtitle}</string>
<key>text</key>
<string>${name}</string>
<key>withspace</key>
<true/>
</dict>
<key>type</key>
<string>alfred.workflow.input.keyword</string>
<key>uid</key>
<string>${input_uid}</string>
<key>version</key>
<integer>1</integer>
</dict>
<dict>
<key>config</key>
<dict>
<key>concurrently</key>
<true/>
<key>escaping</key>
<integer>102</integer>
<key>script</key>
<string>#!/bin/bash
PATH="/usr/local/bin:/opt/homebrew/bin:/usr/bin:/bin"
${command}
</string>
<key>scriptargtype</key>
<integer>1</integer>
<key>scriptfile</key>
<string></string>
<key>type</key>
<integer>0</integer>
</dict>
<key>type</key>
<string>alfred.workflow.action.script</string>
<key>uid</key>
<string>${action_uid}</string>
<key>version</key>
<integer>2</integer>
</dict>
</array>
<key>connections</key>
<dict>
<key>${input_uid}</key>
<array>
<dict>
<key>destinationuid</key>
<string>${action_uid}</string>
<key>modifiers</key>
<integer>0</integer>
<key>modifiersubtext</key>
<string></string>
</dict>
</array>
</dict>
<key>uid</key>
<string>${workflow_uid}</string>
<key>version</key>
<integer>1</integer>
</dict>
</plist>
EOF
log_success "Workflow ready: ${name} (keyword: ${keyword})"
done
log_step "Open Alfred preferences → Workflows if you need to adjust keywords."
}
# Entry point: detect the Mole binary, then install the Raycast commands
# and Alfred workflows that wrap it, and print a summary.
main() {
    local rule="━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo ""
    echo "$rule"
    echo " Mole Quick Launchers"
    echo "$rule"
    local mo_bin
    mo_bin="$(detect_mo)"
    log_step "Detected Mole binary at: ${mo_bin}"
    create_raycast_commands "$mo_bin"
    create_alfred_workflow "$mo_bin"
    echo ""
    log_success "Done! Raycast and Alfred are ready with 5 commands:"
    local line
    for line in \
        " • clean - Deep system cleanup" \
        " • uninstall - Remove applications" \
        " • optimize - System health & tuning" \
        " • analyze - Disk space explorer" \
        " • status - Live system monitor"; do
        echo "$line"
    done
    echo ""
}
main "$@"

Some files were not shown because too many files have changed in this diff Show More