Mirror of https://github.com/tw93/Mole.git, synced 2026-02-07 17:24:17 +00:00
fix: improve cache freshness fallback and proxy detection
@@ -36,6 +36,8 @@ readonly MOLE_UNINSTALL_META_CACHE_FILE="$MOLE_UNINSTALL_META_CACHE_DIR/uninstal
readonly MOLE_UNINSTALL_META_CACHE_LOCK="${MOLE_UNINSTALL_META_CACHE_FILE}.lock"
readonly MOLE_UNINSTALL_META_REFRESH_TTL=604800 # 7 days
readonly MOLE_UNINSTALL_SCAN_SPINNER_DELAY_SEC="0.15"
readonly MOLE_UNINSTALL_INLINE_METADATA_LIMIT=4
readonly MOLE_UNINSTALL_INLINE_MDLS_TIMEOUT_SEC="0.08"

uninstall_relative_time_from_epoch() {
local value_epoch="${1:-0}"
@@ -154,6 +156,34 @@ uninstall_release_metadata_lock() {
[[ -d "$lock_dir" ]] && rmdir "$lock_dir" 2> /dev/null || true
}

uninstall_collect_inline_metadata() {
local app_path="$1"
local app_mtime="${2:-0}"
local now_epoch="${3:-0}"

local size_kb
size_kb=$(get_path_size_kb "$app_path")
[[ "$size_kb" =~ ^[0-9]+$ ]] || size_kb=0

local last_used_epoch=0
local metadata_date
metadata_date=$(run_with_timeout "$MOLE_UNINSTALL_INLINE_MDLS_TIMEOUT_SEC" mdls -name kMDItemLastUsedDate -raw "$app_path" 2> /dev/null || echo "")
if [[ "$metadata_date" != "(null)" && -n "$metadata_date" ]]; then
last_used_epoch=$(date -j -f "%Y-%m-%d %H:%M:%S %z" "$metadata_date" "+%s" 2> /dev/null || echo "0")
fi

# Fallback to app mtime so first scan does not show "...".
if [[ ! "$last_used_epoch" =~ ^[0-9]+$ || $last_used_epoch -le 0 ]]; then
if [[ "$app_mtime" =~ ^[0-9]+$ && $app_mtime -gt 0 ]]; then
last_used_epoch="$app_mtime"
else
last_used_epoch=0
fi
fi

printf "%s|%s|%s\n" "$size_kb" "$last_used_epoch" "$now_epoch"
}

start_uninstall_metadata_refresh() {
local refresh_file="$1"
[[ ! -s "$refresh_file" ]] && {
@@ -567,6 +597,7 @@ scan_applications() {

local current_epoch
current_epoch=$(get_epoch_seconds)
local inline_metadata_count=0

while IFS='|' read -r app_path display_name bundle_id app_mtime cached_mtime cached_size_kb cached_epoch cached_updated_epoch cached_bundle_id cached_display_name; do
[[ -n "$app_path" && -e "$app_path" ]] || continue
@@ -610,6 +641,24 @@ scan_applications() {
fi

if [[ $needs_refresh == true ]]; then
if [[ $inline_metadata_count -lt $MOLE_UNINSTALL_INLINE_METADATA_LIMIT ]]; then
local inline_metadata inline_size_kb inline_epoch inline_updated_epoch
inline_metadata=$(uninstall_collect_inline_metadata "$app_path" "${app_mtime:-0}" "$current_epoch")
IFS='|' read -r inline_size_kb inline_epoch inline_updated_epoch <<< "$inline_metadata"
((inline_metadata_count++))

if [[ "$inline_size_kb" =~ ^[0-9]+$ && $inline_size_kb -gt 0 ]]; then
final_size_kb="$inline_size_kb"
final_size=$(bytes_to_human "$((inline_size_kb * 1024))")
fi
if [[ "$inline_epoch" =~ ^[0-9]+$ && $inline_epoch -gt 0 ]]; then
final_epoch="$inline_epoch"
final_last_used=$(uninstall_relative_time_from_epoch "$final_epoch" "$current_epoch")
fi
if [[ "$inline_updated_epoch" =~ ^[0-9]+$ && $inline_updated_epoch -gt 0 ]]; then
cached_updated_epoch="$inline_updated_epoch"
fi
fi
printf "%s|%s|%s|%s\n" "$app_path" "${app_mtime:-0}" "$bundle_id" "$display_name" >> "$refresh_file"
fi
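Note on the hunks above: uninstall_collect_inline_metadata emits one pipe-delimited record per app, size_kb|last_used_epoch|updated_epoch, and scan_applications splits it with IFS='|' read. At most MOLE_UNINSTALL_INLINE_METADATA_LIMIT apps per scan are enriched inline; every app that still needs a refresh is appended to $refresh_file for the background pass. A minimal Go sketch of parsing that record shape, as a reading aid only (parseInlineMetadata is hypothetical, not part of this commit):

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"
    )

    // inlineMetadata mirrors the "size_kb|last_used_epoch|updated_epoch" record
    // printed by uninstall_collect_inline_metadata.
    type inlineMetadata struct {
    	SizeKB       int64
    	LastUsedUnix int64
    	UpdatedUnix  int64
    }

    func parseInlineMetadata(record string) (inlineMetadata, error) {
    	parts := strings.Split(strings.TrimSpace(record), "|")
    	if len(parts) != 3 {
    		return inlineMetadata{}, fmt.Errorf("want 3 fields, got %d", len(parts))
    	}
    	var vals [3]int64
    	for i, p := range parts {
    		v, err := strconv.ParseInt(p, 10, 64)
    		if err != nil {
    			return inlineMetadata{}, err
    		}
    		vals[i] = v
    	}
    	return inlineMetadata{SizeKB: vals[0], LastUsedUnix: vals[1], UpdatedUnix: vals[2]}, nil
    }

    func main() {
    	m, err := parseInlineMetadata("245760|1738713600|1738886400")
    	fmt.Println(m, err)
    }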
@@ -415,6 +415,221 @@ func TestLoadCacheExpiresWhenDirectoryChanges(t *testing.T) {
}
}

func TestLoadCacheReusesRecentEntryAfterDirectoryChanges(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)

target := filepath.Join(home, "recent-change-target")
if err := os.MkdirAll(target, 0o755); err != nil {
t.Fatalf("create target: %v", err)
}

result := scanResult{TotalSize: 5, TotalFiles: 1}
if err := saveCacheToDisk(target, result); err != nil {
t.Fatalf("saveCacheToDisk: %v", err)
}

cachePath, err := getCachePath(target)
if err != nil {
t.Fatalf("getCachePath: %v", err)
}

file, err := os.Open(cachePath)
if err != nil {
t.Fatalf("open cache: %v", err)
}
var entry cacheEntry
if err := gob.NewDecoder(file).Decode(&entry); err != nil {
t.Fatalf("decode cache: %v", err)
}
_ = file.Close()

// Make cache entry look recently scanned, but older than mod time grace.
entry.ModTime = time.Now().Add(-2 * time.Hour)
entry.ScanTime = time.Now().Add(-1 * time.Hour)

tmp := cachePath + ".tmp"
f, err := os.Create(tmp)
if err != nil {
t.Fatalf("create tmp cache: %v", err)
}
if err := gob.NewEncoder(f).Encode(&entry); err != nil {
t.Fatalf("encode tmp cache: %v", err)
}
_ = f.Close()
if err := os.Rename(tmp, cachePath); err != nil {
t.Fatalf("rename tmp cache: %v", err)
}

if err := os.Chtimes(target, time.Now(), time.Now()); err != nil {
t.Fatalf("chtimes target: %v", err)
}

if _, err := loadCacheFromDisk(target); err != nil {
t.Fatalf("expected recent cache to be reused, got error: %v", err)
}
}
func TestLoadCacheExpiresWhenModifiedAndReuseWindowPassed(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)

target := filepath.Join(home, "reuse-window-target")
if err := os.MkdirAll(target, 0o755); err != nil {
t.Fatalf("create target: %v", err)
}

result := scanResult{TotalSize: 5, TotalFiles: 1}
if err := saveCacheToDisk(target, result); err != nil {
t.Fatalf("saveCacheToDisk: %v", err)
}

cachePath, err := getCachePath(target)
if err != nil {
t.Fatalf("getCachePath: %v", err)
}

file, err := os.Open(cachePath)
if err != nil {
t.Fatalf("open cache: %v", err)
}
var entry cacheEntry
if err := gob.NewDecoder(file).Decode(&entry); err != nil {
t.Fatalf("decode cache: %v", err)
}
_ = file.Close()

// Within overall 7-day TTL but beyond reuse window.
entry.ModTime = time.Now().Add(-48 * time.Hour)
entry.ScanTime = time.Now().Add(-(cacheReuseWindow + time.Hour))

tmp := cachePath + ".tmp"
f, err := os.Create(tmp)
if err != nil {
t.Fatalf("create tmp cache: %v", err)
}
if err := gob.NewEncoder(f).Encode(&entry); err != nil {
t.Fatalf("encode tmp cache: %v", err)
}
_ = f.Close()
if err := os.Rename(tmp, cachePath); err != nil {
t.Fatalf("rename tmp cache: %v", err)
}

if err := os.Chtimes(target, time.Now(), time.Now()); err != nil {
t.Fatalf("chtimes target: %v", err)
}

if _, err := loadCacheFromDisk(target); err == nil {
t.Fatalf("expected cache load to fail after reuse window passes")
}
}
func TestLoadStaleCacheFromDiskAllowsRecentExpiredCache(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)

target := filepath.Join(home, "stale-cache-target")
if err := os.MkdirAll(target, 0o755); err != nil {
t.Fatalf("create target: %v", err)
}

result := scanResult{TotalSize: 7, TotalFiles: 2}
if err := saveCacheToDisk(target, result); err != nil {
t.Fatalf("saveCacheToDisk: %v", err)
}

cachePath, err := getCachePath(target)
if err != nil {
t.Fatalf("getCachePath: %v", err)
}
file, err := os.Open(cachePath)
if err != nil {
t.Fatalf("open cache: %v", err)
}
var entry cacheEntry
if err := gob.NewDecoder(file).Decode(&entry); err != nil {
t.Fatalf("decode cache: %v", err)
}
_ = file.Close()

// Expired for normal cache validation but still inside stale fallback window.
entry.ModTime = time.Now().Add(-48 * time.Hour)
entry.ScanTime = time.Now().Add(-48 * time.Hour)

tmp := cachePath + ".tmp"
f, err := os.Create(tmp)
if err != nil {
t.Fatalf("create tmp cache: %v", err)
}
if err := gob.NewEncoder(f).Encode(&entry); err != nil {
t.Fatalf("encode tmp cache: %v", err)
}
_ = f.Close()
if err := os.Rename(tmp, cachePath); err != nil {
t.Fatalf("rename tmp cache: %v", err)
}

if err := os.Chtimes(target, time.Now(), time.Now()); err != nil {
t.Fatalf("chtimes target: %v", err)
}

if _, err := loadCacheFromDisk(target); err == nil {
t.Fatalf("expected normal cache load to fail")
}
if _, err := loadStaleCacheFromDisk(target); err != nil {
t.Fatalf("expected stale cache load to succeed, got error: %v", err)
}
}
func TestLoadStaleCacheFromDiskExpiresByStaleTTL(t *testing.T) {
home := t.TempDir()
t.Setenv("HOME", home)

target := filepath.Join(home, "stale-cache-expired-target")
if err := os.MkdirAll(target, 0o755); err != nil {
t.Fatalf("create target: %v", err)
}

result := scanResult{TotalSize: 9, TotalFiles: 3}
if err := saveCacheToDisk(target, result); err != nil {
t.Fatalf("saveCacheToDisk: %v", err)
}

cachePath, err := getCachePath(target)
if err != nil {
t.Fatalf("getCachePath: %v", err)
}
file, err := os.Open(cachePath)
if err != nil {
t.Fatalf("open cache: %v", err)
}
var entry cacheEntry
if err := gob.NewDecoder(file).Decode(&entry); err != nil {
t.Fatalf("decode cache: %v", err)
}
_ = file.Close()

entry.ScanTime = time.Now().Add(-(staleCacheTTL + time.Hour))

tmp := cachePath + ".tmp"
f, err := os.Create(tmp)
if err != nil {
t.Fatalf("create tmp cache: %v", err)
}
if err := gob.NewEncoder(f).Encode(&entry); err != nil {
t.Fatalf("encode tmp cache: %v", err)
}
_ = f.Close()
if err := os.Rename(tmp, cachePath); err != nil {
t.Fatalf("rename tmp cache: %v", err)
}

if _, err := loadStaleCacheFromDisk(target); err == nil {
t.Fatalf("expected stale cache load to fail after stale TTL")
}
}
func TestScanPathPermissionError(t *testing.T) {
root := t.TempDir()
lockedDir := filepath.Join(root, "locked")
@@ -182,7 +182,7 @@ func getCachePath(path string) (string, error) {
return filepath.Join(cacheDir, filename), nil
}

func loadCacheFromDisk(path string) (*cacheEntry, error) {
func loadRawCacheFromDisk(path string) (*cacheEntry, error) {
cachePath, err := getCachePath(path)
if err != nil {
return nil, err
@@ -200,23 +200,56 @@ func loadCacheFromDisk(path string) (*cacheEntry, error) {
return nil, err
}

return &entry, nil
}

func loadCacheFromDisk(path string) (*cacheEntry, error) {
entry, err := loadRawCacheFromDisk(path)
if err != nil {
return nil, err
}

info, err := os.Stat(path)
if err != nil {
return nil, err
}

if info.ModTime().After(entry.ModTime) {
// Allow grace window.
if cacheModTimeGrace <= 0 || info.ModTime().Sub(entry.ModTime) > cacheModTimeGrace {
return nil, fmt.Errorf("cache expired: directory modified")
}
}

if time.Since(entry.ScanTime) > 7*24*time.Hour {
scanAge := time.Since(entry.ScanTime)
if scanAge > 7*24*time.Hour {
return nil, fmt.Errorf("cache expired: too old")
}

return &entry, nil
if info.ModTime().After(entry.ModTime) {
// Allow grace window.
if cacheModTimeGrace <= 0 || info.ModTime().Sub(entry.ModTime) > cacheModTimeGrace {
// Directory mod time is noisy on macOS; reuse recent cache to avoid
// frequent full rescans while still forcing refresh for older entries.
if cacheReuseWindow <= 0 || scanAge > cacheReuseWindow {
return nil, fmt.Errorf("cache expired: directory modified")
}
}
}

return entry, nil
}

// loadStaleCacheFromDisk loads cache without strict freshness checks.
// It is used for fast first paint before triggering a background refresh.
func loadStaleCacheFromDisk(path string) (*cacheEntry, error) {
entry, err := loadRawCacheFromDisk(path)
if err != nil {
return nil, err
}

if _, err := os.Stat(path); err != nil {
return nil, err
}

if time.Since(entry.ScanTime) > staleCacheTTL {
return nil, fmt.Errorf("stale cache expired")
}

return entry, nil
}

func saveCacheToDisk(path string, result scanResult) error {
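Taken together, the refactor layers the loaders: loadRawCacheFromDisk only decodes the gob entry, loadCacheFromDisk adds the strict freshness checks (hard 7-day TTL, mod-time grace, reuse window), and loadStaleCacheFromDisk only enforces staleCacheTTL so a stale result can be painted immediately while a fresh scan runs. A condensed sketch of the intended call order, mirroring scanCmd further down (the helper name and return shape are illustrative, not part of this commit):

    // loadWithFallback is a sketch that reuses the package's own loaders.
    // The bool reports whether the caller should still schedule a fresh scan.
    func loadWithFallback(path string) (*cacheEntry, bool, error) {
    	if fresh, err := loadCacheFromDisk(path); err == nil {
    		return fresh, false, nil // fresh enough: serve directly
    	}
    	if stale, err := loadStaleCacheFromDisk(path); err == nil {
    		return stale, true, nil // serve now, refresh in the background
    	}
    	return nil, true, fmt.Errorf("no usable cache for %s, full scan required", path)
    }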
@@ -16,6 +16,8 @@ const (
maxConcurrentOverview = 8
batchUpdateSize = 100
cacheModTimeGrace = 30 * time.Minute
cacheReuseWindow = 24 * time.Hour
staleCacheTTL = 3 * 24 * time.Hour

// Worker pool limits.
minWorkers = 16
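Read together with the loadCacheFromDisk hunk above, these constants define three nested freshness windows on top of the existing hard 7-day TTL. A compact summary of the decision order (a reading aid, not code from the repo):

    // scanAge := time.Since(entry.ScanTime); dirMod := directory mtime
    //
    //   scanAge > 7*24h                                    -> reject ("cache expired: too old")
    //   dirMod within cacheModTimeGrace (30m) of ModTime   -> directory change ignored
    //   dirMod newer by more than the grace window:
    //       scanAge <= cacheReuseWindow (24h)              -> entry still served (recent scan)
    //       scanAge >  cacheReuseWindow                    -> reject ("cache expired: directory modified")
    //
    // loadStaleCacheFromDisk skips the mod-time checks and only rejects entries
    // with scanAge > staleCacheTTL (3 days), for fast first paint.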
@@ -63,8 +63,10 @@ type historyEntry struct {
}

type scanResultMsg struct {
path string
result scanResult
err error
stale bool
}

type overviewSizeMsg struct {
@@ -369,9 +371,19 @@ func (m model) scanCmd(path string) tea.Cmd {
Entries: cached.Entries,
LargeFiles: cached.LargeFiles,
TotalSize: cached.TotalSize,
TotalFiles: 0, // Cache doesn't store file count currently, minor UI limitation
TotalFiles: cached.TotalFiles,
}
return scanResultMsg{result: result, err: nil}
return scanResultMsg{path: path, result: result, err: nil}
}

if stale, err := loadStaleCacheFromDisk(path); err == nil {
result := scanResult{
Entries: stale.Entries,
LargeFiles: stale.LargeFiles,
TotalSize: stale.TotalSize,
TotalFiles: stale.TotalFiles,
}
return scanResultMsg{path: path, result: result, err: nil, stale: true}
}

v, err, _ := scanGroup.Do(path, func() (any, error) {
@@ -379,7 +391,7 @@ func (m model) scanCmd(path string) tea.Cmd {
})

if err != nil {
return scanResultMsg{err: err}
return scanResultMsg{path: path, err: err}
}

result := v.(scanResult)
@@ -390,7 +402,28 @@ func (m model) scanCmd(path string) tea.Cmd {
}
}(path, result)

return scanResultMsg{result: result, err: nil}
return scanResultMsg{path: path, result: result, err: nil}
}
}

func (m model) scanFreshCmd(path string) tea.Cmd {
return func() tea.Msg {
v, err, _ := scanGroup.Do(path, func() (any, error) {
return scanPathConcurrent(path, m.filesScanned, m.dirsScanned, m.bytesScanned, m.currentPath)
})

if err != nil {
return scanResultMsg{path: path, err: err}
}

result := v.(scanResult)
go func(p string, r scanResult) {
if err := saveCacheToDisk(p, r); err != nil {
_ = err
}
}(path, result)

return scanResultMsg{path: path, result: result}
}
}
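scanCmd and scanFreshCmd both funnel work through scanGroup.Do, which looks like golang.org/x/sync/singleflight: concurrent requests for the same path share one scanPathConcurrent call. A self-contained sketch of that pattern, assuming the singleflight dependency (expensiveScan is a hypothetical stand-in):

    package main

    import (
    	"fmt"
    	"sync"

    	"golang.org/x/sync/singleflight"
    )

    var scanGroup singleflight.Group

    // expensiveScan stands in for scanPathConcurrent.
    func expensiveScan(path string) (int, error) { return 42, nil }

    func main() {
    	var wg sync.WaitGroup
    	for i := 0; i < 3; i++ {
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			// All three goroutines share one expensiveScan call for the same key.
    			v, err, shared := scanGroup.Do("/tmp", func() (interface{}, error) {
    				return expensiveScan("/tmp")
    			})
    			fmt.Println(v, err, shared)
    		}()
    	}
    	wg.Wait()
    }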
@@ -442,6 +475,9 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
}
return m, nil
case scanResultMsg:
if msg.path != "" && msg.path != m.path {
return m, nil
}
m.scanning = false
if msg.err != nil {
m.status = fmt.Sprintf("Scan failed: %v", msg.err)
@@ -457,7 +493,6 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
m.largeFiles = msg.result.LargeFiles
m.totalSize = msg.result.TotalSize
m.totalFiles = msg.result.TotalFiles
m.status = fmt.Sprintf("Scanned %s", humanizeBytes(m.totalSize))
m.clampEntrySelection()
m.clampLargeSelection()
m.cache[m.path] = cacheSnapshot(m)
@@ -470,6 +505,23 @@ func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
_ = storeOverviewSize(path, size)
}(m.path, m.totalSize)
}

if msg.stale {
m.status = fmt.Sprintf("Loaded cached data for %s, refreshing...", displayPath(m.path))
m.scanning = true
if m.totalFiles > 0 {
m.lastTotalFiles = m.totalFiles
}
atomic.StoreInt64(m.filesScanned, 0)
atomic.StoreInt64(m.dirsScanned, 0)
atomic.StoreInt64(m.bytesScanned, 0)
if m.currentPath != nil {
m.currentPath.Store("")
}
return m, tea.Batch(m.scanFreshCmd(m.path), tickCmd())
}

m.status = fmt.Sprintf("Scanned %s", humanizeBytes(m.totalSize))
return m, nil
case overviewSizeMsg:
delete(m.overviewScanningSet, msg.Path)
@@ -160,7 +160,7 @@ const NetworkHistorySize = 120 // Increased history size for wider graph

type ProxyStatus struct {
Enabled bool
Type string // HTTP, SOCKS, System
Type string // HTTP, HTTPS, SOCKS, PAC, WPAD, TUN
Host string
}
@@ -2,9 +2,11 @@ package main

import (
"context"
"net/url"
"os"
"runtime"
"sort"
"strconv"
"strings"
"time"
@@ -114,23 +116,8 @@ func isNoiseInterface(name string) bool {
}

func collectProxy() ProxyStatus {
// Check environment variables first.
for _, env := range []string{"https_proxy", "HTTPS_PROXY", "http_proxy", "HTTP_PROXY"} {
if val := os.Getenv(env); val != "" {
proxyType := "HTTP"
if strings.HasPrefix(val, "socks") {
proxyType = "SOCKS"
}
// Extract host.
host := val
if strings.Contains(host, "://") {
host = strings.SplitN(host, "://", 2)[1]
}
if idx := strings.Index(host, "@"); idx >= 0 {
host = host[idx+1:]
}
return ProxyStatus{Enabled: true, Type: proxyType, Host: host}
}
if proxy := collectProxyFromEnv(os.Getenv); proxy.Enabled {
return proxy
}

// macOS: check system proxy via scutil.
@@ -139,14 +126,166 @@ func collectProxy() ProxyStatus {
defer cancel()
out, err := runCmd(ctx, "scutil", "--proxy")
if err == nil {
if strings.Contains(out, "HTTPEnable : 1") || strings.Contains(out, "HTTPSEnable : 1") {
return ProxyStatus{Enabled: true, Type: "System", Host: "System Proxy"}
}
if strings.Contains(out, "SOCKSEnable : 1") {
return ProxyStatus{Enabled: true, Type: "SOCKS", Host: "System Proxy"}
if proxy := collectProxyFromScutilOutput(out); proxy.Enabled {
return proxy
}
}

if proxy := collectProxyFromTunInterfaces(); proxy.Enabled {
return proxy
}
}

return ProxyStatus{Enabled: false}
}
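The rewritten collectProxy tries its sources in a fixed order: proxy environment variables, then the scutil system proxy dictionary on macOS, then active utun/tun interfaces; the first enabled result wins. A usage sketch with illustrative field values (not output from a real machine):

    status := collectProxy()
    if status.Enabled {
    	// e.g. Type "SOCKS", Host "127.0.0.1:7890" when ALL_PROXY is exported,
    	// or Type "TUN", Host "utun3+" when only a VPN-style interface carries traffic.
    	fmt.Printf("proxy: %s via %s\n", status.Type, status.Host)
    }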
func collectProxyFromEnv(getenv func(string) string) ProxyStatus {
// Include ALL_PROXY for users running proxy tools that only export a single variable.
envKeys := []string{
"https_proxy", "HTTPS_PROXY",
"http_proxy", "HTTP_PROXY",
"all_proxy", "ALL_PROXY",
}
for _, key := range envKeys {
val := strings.TrimSpace(getenv(key))
if val == "" {
continue
}

proxyType := "HTTP"
lower := strings.ToLower(val)
if strings.HasPrefix(lower, "socks") {
proxyType = "SOCKS"
}

host := parseProxyHost(val)
if host == "" {
host = val
}
return ProxyStatus{Enabled: true, Type: proxyType, Host: host}
}

return ProxyStatus{Enabled: false}
}
func collectProxyFromScutilOutput(out string) ProxyStatus {
if out == "" {
return ProxyStatus{Enabled: false}
}

if scutilProxyEnabled(out, "SOCKSEnable") {
host := joinHostPort(scutilProxyValue(out, "SOCKSProxy"), scutilProxyValue(out, "SOCKSPort"))
if host == "" {
host = "System Proxy"
}
return ProxyStatus{Enabled: true, Type: "SOCKS", Host: host}
}

if scutilProxyEnabled(out, "HTTPSEnable") {
host := joinHostPort(scutilProxyValue(out, "HTTPSProxy"), scutilProxyValue(out, "HTTPSPort"))
if host == "" {
host = "System Proxy"
}
return ProxyStatus{Enabled: true, Type: "HTTPS", Host: host}
}

if scutilProxyEnabled(out, "HTTPEnable") {
host := joinHostPort(scutilProxyValue(out, "HTTPProxy"), scutilProxyValue(out, "HTTPPort"))
if host == "" {
host = "System Proxy"
}
return ProxyStatus{Enabled: true, Type: "HTTP", Host: host}
}

if scutilProxyEnabled(out, "ProxyAutoConfigEnable") {
pacURL := scutilProxyValue(out, "ProxyAutoConfigURLString")
host := parseProxyHost(pacURL)
if host == "" {
host = "PAC"
}
return ProxyStatus{Enabled: true, Type: "PAC", Host: host}
}

if scutilProxyEnabled(out, "ProxyAutoDiscoveryEnable") {
return ProxyStatus{Enabled: true, Type: "WPAD", Host: "Auto Discovery"}
}

return ProxyStatus{Enabled: false}
}
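Because the branches run top to bottom, SOCKS takes precedence over HTTPS, then HTTP, PAC, and WPAD when several entries are enabled at once. A sample dictionary in the shape the parser expects, modeled on the new tests further down (keys and spacing vary by macOS version):

    const sampleScutilOut = `
    <dictionary> {
      SOCKSEnable : 0
      HTTPEnable : 1
      HTTPProxy : 127.0.0.1
      HTTPPort : 7890
    }`

    // collectProxyFromScutilOutput(sampleScutilOut)
    //   -> ProxyStatus{Enabled: true, Type: "HTTP", Host: "127.0.0.1:7890"}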
func collectProxyFromTunInterfaces() ProxyStatus {
stats, err := net.IOCounters(true)
if err != nil {
return ProxyStatus{Enabled: false}
}

var activeTun []string
for _, s := range stats {
lower := strings.ToLower(s.Name)
if strings.HasPrefix(lower, "utun") || strings.HasPrefix(lower, "tun") {
if s.BytesRecv+s.BytesSent > 0 {
activeTun = append(activeTun, s.Name)
}
}
}
if len(activeTun) == 0 {
return ProxyStatus{Enabled: false}
}
sort.Strings(activeTun)
host := activeTun[0]
if len(activeTun) > 1 {
host = activeTun[0] + "+"
}
return ProxyStatus{Enabled: true, Type: "TUN", Host: host}
}
func scutilProxyEnabled(out, key string) bool {
return scutilProxyValue(out, key) == "1"
}

func scutilProxyValue(out, key string) string {
prefix := key + " :"
for _, line := range strings.Split(out, "\n") {
line = strings.TrimSpace(line)
if strings.HasPrefix(line, prefix) {
return strings.TrimSpace(strings.TrimPrefix(line, prefix))
}
}
return ""
}
func parseProxyHost(raw string) string {
raw = strings.TrimSpace(raw)
if raw == "" {
return ""
}

target := raw
if !strings.Contains(target, "://") {
target = "http://" + target
}
parsed, err := url.Parse(target)
if err != nil {
return ""
}
host := parsed.Host
if host == "" {
return ""
}
return strings.TrimPrefix(host, "@")
}

func joinHostPort(host, port string) string {
host = strings.TrimSpace(host)
port = strings.TrimSpace(port)
if host == "" {
return ""
}
if port == "" {
return host
}
if _, err := strconv.Atoi(port); err != nil {
return host
}
return host + ":" + port
}
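A few illustrative inputs for the two helpers; the expected values follow from net/url parsing and are not taken from the repo's tests:

    // parseProxyHost examples:
    //   "socks5://user:pass@127.0.0.1:7890" -> "127.0.0.1:7890" (credentials live in url.Userinfo, not Host)
    //   "127.0.0.1:7890"                    -> "127.0.0.1:7890" (an http:// scheme is prepended before parsing)
    //   "http://"                           -> ""               (empty host is rejected)
    //
    // joinHostPort examples:
    //   joinHostPort("127.0.0.1", "7890") -> "127.0.0.1:7890"
    //   joinHostPort("127.0.0.1", "")     -> "127.0.0.1"
    //   joinHostPort("", "7890")          -> ""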
cmd/status/metrics_network_test.go (new file, 60 lines)
@@ -0,0 +1,60 @@
package main

import "testing"

func TestCollectProxyFromEnvSupportsAllProxy(t *testing.T) {
env := map[string]string{
"ALL_PROXY": "socks5://127.0.0.1:7890",
}
getenv := func(key string) string {
return env[key]
}

got := collectProxyFromEnv(getenv)
if !got.Enabled {
t.Fatalf("expected proxy enabled")
}
if got.Type != "SOCKS" {
t.Fatalf("expected SOCKS type, got %s", got.Type)
}
if got.Host != "127.0.0.1:7890" {
t.Fatalf("unexpected host: %s", got.Host)
}
}

func TestCollectProxyFromScutilOutputPAC(t *testing.T) {
out := `
<dictionary> {
ProxyAutoConfigEnable : 1
ProxyAutoConfigURLString : http://127.0.0.1:6152/proxy.pac
}`
got := collectProxyFromScutilOutput(out)
if !got.Enabled {
t.Fatalf("expected proxy enabled")
}
if got.Type != "PAC" {
t.Fatalf("expected PAC type, got %s", got.Type)
}
if got.Host != "127.0.0.1:6152" {
t.Fatalf("unexpected host: %s", got.Host)
}
}

func TestCollectProxyFromScutilOutputHTTPHostPort(t *testing.T) {
out := `
<dictionary> {
HTTPEnable : 1
HTTPProxy : 127.0.0.1
HTTPPort : 7890
}`
got := collectProxyFromScutilOutput(out)
if !got.Enabled {
t.Fatalf("expected proxy enabled")
}
if got.Type != "HTTP" {
t.Fatalf("expected HTTP type, got %s", got.Type)
}
if got.Host != "127.0.0.1:7890" {
t.Fatalf("unexpected host: %s", got.Host)
}
}
@@ -365,7 +365,7 @@ safe_sudo_remove() {
fi

local output
local ret
local ret=0
output=$(sudo rm -rf "$path" 2>&1) || ret=$? # safe_remove

if [[ $ret -eq 0 ]]; then