mirror of
https://github.com/tw93/Mole.git
synced 2026-03-22 15:00:07 +00:00
feat(status): alert on persistent high-cpu processes (#602)
* feat(status): alert on persistent high-cpu processes * refactor(status): keep high-cpu alerts read-only * fix(status): address lint and sudo test regressions --------- Co-authored-by: Tw93 <hitw93@gmail.com>
This commit is contained in:
@@ -210,6 +210,8 @@ Health score is based on CPU, memory, disk, temperature, and I/O load, with colo
|
||||
|
||||
Shortcuts: In `mo status`, press `k` to toggle the cat and save the preference, and `q` to quit.
|
||||
|
||||
When enabled, `mo status` shows a read-only alert banner for processes that stay above the configured CPU threshold for a sustained window. Use `--proc-cpu-threshold`, `--proc-cpu-window`, or `--proc-cpu-alerts=false` to tune or disable it.
|
||||
|
||||
#### Machine-Readable Output
|
||||
|
||||
Both `mo analyze` and `mo status` support a `--json` flag for scripting and automation.
|
||||
|
||||
@@ -21,7 +21,10 @@ var (
|
||||
BuildTime = ""
|
||||
|
||||
// Command-line flags
|
||||
jsonOutput = flag.Bool("json", false, "output metrics as JSON instead of TUI")
|
||||
jsonOutput = flag.Bool("json", false, "output metrics as JSON instead of TUI")
|
||||
procCPUThreshold = flag.Float64("proc-cpu-threshold", 100, "alert when a process stays above this CPU percent")
|
||||
procCPUWindow = flag.Duration("proc-cpu-window", 5*time.Minute, "continuous duration a process must exceed the CPU threshold")
|
||||
procCPUAlerts = flag.Bool("proc-cpu-alerts", true, "enable persistent high-CPU process alerts")
|
||||
)
|
||||
|
||||
func shouldUseJSONOutput(forceJSON bool, stdout *os.File) bool {
|
||||
@@ -116,11 +119,29 @@ func saveCatHidden(hidden bool) {
|
||||
|
||||
func newModel() model {
|
||||
return model{
|
||||
collector: NewCollector(),
|
||||
collector: NewCollector(processWatchOptionsFromFlags()),
|
||||
catHidden: loadCatHidden(),
|
||||
}
|
||||
}
|
||||
|
||||
func processWatchOptionsFromFlags() ProcessWatchOptions {
|
||||
return ProcessWatchOptions{
|
||||
Enabled: *procCPUAlerts,
|
||||
CPUThreshold: *procCPUThreshold,
|
||||
Window: *procCPUWindow,
|
||||
}
|
||||
}
|
||||
|
||||
func validateFlags() error {
|
||||
if *procCPUThreshold < 0 {
|
||||
return fmt.Errorf("--proc-cpu-threshold must be >= 0")
|
||||
}
|
||||
if *procCPUWindow <= 0 {
|
||||
return fmt.Errorf("--proc-cpu-window must be > 0")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Init implements tea.Model. It starts the metric refresh loop with an
// immediate first tick and kicks off the animation ticker alongside it.
func (m model) Init() tea.Cmd {
	return tea.Batch(tickAfter(0), animTick())
}
|
||||
@@ -179,6 +200,7 @@ func (m model) View() string {
|
||||
}
|
||||
|
||||
header, mole := renderHeader(m.metrics, m.errMessage, m.animFrame, termWidth, m.catHidden)
|
||||
alertBar := renderProcessAlertBar(m.metrics.ProcessAlerts, termWidth)
|
||||
|
||||
var cardContent string
|
||||
if termWidth <= 80 {
|
||||
@@ -204,6 +226,9 @@ func (m model) View() string {
|
||||
|
||||
// Combine header, mole, and cards with consistent spacing
|
||||
parts := []string{header}
|
||||
if alertBar != "" {
|
||||
parts = append(parts, alertBar)
|
||||
}
|
||||
if mole != "" {
|
||||
parts = append(parts, mole)
|
||||
}
|
||||
@@ -235,7 +260,7 @@ func animTickWithSpeed(cpuUsage float64) tea.Cmd {
|
||||
|
||||
// runJSONMode collects metrics once and outputs as JSON.
|
||||
func runJSONMode() {
|
||||
collector := NewCollector()
|
||||
collector := NewCollector(processWatchOptionsFromFlags())
|
||||
|
||||
// First collection initializes network state (returns nil for network)
|
||||
_, _ = collector.Collect()
|
||||
@@ -269,6 +294,10 @@ func runTUIMode() {
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
if err := validateFlags(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%v\n", err)
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
if shouldUseJSONOutput(*jsonOutput, os.Stdout) {
|
||||
runJSONMode()
|
||||
@@ -276,3 +305,13 @@ func main() {
|
||||
runTUIMode()
|
||||
}
|
||||
}
|
||||
|
||||
func activeAlerts(alerts []ProcessAlert) []ProcessAlert {
|
||||
var active []ProcessAlert
|
||||
for _, alert := range alerts {
|
||||
if alert.Status == "active" {
|
||||
active = append(active, alert)
|
||||
}
|
||||
}
|
||||
return active
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package main
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestShouldUseJSONOutput_ForceFlag(t *testing.T) {
|
||||
@@ -42,3 +43,50 @@ func TestShouldUseJSONOutput_NonTTYFile(t *testing.T) {
|
||||
t.Fatalf("expected file stdout to use JSON mode")
|
||||
}
|
||||
}
|
||||
|
||||
// TestProcessWatchOptionsFromFlags verifies that the flag values are
// copied into ProcessWatchOptions verbatim. The flag globals are saved
// and restored so other tests still see the defaults.
func TestProcessWatchOptionsFromFlags(t *testing.T) {
	oldThreshold := *procCPUThreshold
	oldWindow := *procCPUWindow
	oldAlerts := *procCPUAlerts
	defer func() {
		*procCPUThreshold = oldThreshold
		*procCPUWindow = oldWindow
		*procCPUAlerts = oldAlerts
	}()

	*procCPUThreshold = 125
	*procCPUWindow = 2 * time.Minute
	*procCPUAlerts = false

	opts := processWatchOptionsFromFlags()
	if opts.CPUThreshold != 125 {
		t.Fatalf("CPUThreshold = %v, want 125", opts.CPUThreshold)
	}
	if opts.Window != 2*time.Minute {
		t.Fatalf("Window = %v, want 2m", opts.Window)
	}
	if opts.Enabled {
		t.Fatal("Enabled = true, want false")
	}
}
|
||||
|
||||
// TestValidateFlags checks both rejection paths: a negative threshold and
// a zero window. Flag globals are restored afterwards.
func TestValidateFlags(t *testing.T) {
	oldThreshold := *procCPUThreshold
	oldWindow := *procCPUWindow
	defer func() {
		*procCPUThreshold = oldThreshold
		*procCPUWindow = oldWindow
	}()

	*procCPUThreshold = -1
	*procCPUWindow = 5 * time.Minute
	if err := validateFlags(); err == nil {
		t.Fatal("expected negative threshold to fail validation")
	}

	*procCPUThreshold = 100
	*procCPUWindow = 0
	if err := validateFlags(); err == nil {
		t.Fatal("expected zero window to fail validation")
	}
}
|
||||
|
||||
@@ -66,19 +66,21 @@ type MetricsSnapshot struct {
|
||||
HealthScore int `json:"health_score"` // 0-100 system health score
|
||||
HealthScoreMsg string `json:"health_score_msg"` // Brief explanation
|
||||
|
||||
CPU CPUStatus `json:"cpu"`
|
||||
GPU []GPUStatus `json:"gpu"`
|
||||
Memory MemoryStatus `json:"memory"`
|
||||
Disks []DiskStatus `json:"disks"`
|
||||
DiskIO DiskIOStatus `json:"disk_io"`
|
||||
Network []NetworkStatus `json:"network"`
|
||||
NetworkHistory NetworkHistory `json:"network_history"`
|
||||
Proxy ProxyStatus `json:"proxy"`
|
||||
Batteries []BatteryStatus `json:"batteries"`
|
||||
Thermal ThermalStatus `json:"thermal"`
|
||||
Sensors []SensorReading `json:"sensors"`
|
||||
Bluetooth []BluetoothDevice `json:"bluetooth"`
|
||||
TopProcesses []ProcessInfo `json:"top_processes"`
|
||||
CPU CPUStatus `json:"cpu"`
|
||||
GPU []GPUStatus `json:"gpu"`
|
||||
Memory MemoryStatus `json:"memory"`
|
||||
Disks []DiskStatus `json:"disks"`
|
||||
DiskIO DiskIOStatus `json:"disk_io"`
|
||||
Network []NetworkStatus `json:"network"`
|
||||
NetworkHistory NetworkHistory `json:"network_history"`
|
||||
Proxy ProxyStatus `json:"proxy"`
|
||||
Batteries []BatteryStatus `json:"batteries"`
|
||||
Thermal ThermalStatus `json:"thermal"`
|
||||
Sensors []SensorReading `json:"sensors"`
|
||||
Bluetooth []BluetoothDevice `json:"bluetooth"`
|
||||
TopProcesses []ProcessInfo `json:"top_processes"`
|
||||
ProcessWatch ProcessWatchConfig `json:"process_watch"`
|
||||
ProcessAlerts []ProcessAlert `json:"process_alerts"`
|
||||
}
|
||||
|
||||
type HardwareInfo struct {
|
||||
@@ -96,9 +98,12 @@ type DiskIOStatus struct {
|
||||
}
|
||||
|
||||
type ProcessInfo struct {
|
||||
Name string `json:"name"`
|
||||
CPU float64 `json:"cpu"`
|
||||
Memory float64 `json:"memory"`
|
||||
PID int `json:"pid"`
|
||||
PPID int `json:"ppid"`
|
||||
Name string `json:"name"`
|
||||
Command string `json:"command"`
|
||||
CPU float64 `json:"cpu"`
|
||||
Memory float64 `json:"memory"`
|
||||
}
|
||||
|
||||
type CPUStatus struct {
|
||||
@@ -215,13 +220,19 @@ type Collector struct {
|
||||
cachedGPU []GPUStatus
|
||||
prevDiskIO disk.IOCountersStat
|
||||
lastDiskAt time.Time
|
||||
|
||||
watchMu sync.Mutex
|
||||
processWatch ProcessWatchConfig
|
||||
processWatcher *ProcessWatcher
|
||||
}
|
||||
|
||||
func NewCollector() *Collector {
|
||||
func NewCollector(options ProcessWatchOptions) *Collector {
|
||||
return &Collector{
|
||||
prevNet: make(map[string]net.IOCountersStat),
|
||||
rxHistoryBuf: NewRingBuffer(NetworkHistorySize),
|
||||
txHistoryBuf: NewRingBuffer(NetworkHistorySize),
|
||||
prevNet: make(map[string]net.IOCountersStat),
|
||||
rxHistoryBuf: NewRingBuffer(NetworkHistorySize),
|
||||
txHistoryBuf: NewRingBuffer(NetworkHistorySize),
|
||||
processWatch: options.SnapshotConfig(),
|
||||
processWatcher: NewProcessWatcher(options),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -250,7 +261,7 @@ func (c *Collector) Collect() (MetricsSnapshot, error) {
|
||||
sensorStats []SensorReading
|
||||
gpuStats []GPUStatus
|
||||
btStats []BluetoothDevice
|
||||
topProcs []ProcessInfo
|
||||
allProcs []ProcessInfo
|
||||
)
|
||||
|
||||
// Helper to launch concurrent collection.
|
||||
@@ -303,7 +314,7 @@ func (c *Collector) Collect() (MetricsSnapshot, error) {
|
||||
}
|
||||
return nil
|
||||
})
|
||||
collect(func() (err error) { topProcs = collectTopProcesses(); return nil })
|
||||
collect(func() (err error) { allProcs, err = collectProcesses(); return })
|
||||
|
||||
// Wait for all to complete.
|
||||
wg.Wait()
|
||||
@@ -318,6 +329,14 @@ func (c *Collector) Collect() (MetricsSnapshot, error) {
|
||||
hwInfo := c.cachedHW
|
||||
|
||||
score, scoreMsg := calculateHealthScore(cpuStats, memStats, diskStats, diskIO, thermalStats)
|
||||
topProcs := topProcesses(allProcs, 5)
|
||||
|
||||
var processAlerts []ProcessAlert
|
||||
c.watchMu.Lock()
|
||||
if c.processWatcher != nil {
|
||||
processAlerts = c.processWatcher.Update(now, allProcs)
|
||||
}
|
||||
c.watchMu.Unlock()
|
||||
|
||||
return MetricsSnapshot{
|
||||
CollectedAt: now,
|
||||
@@ -338,12 +357,14 @@ func (c *Collector) Collect() (MetricsSnapshot, error) {
|
||||
RxHistory: c.rxHistoryBuf.Slice(),
|
||||
TxHistory: c.txHistoryBuf.Slice(),
|
||||
},
|
||||
Proxy: proxyStats,
|
||||
Batteries: batteryStats,
|
||||
Thermal: thermalStats,
|
||||
Sensors: sensorStats,
|
||||
Bluetooth: btStats,
|
||||
TopProcesses: topProcs,
|
||||
Proxy: proxyStats,
|
||||
Batteries: batteryStats,
|
||||
Thermal: thermalStats,
|
||||
Sensors: sensorStats,
|
||||
Bluetooth: btStats,
|
||||
TopProcesses: topProcs,
|
||||
ProcessWatch: c.processWatch,
|
||||
ProcessAlerts: processAlerts,
|
||||
}, mergeErr
|
||||
}
|
||||
|
||||
|
||||
@@ -2,52 +2,97 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func collectTopProcesses() []ProcessInfo {
|
||||
func collectProcesses() ([]ProcessInfo, error) {
|
||||
if runtime.GOOS != "darwin" {
|
||||
return nil
|
||||
return nil, nil
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Use ps to get top processes by CPU.
|
||||
out, err := runCmd(ctx, "ps", "-Aceo", "pcpu,pmem,comm", "-r")
|
||||
out, err := runCmd(ctx, "ps", "-Aceo", "pid=,ppid=,pcpu=,pmem=,comm=", "-r")
|
||||
if err != nil {
|
||||
return nil
|
||||
return nil, err
|
||||
}
|
||||
return parseProcessOutput(out), nil
|
||||
}
|
||||
|
||||
func parseProcessOutput(raw string) []ProcessInfo {
|
||||
var procs []ProcessInfo
|
||||
i := 0
|
||||
for line := range strings.Lines(strings.TrimSpace(out)) {
|
||||
if i == 0 {
|
||||
i++
|
||||
continue
|
||||
}
|
||||
if i > 5 {
|
||||
break
|
||||
}
|
||||
i++
|
||||
for line := range strings.Lines(strings.TrimSpace(raw)) {
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) < 3 {
|
||||
if len(fields) < 5 {
|
||||
continue
|
||||
}
|
||||
cpuVal, _ := strconv.ParseFloat(fields[0], 64)
|
||||
memVal, _ := strconv.ParseFloat(fields[1], 64)
|
||||
name := fields[len(fields)-1]
|
||||
|
||||
pid, err := strconv.Atoi(fields[0])
|
||||
if err != nil || pid <= 0 {
|
||||
continue
|
||||
}
|
||||
ppid, _ := strconv.Atoi(fields[1])
|
||||
cpuVal, err := strconv.ParseFloat(fields[2], 64)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
memVal, err := strconv.ParseFloat(fields[3], 64)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
command := strings.Join(fields[4:], " ")
|
||||
if command == "" {
|
||||
continue
|
||||
}
|
||||
name := command
|
||||
// Strip path from command name.
|
||||
if idx := strings.LastIndex(name, "/"); idx >= 0 {
|
||||
name = name[idx+1:]
|
||||
}
|
||||
procs = append(procs, ProcessInfo{
|
||||
Name: name,
|
||||
CPU: cpuVal,
|
||||
Memory: memVal,
|
||||
PID: pid,
|
||||
PPID: ppid,
|
||||
Name: name,
|
||||
Command: command,
|
||||
CPU: cpuVal,
|
||||
Memory: memVal,
|
||||
})
|
||||
}
|
||||
return procs
|
||||
}
|
||||
|
||||
func topProcesses(processes []ProcessInfo, limit int) []ProcessInfo {
|
||||
if limit <= 0 || len(processes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
procs := make([]ProcessInfo, len(processes))
|
||||
copy(procs, processes)
|
||||
sort.Slice(procs, func(i, j int) bool {
|
||||
if procs[i].CPU != procs[j].CPU {
|
||||
return procs[i].CPU > procs[j].CPU
|
||||
}
|
||||
if procs[i].Memory != procs[j].Memory {
|
||||
return procs[i].Memory > procs[j].Memory
|
||||
}
|
||||
return procs[i].PID < procs[j].PID
|
||||
})
|
||||
|
||||
if len(procs) > limit {
|
||||
procs = procs[:limit]
|
||||
}
|
||||
return procs
|
||||
}
|
||||
|
||||
func formatProcessLabel(proc ProcessInfo) string {
|
||||
if proc.Name != "" {
|
||||
return fmt.Sprintf("%s (%d)", proc.Name, proc.PID)
|
||||
}
|
||||
return fmt.Sprintf("pid %d", proc.PID)
|
||||
}
|
||||
|
||||
150
cmd/status/process_watch.go
Normal file
150
cmd/status/process_watch.go
Normal file
@@ -0,0 +1,150 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ProcessWatchOptions is the runtime configuration for the persistent
// high-CPU process watcher, derived from command-line flags.
type ProcessWatchOptions struct {
	Enabled      bool          // alerting on/off (--proc-cpu-alerts)
	CPUThreshold float64       // CPU percent a process must meet or exceed
	Window       time.Duration // how long it must stay at/above the threshold
}

// ProcessWatchConfig is the JSON-facing snapshot of the watcher settings,
// embedded in MetricsSnapshot for --json consumers.
type ProcessWatchConfig struct {
	Enabled      bool    `json:"enabled"`
	CPUThreshold float64 `json:"cpu_threshold"`
	Window       string  `json:"window"`
}

// ProcessAlert describes one process that has stayed at/above the CPU
// threshold for a full window. Status is "active" for live alerts.
type ProcessAlert struct {
	PID         int       `json:"pid"`
	Name        string    `json:"name"`
	Command     string    `json:"command,omitempty"`
	CPU         float64   `json:"cpu"`
	Threshold   float64   `json:"threshold"`
	Window      string    `json:"window"`
	TriggeredAt time.Time `json:"triggered_at"`
	Status      string    `json:"status"`
}

// trackedProcess is the watcher's per-process bookkeeping.
type trackedProcess struct {
	info         ProcessInfo // latest sample for this process
	firstAbove   time.Time   // start of the current above-threshold streak (zero when below)
	triggeredAt  time.Time   // when the streak first filled a full window (zero until triggered)
	currentAbove bool        // whether the latest sample was at/above the threshold
}

// processIdentity keys the tracking map. Including ppid and command
// guards against PID reuse being mistaken for the same process.
type processIdentity struct {
	pid     int
	ppid    int
	command string
}

// ProcessWatcher tracks per-process CPU streaks across samples and emits
// ProcessAlerts once a streak spans the configured window.
type ProcessWatcher struct {
	options ProcessWatchOptions
	tracks  map[processIdentity]*trackedProcess
}
|
||||
|
||||
// NewProcessWatcher returns a watcher with an empty tracking table.
// The map is allocated eagerly so Update never writes to a nil map.
func NewProcessWatcher(options ProcessWatchOptions) *ProcessWatcher {
	return &ProcessWatcher{
		options: options,
		tracks:  make(map[processIdentity]*trackedProcess),
	}
}
|
||||
|
||||
// SnapshotConfig converts the options into their JSON-serializable form,
// rendering the window as a duration string (e.g. "5m0s").
func (o ProcessWatchOptions) SnapshotConfig() ProcessWatchConfig {
	return ProcessWatchConfig{
		Enabled:      o.Enabled,
		CPUThreshold: o.CPUThreshold,
		Window:       o.Window.String(),
	}
}
|
||||
|
||||
// Update ingests one sample of the process table taken at now, advances
// each process's above-threshold streak, prunes processes that vanished,
// and returns the current alert set (see Snapshot). Returns nil when
// alerting is disabled. Not safe for concurrent use on its own; the
// Collector serializes calls under watchMu.
func (w *ProcessWatcher) Update(now time.Time, processes []ProcessInfo) []ProcessAlert {
	if w == nil || !w.options.Enabled {
		return nil
	}

	seen := make(map[processIdentity]bool, len(processes))
	for _, proc := range processes {
		if proc.PID <= 0 {
			continue
		}
		// Identity includes ppid and command so a reused PID starts a
		// fresh track instead of inheriting the previous streak.
		key := processIdentity{
			pid:     proc.PID,
			ppid:    proc.PPID,
			command: proc.Command,
		}
		seen[key] = true

		track, ok := w.tracks[key]
		if !ok {
			track = &trackedProcess{}
			w.tracks[key] = track
		}

		track.info = proc
		track.currentAbove = proc.CPU >= w.options.CPUThreshold

		if track.currentAbove {
			if track.firstAbove.IsZero() {
				// Streak starts at this sample.
				track.firstAbove = now
			}
			// Trigger once the streak has spanned a full window; only set
			// triggeredAt the first time so it records the trigger moment.
			if now.Sub(track.firstAbove) >= w.options.Window && track.triggeredAt.IsZero() {
				track.triggeredAt = now
			}
			continue
		}

		// Dropped below the threshold: reset both the streak start and
		// any previous trigger, so a new spike must last a full window.
		track.firstAbove = time.Time{}
		track.triggeredAt = time.Time{}
	}

	// Forget processes absent from this sample. (Deleting while ranging
	// over a map is safe in Go.)
	for pid := range w.tracks {
		if !seen[pid] {
			delete(w.tracks, pid)
		}
	}

	return w.Snapshot()
}
|
||||
|
||||
// Snapshot materializes the alert list from current tracking state: one
// ProcessAlert per process that is still above the threshold and has
// completed a full window. Results are ordered by trigger time (oldest
// first), then CPU (highest first), then PID for determinism. Returns
// nil when alerting is disabled.
func (w *ProcessWatcher) Snapshot() []ProcessAlert {
	if w == nil || !w.options.Enabled {
		return nil
	}

	alerts := make([]ProcessAlert, 0, len(w.tracks))
	for _, track := range w.tracks {
		// Only currently-above processes with a completed window alert.
		if !track.currentAbove || track.triggeredAt.IsZero() {
			continue
		}

		alerts = append(alerts, ProcessAlert{
			PID:         track.info.PID,
			Name:        track.info.Name,
			Command:     track.info.Command,
			CPU:         track.info.CPU,
			Threshold:   w.options.CPUThreshold,
			Window:      w.options.Window.String(),
			TriggeredAt: track.triggeredAt,
			Status:      "active",
		})
	}

	sort.Slice(alerts, func(i, j int) bool {
		// Every alert built above is "active"; this comparison only
		// matters if other statuses are introduced later.
		if alerts[i].Status != alerts[j].Status {
			return alerts[i].Status == "active"
		}
		if !alerts[i].TriggeredAt.Equal(alerts[j].TriggeredAt) {
			return alerts[i].TriggeredAt.Before(alerts[j].TriggeredAt)
		}
		if alerts[i].CPU != alerts[j].CPU {
			return alerts[i].CPU > alerts[j].CPU
		}
		return alerts[i].PID < alerts[j].PID
	})

	return alerts
}
|
||||
182
cmd/status/process_watch_test.go
Normal file
182
cmd/status/process_watch_test.go
Normal file
@@ -0,0 +1,182 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestParseProcessOutput checks PID/PPID parsing, basename extraction for
// the display name, command paths containing spaces, and that malformed
// lines are skipped rather than producing bogus entries.
func TestParseProcessOutput(t *testing.T) {
	raw := strings.Join([]string{
		"123 1 145.2 10.1 /Applications/Visual Studio Code.app/Contents/MacOS/Electron",
		"456 1 99.5 2.2 /System/Library/CoreServices/Finder.app/Contents/MacOS/Finder",
		"bad line",
	}, "\n")

	procs := parseProcessOutput(raw)
	if len(procs) != 2 {
		t.Fatalf("parseProcessOutput() len = %d, want 2", len(procs))
	}

	if procs[0].PID != 123 || procs[0].PPID != 1 {
		t.Fatalf("unexpected pid/ppid: %+v", procs[0])
	}
	if procs[0].Name != "Electron" {
		t.Fatalf("unexpected process name %q", procs[0].Name)
	}
	if !strings.Contains(procs[0].Command, "Visual Studio Code.app") {
		t.Fatalf("command path missing spaces: %q", procs[0].Command)
	}
}
|
||||
|
||||
// TestTopProcessesSortsByCPU verifies descending CPU order with memory as
// the tie-breaker (PIDs 1 and 2 share CPU 120), and that the result is
// truncated to the requested limit.
func TestTopProcessesSortsByCPU(t *testing.T) {
	procs := []ProcessInfo{
		{PID: 3, Name: "low", CPU: 20, Memory: 3},
		{PID: 1, Name: "high", CPU: 120, Memory: 1},
		{PID: 2, Name: "mid", CPU: 120, Memory: 8},
	}

	top := topProcesses(procs, 2)
	if len(top) != 2 {
		t.Fatalf("topProcesses() len = %d, want 2", len(top))
	}
	if top[0].PID != 2 || top[1].PID != 1 {
		t.Fatalf("unexpected order: %+v", top)
	}
}
|
||||
|
||||
// TestProcessWatcherTriggersAfterContinuousWindow checks that an alert
// fires only once a process has stayed above the threshold for the full
// window: no alert at 0m or 4m, an "active" alert at exactly 5m.
func TestProcessWatcherTriggersAfterContinuousWindow(t *testing.T) {
	base := time.Date(2026, 3, 19, 10, 0, 0, 0, time.UTC)
	watcher := NewProcessWatcher(ProcessWatchOptions{
		Enabled:      true,
		CPUThreshold: 100,
		Window:       5 * time.Minute,
	})

	proc := []ProcessInfo{{PID: 42, Name: "stress", CPU: 140}}
	if alerts := watcher.Update(base, proc); len(alerts) != 0 {
		t.Fatalf("unexpected early alerts: %+v", alerts)
	}
	if alerts := watcher.Update(base.Add(4*time.Minute), proc); len(alerts) != 0 {
		t.Fatalf("unexpected early alerts at 4m: %+v", alerts)
	}
	alerts := watcher.Update(base.Add(5*time.Minute), proc)
	if len(alerts) != 1 {
		t.Fatalf("expected 1 alert after full window, got %+v", alerts)
	}
	if alerts[0].Status != "active" {
		t.Fatalf("unexpected alert status %q", alerts[0].Status)
	}
}
|
||||
|
||||
// TestProcessWatcherResetsWhenUsageDrops checks that a dip below the
// threshold resets the streak, so a later spike must last a full window
// of its own (9m -> 14m here) before alerting again.
func TestProcessWatcherResetsWhenUsageDrops(t *testing.T) {
	base := time.Date(2026, 3, 19, 10, 0, 0, 0, time.UTC)
	watcher := NewProcessWatcher(ProcessWatchOptions{
		Enabled:      true,
		CPUThreshold: 100,
		Window:       5 * time.Minute,
	})

	high := []ProcessInfo{{PID: 42, Name: "stress", CPU: 140}}
	low := []ProcessInfo{{PID: 42, Name: "stress", CPU: 30}}

	watcher.Update(base, high)
	watcher.Update(base.Add(4*time.Minute), high)
	if alerts := watcher.Update(base.Add(4*time.Minute+30*time.Second), low); len(alerts) != 0 {
		t.Fatalf("expected reset after dip, got %+v", alerts)
	}
	if alerts := watcher.Update(base.Add(9*time.Minute), high); len(alerts) != 0 {
		t.Fatalf("expected no alert after reset, got %+v", alerts)
	}
	if alerts := watcher.Update(base.Add(14*time.Minute), high); len(alerts) != 1 {
		t.Fatalf("expected alert after second full window, got %+v", alerts)
	}
}
|
||||
|
||||
// TestProcessWatcherResetsOnPIDReuse checks that a different process with
// the same PID (new ppid/command) is tracked as a fresh identity: the old
// streak does not carry over and the new process needs a full window.
func TestProcessWatcherResetsOnPIDReuse(t *testing.T) {
	base := time.Date(2026, 3, 19, 10, 0, 0, 0, time.UTC)
	watcher := NewProcessWatcher(ProcessWatchOptions{
		Enabled:      true,
		CPUThreshold: 100,
		Window:       2 * time.Minute,
	})

	firstProc := []ProcessInfo{{
		PID:     42,
		PPID:    1,
		Name:    "stress",
		Command: "/usr/bin/stress",
		CPU:     140,
	}}
	secondProc := []ProcessInfo{{
		PID:     42,
		PPID:    99,
		Name:    "node",
		Command: "/usr/local/bin/node /tmp/server.js",
		CPU:     135,
	}}

	watcher.Update(base, firstProc)
	if alerts := watcher.Update(base.Add(2*time.Minute), firstProc); len(alerts) != 1 {
		t.Fatalf("expected first process to alert after window, got %+v", alerts)
	}

	if alerts := watcher.Update(base.Add(3*time.Minute), secondProc); len(alerts) != 0 {
		t.Fatalf("expected pid reuse to reset tracking, got %+v", alerts)
	}
	if alerts := watcher.Update(base.Add(5*time.Minute), secondProc); len(alerts) != 1 {
		t.Fatalf("expected reused pid to alert only after its own window, got %+v", alerts)
	}
}
|
||||
|
||||
// TestRenderProcessAlertBar checks the banner contains the ALERT prefix,
// the lead process label, and a "+N more" suffix for extra alerts, and
// that no action verbs appear (the banner is read-only by design).
func TestRenderProcessAlertBar(t *testing.T) {
	alerts := []ProcessAlert{
		{PID: 10, Name: "node", CPU: 150, Threshold: 100, Window: "5m0s", Status: "active"},
		{PID: 11, Name: "java", CPU: 130, Threshold: 100, Window: "5m0s", Status: "active"},
	}

	bar := renderProcessAlertBar(alerts, 120)
	if !strings.Contains(bar, "ALERT") {
		t.Fatalf("missing alert prefix: %q", bar)
	}
	if !strings.Contains(bar, "node (10)") {
		t.Fatalf("missing lead process label: %q", bar)
	}
	if !strings.Contains(bar, "+1 more") {
		t.Fatalf("missing additional alert count: %q", bar)
	}
	if strings.Contains(bar, "terminate") || strings.Contains(bar, "ignore") {
		t.Fatalf("unexpected action text in read-only alert bar: %q", bar)
	}
}
|
||||
|
||||
// TestMetricsSnapshotJSONIncludesProcessWatch verifies that the --json
// output carries both the process_watch config and process_alerts keys.
func TestMetricsSnapshotJSONIncludesProcessWatch(t *testing.T) {
	snapshot := MetricsSnapshot{
		ProcessWatch: ProcessWatchConfig{
			Enabled:      true,
			CPUThreshold: 100,
			Window:       "5m0s",
		},
		ProcessAlerts: []ProcessAlert{{
			PID:       99,
			Name:      "node",
			CPU:       140,
			Threshold: 100,
			Window:    "5m0s",
			Status:    "active",
		}},
	}

	data, err := json.Marshal(snapshot)
	if err != nil {
		t.Fatalf("json.Marshal() error = %v", err)
	}
	out := string(data)
	if !strings.Contains(out, "\"process_watch\"") {
		t.Fatalf("missing process_watch in json: %s", out)
	}
	if !strings.Contains(out, "\"process_alerts\"") {
		t.Fatalf("missing process_alerts in json: %s", out)
	}
}
|
||||
@@ -17,7 +17,12 @@ var (
|
||||
okStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#A5D6A7"))
|
||||
lineStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#404040"))
|
||||
|
||||
primaryStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#BD93F9"))
|
||||
primaryStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#BD93F9"))
|
||||
alertBarStyle = lipgloss.NewStyle().
|
||||
Foreground(lipgloss.Color("#2B1200")).
|
||||
Background(lipgloss.Color("#FFD75F")).
|
||||
Bold(true).
|
||||
Padding(0, 1)
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -234,6 +239,35 @@ func getScoreStyle(score int) lipgloss.Style {
|
||||
}
|
||||
}
|
||||
|
||||
func renderProcessAlertBar(alerts []ProcessAlert, width int) string {
|
||||
active := activeAlerts(alerts)
|
||||
if len(active) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
focus := active[0]
|
||||
|
||||
text := fmt.Sprintf(
|
||||
"ALERT %s at %.1f%% for %s (threshold %.1f%%)",
|
||||
formatProcessLabel(ProcessInfo{PID: focus.PID, Name: focus.Name}),
|
||||
focus.CPU,
|
||||
focus.Window,
|
||||
focus.Threshold,
|
||||
)
|
||||
if len(active) > 1 {
|
||||
text += fmt.Sprintf(" · +%d more", len(active)-1)
|
||||
}
|
||||
|
||||
return renderBanner(alertBarStyle, text, width)
|
||||
}
|
||||
|
||||
// renderBanner renders text with the given lipgloss style, capping the
// rendered width when a positive terminal width is known.
func renderBanner(style lipgloss.Style, text string, width int) string {
	if width > 0 {
		style = style.MaxWidth(width)
	}
	return style.Render(text)
}
|
||||
|
||||
func renderCPUCard(cpu CPUStatus, thermal ThermalStatus) cardData {
|
||||
var lines []string
|
||||
|
||||
|
||||
@@ -73,6 +73,7 @@ setup() {
|
||||
|
||||
@test "request_sudo_access clears four lines in clamshell mode when Touch ID hint is shown" {
|
||||
run bash -c '
|
||||
unset MOLE_TEST_MODE MOLE_TEST_NO_AUTH
|
||||
source "'"$PROJECT_ROOT"'/lib/core/common.sh"
|
||||
source "'"$PROJECT_ROOT"'/lib/core/sudo.sh"
|
||||
|
||||
@@ -101,6 +102,7 @@ setup() {
|
||||
|
||||
@test "request_sudo_access keeps three-line cleanup in clamshell mode without Touch ID" {
|
||||
run bash -c '
|
||||
unset MOLE_TEST_MODE MOLE_TEST_NO_AUTH
|
||||
source "'"$PROJECT_ROOT"'/lib/core/common.sh"
|
||||
source "'"$PROJECT_ROOT"'/lib/core/sudo.sh"
|
||||
|
||||
|
||||
Reference in New Issue
Block a user