multiple feature and performance improvements (see description)

Here are concise release notes you can paste into your GitHub release.

Release notes — 2025-08-12

Highlights

Agent back to near-zero CPU when idle (request-driven, no background samplers).
Accurate per-process CPU% via /proc deltas; only top-level processes (threads hidden).
TUI: processes pane gets scrollbar, click-to-sort (CPU% or Mem) with indicator, stable total count.
Network panes made taller; disks slightly reduced.
README revamped: rustup prereqs, crates.io install, update/systemd instructions.
Clippy cleanups across agent and client.
Agent

Reverted precompressed caches and background samplers; WebSocket path is request-driven again.
Ensured on-demand gzip for larger replies; no per-request overhead when small.
Processes: switched to refresh_processes_specifics with ProcessRefreshKind::everything().without_tasks() to exclude threads.
Per-process CPU% now computed from /proc jiffies deltas using a small ProcCpuTracker (fixes “always 0%”/scaling issues).
Optional metrics and light caching:
CPU temp and GPU metrics gated by env (SOCKTOP_AGENT_TEMP=0, SOCKTOP_AGENT_GPU=0).
Tiny TTL caches via once_cell to avoid rescanning sensors every tick.
Dependencies: added once_cell = "1.19".
No API changes to WS endpoints.
Client (TUI)

Processes pane:
Scrollbar (mouse wheel, drag; keyboard arrows/PageUp/PageDown/Home/End).
Click header to sort by CPU% or Mem; dot indicator on active column.
Preserves process_count across fast metrics updates to avoid flicker.
UI/theme:
Shared scrollbar colors moved to ui/theme.rs; both CPU and Processes reuse them.
Cached pane rect to fix input handling; removed unused vars.
Layout: network download/upload get more vertical space; disks shrink slightly.
Clippy fixes: derive Default for ProcSortBy; style/import cleanups.
Docs

README: added rustup install steps (with proper shell reload), install via cargo install socktop and cargo install socktop_agent, and a clear Updating section (systemd service steps included).
Features list updated; roadmap marks independent cadences as done.
Upgrade notes

Agent: cargo install socktop_agent --force, then restart your systemd service; if unit changed, systemctl daemon-reload.
TUI: cargo install socktop --force.
Optional envs to trim overhead: SOCKTOP_AGENT_GPU=0, SOCKTOP_AGENT_TEMP=0.
No config or API breaking changes.
This commit is contained in:
jasonwitty 2025-08-12 15:52:46 -07:00
parent 5c002f0b2b
commit 0859f50897
18 changed files with 1246 additions and 750 deletions

125
Cargo.lock generated
View File

@ -278,6 +278,15 @@ dependencies = [
"libc", "libc",
] ]
[[package]]
name = "crc32fast"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511"
dependencies = [
"cfg-if",
]
[[package]] [[package]]
name = "crossterm" name = "crossterm"
version = "0.27.0" version = "0.27.0"
@ -413,6 +422,16 @@ dependencies = [
"windows-sys 0.60.2", "windows-sys 0.60.2",
] ]
[[package]]
name = "flate2"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d"
dependencies = [
"crc32fast",
"miniz_oxide",
]
[[package]] [[package]]
name = "fnv" name = "fnv"
version = "1.0.7" version = "1.0.7"
@ -541,7 +560,19 @@ checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592"
dependencies = [ dependencies = [
"cfg-if", "cfg-if",
"libc", "libc",
"wasi", "wasi 0.11.1+wasi-snapshot-preview1",
]
[[package]]
name = "getrandom"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
dependencies = [
"cfg-if",
"libc",
"r-efi",
"wasi 0.14.2+wasi-0.2.4",
] ]
[[package]] [[package]]
@ -986,7 +1017,7 @@ checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
dependencies = [ dependencies = [
"libc", "libc",
"log", "log",
"wasi", "wasi 0.11.1+wasi-snapshot-preview1",
"windows-sys 0.48.0", "windows-sys 0.48.0",
] ]
@ -998,7 +1029,7 @@ checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c"
dependencies = [ dependencies = [
"libc", "libc",
"log", "log",
"wasi", "wasi 0.11.1+wasi-snapshot-preview1",
"windows-sys 0.59.0", "windows-sys 0.59.0",
] ]
@ -1176,6 +1207,12 @@ dependencies = [
"proc-macro2", "proc-macro2",
] ]
[[package]]
name = "r-efi"
version = "5.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
[[package]] [[package]]
name = "rand" name = "rand"
version = "0.8.5" version = "0.8.5"
@ -1183,8 +1220,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [ dependencies = [
"libc", "libc",
"rand_chacha", "rand_chacha 0.3.1",
"rand_core", "rand_core 0.6.4",
]
[[package]]
name = "rand"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1"
dependencies = [
"rand_chacha 0.9.0",
"rand_core 0.9.3",
] ]
[[package]] [[package]]
@ -1194,7 +1241,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [ dependencies = [
"ppv-lite86", "ppv-lite86",
"rand_core", "rand_core 0.6.4",
]
[[package]]
name = "rand_chacha"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb"
dependencies = [
"ppv-lite86",
"rand_core 0.9.3",
] ]
[[package]] [[package]]
@ -1203,7 +1260,16 @@ version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [ dependencies = [
"getrandom", "getrandom 0.2.16",
]
[[package]]
name = "rand_core"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38"
dependencies = [
"getrandom 0.3.3",
] ]
[[package]] [[package]]
@ -1457,6 +1523,7 @@ dependencies = [
"anyhow", "anyhow",
"chrono", "chrono",
"crossterm 0.27.0", "crossterm 0.27.0",
"flate2",
"futures", "futures",
"futures-util", "futures-util",
"ratatui", "ratatui",
@ -1464,7 +1531,7 @@ dependencies = [
"serde_json", "serde_json",
"tokio", "tokio",
"tokio-tungstenite", "tokio-tungstenite",
"tungstenite", "tungstenite 0.27.0",
"url", "url",
] ]
@ -1473,16 +1540,19 @@ name = "socktop_agent"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"axum", "axum",
"flate2",
"futures", "futures",
"futures-util", "futures-util",
"gfxinfo", "gfxinfo",
"nvml-wrapper", "nvml-wrapper",
"once_cell",
"serde", "serde",
"serde_json", "serde_json",
"sysinfo", "sysinfo",
"tokio", "tokio",
"tracing", "tracing",
"tracing-subscriber", "tracing-subscriber",
"tungstenite 0.27.0",
] ]
[[package]] [[package]]
@ -1666,7 +1736,7 @@ dependencies = [
"futures-util", "futures-util",
"log", "log",
"tokio", "tokio",
"tungstenite", "tungstenite 0.24.0",
] ]
[[package]] [[package]]
@ -1771,12 +1841,29 @@ dependencies = [
"http", "http",
"httparse", "httparse",
"log", "log",
"rand", "rand 0.8.5",
"sha1", "sha1",
"thiserror 1.0.69", "thiserror 1.0.69",
"utf-8", "utf-8",
] ]
[[package]]
name = "tungstenite"
version = "0.27.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eadc29d668c91fcc564941132e17b28a7ceb2f3ebf0b9dae3e03fd7a6748eb0d"
dependencies = [
"bytes",
"data-encoding",
"http",
"httparse",
"log",
"rand 0.9.2",
"sha1",
"thiserror 2.0.12",
"utf-8",
]
[[package]] [[package]]
name = "typenum" name = "typenum"
version = "1.18.0" version = "1.18.0"
@ -1853,6 +1940,15 @@ version = "0.11.1+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
[[package]]
name = "wasi"
version = "0.14.2+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3"
dependencies = [
"wit-bindgen-rt",
]
[[package]] [[package]]
name = "wasm-bindgen" name = "wasm-bindgen"
version = "0.2.100" version = "0.2.100"
@ -2332,6 +2428,15 @@ version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"
[[package]]
name = "wit-bindgen-rt"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
dependencies = [
"bitflags",
]
[[package]] [[package]]
name = "wmi" name = "wmi"
version = "0.15.2" version = "0.15.2"

560
README.md
View File

@ -1,8 +1,9 @@
# socktop # socktop
**socktop** is a remote system monitor with a rich TUI interface, inspired by `top` and `btop`, that communicates with a lightweight remote agent over WebSockets. socktop is a remote system monitor with a rich TUI, inspired by top/btop, talking to a lightweight agent over WebSockets.
It lets you watch CPU, memory, disks, network, temperatures, and processes on another machine in real-time — from the comfort of your terminal. - Linux agent: near-zero CPU when idle (request-driven, no always-on sampler)
- TUI: smooth graphs, sortable process table, scrollbars, readable colors
![socktop screenshot](./docs/14900ks_arch_alacritty_gpu_active.jpg) ![socktop screenshot](./docs/14900ks_arch_alacritty_gpu_active.jpg)
@ -10,430 +11,198 @@ It lets you watch CPU, memory, disks, network, temperatures, and processes on an
## Features ## Features
- 📡 **Remote monitoring** via WebSocket — lightweight agent sends JSON metrics - Remote monitoring via WebSocket (JSON over WS)
- 🖥 **Rich TUI** built with [ratatui](https://github.com/ratatui-org/ratatui) - TUI built with ratatui
- 🔍 **Detailed CPU view** — per-core history, current load, and trends - CPU
- 📊 **Memory, Swap, Disk usage** — human-readable units, color-coded - Overall sparkline + per-core mini bars
- 🌡 **Temperatures** — CPU temperature with visual indicators - Accurate per-process CPU% (Linux /proc deltas), normalized to 0100%
- 📈 **Network throughput** — live sparkline graphs with peak tracking - Memory/Swap gauges with human units
- 🏷 **Top processes table** — PID, name, CPU%, memory, and memory% - Disks: per-device usage
- 🎨 Color-coded load, zebra striping for readability - Network: per-interface throughput with sparklines and peak markers
- ⌨ **Keyboard shortcuts**: - Temperatures: CPU (optional)
- `q` / `Esc` → Quit - Top processes (top 50)
- PID, name, CPU%, memory, and memory%
- Click-to-sort by CPU% or Mem (descending)
- Scrollbar and mouse/keyboard scrolling
- Total process count shown in the header
- Only top-level processes listed (threads hidden) — matches btop/top
- Optional GPU metrics (can be disabled)
- Optional auth token for the agent
---
## Prerequisites: Install Rust (rustup)
Rust is fast, safe, and cross-platform. Installing it will make your machine better. Consider yourself privileged.
Linux/macOS:
```bash
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
# load cargo for this shell
source "$HOME/.cargo/env"
# ensure stable is up to date
rustup update stable
rustc --version
cargo --version
# after install you may need to reload your shell, e.g.:
exec bash # or: exec zsh / exec fish
```
Windows (for the brave): install from https://rustup.rs with the MSVC toolchain. Yes, you'll need Visual Studio Build Tools. You chose Windows — enjoy the ride.
--- ---
## Architecture ## Architecture
`socktop` has **two components**: Two components:
1. **Agent** (remote side) 1) Agent (remote): small Rust WS server using sysinfo + /proc. It collects on demand when the client asks (fast metrics ~500 ms, processes ~2 s, disks ~5 s). No background loop when nobody is connected.
A small Rust WebSocket server that runs on the target machine and gathers metrics via [sysinfo](https://crates.io/crates/sysinfo).
2. **Client** (local side) 2) Client (local): TUI that connects to ws://HOST:PORT/ws and renders updates.
The TUI app (`socktop`) that connects to the agent's `/ws` endpoint, receives JSON metrics, and renders them.
The two communicate over a persistent WebSocket connection.
--- ---
## Adaptive (idle-aware) sampling ## Quick start
The socktop agent now samples system metrics only when at least one WebSocket client is connected. When idle (no clients), the sampler sleeps and CPU usage drops to ~0%. - Build both binaries:
How it works
- The WebSocket handler increments/decrements a client counter in `AppState` on connect/disconnect.
- A background sampler wakes when the counter transitions from 0 → >0 and sleeps when it returns to 0.
- The most recent metrics snapshot is cached as JSON for fast responses.
Cold start behavior
- If a client requests metrics while the cache is empty (e.g., just started or after a long idle), the agent performs a one-off synchronous collection to respond immediately.
Tuning
- Sampling interval (active): update `spawn_sampler(state, Duration::from_millis(500))` in `socktop_agent/src/main.rs`.
- Always-on or low-frequency idle sampling: replace the “sleep when idle” logic in `socktop_agent/src/sampler.rs` with a low-frequency interval. Example sketch:
```rust
// In sampler.rs (sketch): sample every 10s when idle, 500ms when active
let idle_period = Duration::from_secs(10);
loop {
let active = state.client_count.load(Ordering::Relaxed) > 0;
let period = if active { Duration::from_millis(500) } else { idle_period };
let mut ticker = tokio::time::interval(period);
ticker.tick().await;
if !active {
// wake early if a client connects
tokio::select! {
_ = ticker.tick() => {},
_ = state.wake_sampler.notified() => continue,
}
}
let m = collect_metrics(&state).await;
if let Ok(js) = serde_json::to_string(&m) {
*state.last_json.write().await = js;
}
}
```
---
## Installation
### Prerequisites
- Rust 1.75+ (recommended latest stable)
- Cargo package manager
```bash
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
```
Raspberry Pi (required)
- Install GPU support with apt command below
```bash
sudo apt-get update
sudo apt-get install libdrm-dev libdrm-amdgpu1
```
### Install with cargo
Installing with the cargo package manager is the easiest way to install the latest stable version. The cargo package manager comes installed with rustup. Rust is the best programming language ever to be created. If you don't have it, you should. Copy and paste the sh script line from the prerequisites section above to set up.
Note: You will need to reload your shell after installing rustup; to do this, use the exec command. (example: exec bash, exec fish, exec sh)
Note for Windows users: You will need Visual Studio Community edition installed with C++ build tools in order to compile. Don't be salty about it — you're the one using Windows.
Another Note for Windows users: You can just download compiled binary exe files for both the agent and the terminal ui on the build artifacts section under github actions.
The commands below will install both the TUI and the agent. Both are standalone-capable: if you are on a remote server and never plan to run the TUI from that server, you can install only the agent. Likewise, if you don't plan to inspect performance on your local machine, you can install only socktop. The agent will by default not do anything without a socket connection, so generally it's fine to install on your local machine as it will use very minimal resources waiting for a socket connection.
```bash
cargo install socktop
cargo install socktop_agent
```
#### copy to a system path:
If you plan to run the agent as a systemd service (linux), execute the following:
```bash
sudo cp ~/.cargo/bin/socktop_agent /usr/local/bin/
```
#### Create service account (optional but recommended)
```bash
sudo groupadd --system socktop || true
sudo useradd --system --gid socktop --create-home \
--home-dir /var/lib/socktop --shell /usr/sbin/nologin socktop || true
```
#### Create unit file
```bash
sudo tee /etc/systemd/system/socktop-agent.service > /dev/null <<'EOF'
[Unit]
Description=Socktop Agent
After=network.target
[Service]
Type=simple
User=socktop
Group=socktop
# If you did NOT copy to /usr/local/bin, change ExecStart to /home/USERNAME/.cargo/bin/socktop_agent
ExecStart=/usr/local/bin/socktop_agent --port 3000
# Environment=SOCKTOP_TOKEN=changeme # uncomment and set if using auth
Restart=on-failure
RestartSec=2
AmbientCapabilities=CAP_NET_BIND_SERVICE
NoNewPrivileges=true
LimitNOFILE=65535
WorkingDirectory=/var/lib/socktop
[Install]
WantedBy=multi-user.target
EOF
```
#### Reload and enable
```bash
sudo systemctl daemon-reload
sudo systemctl enable --now socktop-agent
```
#### Update after cargo release
```bash
cargo install socktop_agent --force
sudo cp ~/.cargo/bin/socktop_agent /usr/local/bin/
sudo systemctl restart socktop-agent
```
### Build from source
```bash ```bash
git clone https://github.com/jasonwitty/socktop.git git clone https://github.com/jasonwitty/socktop.git
cd socktop cd socktop
cargo build --release cargo build --release
``` ```
### Install as a cargo binary - Start the agent on the target machine (default port 3000):
```bash ```bash
cargo install --path ./socktop/ ./target/release/socktop_agent --port 3000
cargo install --path ./socktop_agent/
``` ```
This will install the `socktop` binary into `~/.cargo/bin`.
- Connect with the TUI from your local machine:
```bash
./target/release/socktop ws://REMOTE_HOST:3000/ws
```
Tip: Add ?token=... if you enable auth (see Security).
--- ---
## Running ## Install (from crates.io)
### 1. Start the agent on the remote machine You don't need to clone this repo to use socktop. Install the published binaries with cargo:
The agent binary listens on a TCP port and serves `/ws`:
```bash ```bash
socktop_agent -p 3031 # TUI (client)
cargo install socktop
# Agent (server)
cargo install socktop_agent
``` ```
> **Tip:** You can run the agent under `systemd`, inside a Docker container, or just in a tmux/screen session. This drops socktop and socktop_agent into ~/.cargo/bin (add it to PATH).
### 2. Connect with the client Notes:
From your local machine: - After installing Rust via rustup, reload your shell (e.g., exec bash) so cargo is on PATH.
```bash - Windows: you can also grab prebuilt EXEs from GitHub Actions artifacts if rustup scares you. It shouldn't. Be brave.
socktop ws://REMOTE_HOST:8080/ws
```
Example: Option B: System-wide agent (Linux)
```bash ```bash
socktop ws://192.168.1.50:8080/ws # If you installed with cargo, binaries are in ~/.cargo/bin
sudo install -o root -g root -m 0755 "$HOME/.cargo/bin/socktop_agent" /usr/local/bin/socktop_agent
# Install and enable the systemd service (example unit in docs/)
sudo install -o root -g root -m 0644 docs/socktop-agent.service /etc/systemd/system/socktop-agent.service
sudo systemctl daemon-reload
sudo systemctl enable --now socktop-agent
``` ```
--- ---
## Usage ## Usage
When connected, `socktop` displays: Agent (server):
**Left column:** ```bash
- **CPU avg graph** — sparkline of recent overall CPU usage socktop_agent --port 3000
- **Memory gauge** — total and used RAM # or env: SOCKTOP_PORT=3000 socktop_agent
- **Swap gauge** — total and used swap # optional auth: SOCKTOP_TOKEN=changeme socktop_agent
- **Disks** — usage per device (only devices with available space > 0) ```
- **Network Download/Upload** — sparkline in KB/s, with current & peak values
**Right column:** Client (TUI):
- **Per-core history & trends** — each core's recent load, current %, and trend arrow
- **Top processes table** — top 20 processes with PID, name, CPU%, memory usage, and memory% ```bash
socktop ws://HOST:3000/ws
# with token:
socktop "ws://HOST:3000/ws?token=changeme"
```
Intervals (client-driven):
- Fast metrics: ~500 ms
- Processes: ~2 s (top 50)
- Disks: ~5 s
The agent stays idle unless queried. When queried, it collects just what's needed.
--- ---
## Configuring the agent port ## Updating
The agent listens on TCP port 3000 by default. You can override this via a CLI flag, a positional port argument, or an environment variable: Update the agent (systemd):
- CLI flag:
- socktop_agent --port 8080
- socktop_agent -p 8080
- Positional:
- socktop_agent 8080
- Environment variable:
- SOCKTOP_PORT=8080 socktop_agent
Help:
- socktop_agent --help
The TUI should point to ws://HOST:PORT/ws, e.g.:
- cargo run -p socktop -- ws://127.0.0.1:8080/ws
---
## Keyboard Shortcuts
| Key | Action |
|-------------|------------|
| `q` or `Esc`| Quit |
---
## Security (optional token)
By default, the agent exposes metrics over an unauthenticated WebSocket. For untrusted networks, set an auth token and pass it in the client URL:
- Server:
- SOCKTOP_TOKEN=changeme socktop_agent --port 3000
- Client:
- socktop ws://HOST:3000/ws?token=changeme
---
## Run socktop agent as a systemd service
Prerequisites
- systemd-based Linux
- Built or downloaded socktop_agent binary
- Port 3000 reachable (or adjust)
1. Install the binary
```bash
# From your project root; adjust path to your built binary if needed
sudo install -o root -g root -m 0755 ./target/release/socktop_agent /usr/local/bin/socktop_agent
```
2. Create a dedicated user
```bash
sudo groupadd --system socktop || true
# On Debian/Ubuntu the nologin shell is /usr/sbin/nologin; on RHEL/CentOS it may be /sbin/nologin
sudo useradd --system --gid socktop --create-home --home-dir /var/lib/socktop --shell /usr/sbin/nologin socktop || true
```
3. Install the systemd unit
```bash
# Using the provided unit file from this repo
sudo install -o root -g root -m 0644 docs/socktop-agent.service /etc/systemd/system/socktop-agent.service
sudo systemctl daemon-reload
```
4. Enable and start
```bash
sudo systemctl enable --now socktop-agent.service
```
5. Verify it's running
```bash ```bash
# on the server running the agent
cargo install socktop_agent --force
sudo systemctl stop socktop-agent
sudo install -o root -g root -m 0755 "$HOME/.cargo/bin/socktop_agent" /usr/local/bin/socktop_agent
# if you changed the unit file:
# sudo install -o root -g root -m 0644 docs/socktop-agent.service /etc/systemd/system/socktop-agent.service
# sudo systemctl daemon-reload
sudo systemctl start socktop-agent
sudo systemctl status socktop-agent --no-pager sudo systemctl status socktop-agent --no-pager
sudo journalctl -u socktop-agent -n 100 --no-pager # logs:
# journalctl -u socktop-agent -f
# Check the port
ss -ltnp | grep socktop_agent
# Or test locally (adjust if your agent exposes a different endpoint)
curl -v http://127.0.0.1:3000/ || true
``` ```
6. Configure authentication (optional) Update the TUI (client):
```bash ```bash
# Add a token without editing the unit file directly cargo install socktop --force
sudo systemctl edit socktop-agent socktop ws://HOST:3000/ws
# Then add:
# [Service]
# Environment=SOCKTOP_TOKEN=your_strong_token
sudo systemctl daemon-reload
sudo systemctl restart socktop-agent
``` ```
7. Change the listen port (optional) Tip: If only the binary changed, restart is enough. If the unit file changed, run sudo systemctl daemon-reload.
```bash
sudo systemctl edit socktop-agent
# Then add:
# [Service]
# ExecStart=
# ExecStart=/usr/local/bin/socktop_agent --port 8080
sudo systemctl daemon-reload
sudo systemctl restart socktop-agent
```
8. Open the firewall (if applicable)
```bash
# UFW
sudo ufw allow 3000/tcp
# firewalld
sudo firewall-cmd --permanent --add-port=3000/tcp
sudo firewall-cmd --reload
```
9. Uninstall
```bash
sudo systemctl disable --now socktop-agent
sudo rm -f /etc/systemd/system/socktop-agent.service
sudo systemctl daemon-reload
sudo rm -f /usr/local/bin/socktop_agent
sudo userdel -r socktop 2>/dev/null || true
sudo groupdel socktop 2>/dev/null || true
```
--- ---
## Platform notes ## Configuration (agent)
- Linux x86_64/AMD/Intel: fully supported.
- Raspberry Pi: - Port:
- 64-bit: rustup target add aarch64-unknown-linux-gnu; build on-device for simplicity. - Flag: --port 8080 or -p 8080
- 32-bit: rustup target add armv7-unknown-linux-gnueabihf. - Positional: socktop_agent 8080
- Windows: - Env: SOCKTOP_PORT=8080
- TUI and agent build/run with stable Rust. Use PowerShell: - Auth token (optional): SOCKTOP_TOKEN=changeme
- cargo run -p socktop_agent -- --port 3000 - Disable GPU metrics: SOCKTOP_AGENT_GPU=0
- cargo run -p socktop -- ws://127.0.0.1:3000/ws - Disable CPU temperature: SOCKTOP_AGENT_TEMP=0
- CPU temperature may be unavailable; display will show N/A.
- MacOS
- Tested only on Mac/Intel currently
--- ---
## Using tmux to monitor multiple hosts ## Keyboard & Mouse
You can use tmux to show multiple socktop instances in a single terminal. - Quit: q or Esc
- Processes pane:
![socktop screenshot](./docs/tmux_4_rpis.jpg) - Click “CPU %” to sort by CPU descending
monitoring 4 Raspberry Pis using Tmux - Click “Mem” to sort by memory descending
- Mouse wheel: scroll
Prerequisites: - Drag scrollbar: scroll
- Install tmux (Ubuntu/Debian: `sudo apt-get install tmux`) - Arrow/PageUp/PageDown/Home/End: scroll
Key bindings (defaults):
- Split left/right: Ctrl-b %
- Split top/bottom: Ctrl-b "
- Move between panes: Ctrl-b + Arrow keys
- Show pane numbers: Ctrl-b q
- Close a pane: Ctrl-b x
- Detach from session: Ctrl-b d
Two panes (left/right)
- This creates a session named "socktop", splits it horizontally, and starts two socktops.
```bash
tmux new-session -d -s socktop 'socktop ws://HOST1:3000/ws' \; \
split-window -h 'socktop ws://HOST2:3000/ws' \; \
select-layout even-horizontal \; \
attach
```
Four panes (top-left, top-right, bottom-left, bottom-right)
- This creates a 2x2 grid with one socktop per pane.
```bash
tmux new-session -d -s socktop 'socktop ws://HOST1:3000/ws' \; \
split-window -h 'socktop ws://HOST2:3000/ws' \; \
select-pane -t 0 \; split-window -v 'socktop ws://HOST3:3000/ws' \; \
select-pane -t 1 \; split-window -v 'socktop ws://HOST4:3000/ws' \; \
select-layout tiled \; \
attach
```
Tips:
- Replace HOST1..HOST4 (and ports) with your targets.
- Reattach later: `tmux attach -t socktop`
- Kill the session: `tmux kill-session -t socktop`
--- ---
## Example agent JSON ## Example agent JSON
`socktop` expects the agent to send metrics in this shape:
```json ```json
{ {
"cpu_total": 12.4, "cpu_total": 12.4,
"cpu_per_core": [11.2, 15.7, ...], "cpu_per_core": [11.2, 15.7],
"mem_total": 33554432, "mem_total": 33554432,
"mem_used": 18321408, "mem_used": 18321408,
"swap_total": 0, "swap_total": 0,
@ -445,43 +214,80 @@ Tips:
"networks": [{"name":"eth0","received":12345678,"transmitted":87654321}], "networks": [{"name":"eth0","received":12345678,"transmitted":87654321}],
"top_processes": [ "top_processes": [
{"pid":1234,"name":"nginx","cpu_usage":1.2,"mem_bytes":12345678} {"pid":1234,"name":"nginx","cpu_usage":1.2,"mem_bytes":12345678}
] ],
"gpus": null
} }
``` ```
Notes:
- process_count is merged into the main metrics on the client when processes are polled.
- top_processes are the current top 50 (sorting in the TUI is client-side).
---
## Security
Set a token on the agent and pass it as a query param from the client:
Server:
```bash
SOCKTOP_TOKEN=changeme socktop_agent --port 3000
```
Client:
```bash
socktop "ws://HOST:3000/ws?token=changeme"
```
---
## Platform notes
- Linux: fully supported (agent and client).
- Raspberry Pi:
- 64-bit: aarch64-unknown-linux-gnu
- 32-bit: armv7-unknown-linux-gnueabihf
- Windows:
- TUI + agent can build with stable Rust; bring your own MSVC. You're on Windows; you know the drill.
- CPU temperature may be unavailable.
- macOS:
- TUI works; agent is primarily targeted at Linux.
--- ---
## Development ## Development
### Run in debug mode:
```bash
cargo run -- ws://127.0.0.1:8080/ws
```
### Code formatting & lint:
```bash ```bash
cargo fmt cargo fmt
cargo clippy cargo clippy --all-targets --all-features
cargo run -p socktop -- ws://127.0.0.1:3000/ws
cargo run -p socktop_agent -- --port 3000
``` ```
--- ---
## Roadmap ## Roadmap
- [ ] Configurable refresh interval
- [ ] Filter/sort top processes in the TUI - [x] Agent authentication (token)
- [x] Hide per-thread entries; only show processes
- [x] Sort top processes in the TUI
- [ ] Configurable refresh intervals (client)
- [ ] Export metrics to file - [ ] Export metrics to file
- [ ] TLS / WSS support - [ ] TLS / WSS support
- [ x ] Agent authentication - [x] Split processes/disks to separate WS calls with independent cadences (already logical on client; formalize API)
- [ ] Split processes and jobs into separate WS calls on different intervals
--- ---
## License ## License
MIT License — see [LICENSE](LICENSE).
MIT — see LICENSE.
--- ---
## Acknowledgements ## Acknowledgements
- [`ratatui`](https://github.com/ratatui-org/ratatui) for terminal UI rendering
- [`sysinfo`](https://crates.io/crates/sysinfo) for system metrics - ratatui for the TUI
- [`tokio-tungstenite`](https://crates.io/crates/tokio-tungstenite) for WebSocket client/server - sysinfo for system metrics
- tokio-tungstenite for WebSockets

View File

@ -9,7 +9,6 @@ license = "MIT"
[dependencies] [dependencies]
tokio = { workspace = true } tokio = { workspace = true }
tokio-tungstenite = { workspace = true } tokio-tungstenite = { workspace = true }
tungstenite = { workspace = true }
futures = { workspace = true } futures = { workspace = true }
futures-util = { workspace = true } futures-util = { workspace = true }
serde = { workspace = true } serde = { workspace = true }
@ -19,3 +18,5 @@ ratatui = { workspace = true }
crossterm = { workspace = true } crossterm = { workspace = true }
chrono = { workspace = true } chrono = { workspace = true }
anyhow = { workspace = true } anyhow = { workspace = true }
flate2 = { version = "1", default-features = false, features = ["rust_backend"] }
tungstenite = "0.27.0"

View File

@ -25,11 +25,12 @@ use crate::ui::cpu::{
draw_cpu_avg_graph, draw_per_core_bars, per_core_clamp, per_core_content_area, draw_cpu_avg_graph, draw_per_core_bars, per_core_clamp, per_core_content_area,
per_core_handle_key, per_core_handle_mouse, per_core_handle_scrollbar_mouse, PerCoreScrollDrag, per_core_handle_key, per_core_handle_mouse, per_core_handle_scrollbar_mouse, PerCoreScrollDrag,
}; };
use crate::ui::processes::{processes_handle_key, processes_handle_mouse, ProcSortBy};
use crate::ui::{ use crate::ui::{
disks::draw_disks, gpu::draw_gpu, header::draw_header, mem::draw_mem, net::draw_net_spark, disks::draw_disks, gpu::draw_gpu, header::draw_header, mem::draw_mem, net::draw_net_spark,
processes::draw_top_processes, swap::draw_swap, swap::draw_swap,
}; };
use crate::ws::{connect, request_metrics}; use crate::ws::{connect, request_disks, request_metrics, request_processes};
pub struct App { pub struct App {
// Latest metrics + histories // Latest metrics + histories
@ -53,6 +54,15 @@ pub struct App {
pub per_core_scroll: usize, pub per_core_scroll: usize,
pub per_core_drag: Option<PerCoreScrollDrag>, // new: drag state pub per_core_drag: Option<PerCoreScrollDrag>, // new: drag state
pub procs_scroll_offset: usize,
pub procs_drag: Option<PerCoreScrollDrag>,
pub procs_sort_by: ProcSortBy,
last_procs_area: Option<ratatui::layout::Rect>,
last_procs_poll: Instant,
last_disks_poll: Instant,
procs_interval: Duration,
disks_interval: Duration,
} }
impl App { impl App {
@ -69,6 +79,18 @@ impl App {
should_quit: false, should_quit: false,
per_core_scroll: 0, per_core_scroll: 0,
per_core_drag: None, per_core_drag: None,
procs_scroll_offset: 0,
procs_drag: None,
procs_sort_by: ProcSortBy::CpuDesc,
last_procs_area: None,
last_procs_poll: Instant::now()
.checked_sub(Duration::from_secs(2))
.unwrap_or_else(Instant::now), // trigger immediately on first loop
last_disks_poll: Instant::now()
.checked_sub(Duration::from_secs(5))
.unwrap_or_else(Instant::now),
procs_interval: Duration::from_secs(2),
disks_interval: Duration::from_secs(5),
} }
} }
@ -143,6 +165,12 @@ impl App {
total_rows, total_rows,
content.height as usize, content.height as usize,
); );
if let Some(p_area) = self.last_procs_area {
// page size = visible rows (inner height minus header = 1)
let page = p_area.height.saturating_sub(3).max(1) as usize; // borders (2) + header (1)
processes_handle_key(&mut self.procs_scroll_offset, k, page);
}
} }
Event::Mouse(m) => { Event::Mouse(m) => {
// Layout to get areas // Layout to get areas
@ -192,6 +220,21 @@ impl App {
total_rows, total_rows,
content.height as usize, content.height as usize,
); );
// Processes table: sort by column on header click
if let (Some(mm), Some(p_area)) =
(self.last_metrics.as_ref(), self.last_procs_area)
{
if let Some(new_sort) = processes_handle_mouse(
&mut self.procs_scroll_offset,
&mut self.procs_drag,
m,
p_area,
mm.top_processes.len(),
) {
self.procs_sort_by = new_sort;
}
}
} }
Event::Resize(_, _) => {} Event::Resize(_, _) => {}
_ => {} _ => {}
@ -204,6 +247,27 @@ impl App {
// Fetch and update // Fetch and update
if let Some(m) = request_metrics(ws).await { if let Some(m) = request_metrics(ws).await {
self.update_with_metrics(m); self.update_with_metrics(m);
// Only poll processes every 2s
if self.last_procs_poll.elapsed() >= self.procs_interval {
if let Some(procs) = request_processes(ws).await {
if let Some(mm) = self.last_metrics.as_mut() {
mm.top_processes = procs.top_processes;
mm.process_count = Some(procs.process_count);
}
}
self.last_procs_poll = Instant::now();
}
// Only poll disks every 5s
if self.last_disks_poll.elapsed() >= self.disks_interval {
if let Some(disks) = request_disks(ws).await {
if let Some(mm) = self.last_metrics.as_mut() {
mm.disks = disks;
}
}
self.last_disks_poll = Instant::now();
}
} }
// Draw // Draw
@ -216,7 +280,21 @@ impl App {
Ok(()) Ok(())
} }
fn update_with_metrics(&mut self, m: Metrics) { fn update_with_metrics(&mut self, mut m: Metrics) {
if let Some(prev) = &self.last_metrics {
// Preserve slower fields when the fast payload omits them
if m.disks.is_empty() {
m.disks = prev.disks.clone();
}
if m.top_processes.is_empty() {
m.top_processes = prev.top_processes.clone();
}
// Preserve total processes count across fast updates
if m.process_count.is_none() {
m.process_count = prev.process_count;
}
}
// CPU avg history // CPU avg history
let v = m.cpu_total.clamp(0.0, 100.0).round() as u64; let v = m.cpu_total.clamp(0.0, 100.0).round() as u64;
push_capped(&mut self.cpu_hist, v, 600); push_capped(&mut self.cpu_hist, v, 600);
@ -243,6 +321,7 @@ impl App {
self.rx_peak = self.rx_peak.max(rx_kb); self.rx_peak = self.rx_peak.max(rx_kb);
self.tx_peak = self.tx_peak.max(tx_kb); self.tx_peak = self.tx_peak.max(tx_kb);
// Store merged snapshot
self.last_metrics = Some(m); self.last_metrics = Some(m);
} }
@ -305,16 +384,16 @@ impl App {
// Bottom area: left = Disks + Network, right = Top Processes // Bottom area: left = Disks + Network, right = Top Processes
let bottom_lr = ratatui::layout::Layout::default() let bottom_lr = ratatui::layout::Layout::default()
.direction(Direction::Horizontal) .direction(Direction::Horizontal)
.constraints([Constraint::Percentage(66), Constraint::Percentage(34)]) .constraints([Constraint::Percentage(60), Constraint::Percentage(40)])
.split(rows[4]); .split(rows[4]);
// Left bottom: Disks + Net stacked (network "back up") // Left bottom: Disks + Net stacked (make net panes slightly taller)
let left_stack = ratatui::layout::Layout::default() let left_stack = ratatui::layout::Layout::default()
.direction(Direction::Vertical) .direction(Direction::Vertical)
.constraints([ .constraints([
Constraint::Min(7), // Disks grow Constraint::Min(4), // Disks shrink slightly
Constraint::Length(3), // Download Constraint::Length(5), // Download taller
Constraint::Length(3), // Upload Constraint::Length(5), // Upload taller
]) ])
.split(bottom_lr[0]); .split(bottom_lr[0]);
@ -343,7 +422,16 @@ impl App {
); );
// Right bottom: Top Processes fills the column // Right bottom: Top Processes fills the column
draw_top_processes(f, bottom_lr[1], self.last_metrics.as_ref()); let procs_area = bottom_lr[1];
// Cache for input handlers
self.last_procs_area = Some(procs_area);
crate::ui::processes::draw_top_processes(
f,
procs_area,
self.last_metrics.as_ref(),
self.procs_scroll_offset,
self.procs_sort_by,
);
} }
} }
@ -361,6 +449,18 @@ impl Default for App {
should_quit: false, should_quit: false,
per_core_scroll: 0, per_core_scroll: 0,
per_core_drag: None, per_core_drag: None,
procs_scroll_offset: 0,
procs_drag: None,
procs_sort_by: ProcSortBy::CpuDesc,
last_procs_area: None,
last_procs_poll: Instant::now()
.checked_sub(Duration::from_secs(2))
.unwrap_or_else(Instant::now), // trigger immediately on first loop
last_disks_poll: Instant::now()
.checked_sub(Duration::from_secs(5))
.unwrap_or_else(Instant::now),
procs_interval: Duration::from_secs(2),
disks_interval: Duration::from_secs(5),
} }
} }
} }

View File

@ -2,21 +2,7 @@
use serde::Deserialize; use serde::Deserialize;
#[derive(Debug, Deserialize, Clone)] #[derive(Debug, Clone, Deserialize)]
pub struct Disk {
pub name: String,
pub total: u64,
pub available: u64,
}
#[derive(Debug, Deserialize, Clone)]
pub struct Network {
// cumulative totals; client diffs to compute rates
pub received: u64,
pub transmitted: u64,
}
#[derive(Debug, Deserialize, Clone)]
pub struct ProcessInfo { pub struct ProcessInfo {
pub pid: u32, pub pid: u32,
pub name: String, pub name: String,
@ -24,15 +10,48 @@ pub struct ProcessInfo {
pub mem_bytes: u64, pub mem_bytes: u64,
} }
#[derive(Debug, Clone, serde::Deserialize)] #[derive(Debug, Clone, Deserialize)]
pub struct GpuMetrics { pub struct DiskInfo {
pub name: String, pub name: String,
pub utilization_gpu_pct: u32, pub total: u64,
pub mem_used_bytes: u64, pub available: u64,
pub mem_total_bytes: u64,
} }
#[derive(Debug, Deserialize, Clone)] #[derive(Debug, Clone, Deserialize)]
pub struct NetworkInfo {
#[allow(dead_code)]
pub name: String,
pub received: u64,
pub transmitted: u64,
}
/// One GPU's metrics as deserialized from the agent's JSON.
///
/// Serde `alias` attributes accept both the new and legacy key names so the
/// client keeps working against older agent versions; all fields default to
/// `None` when absent.
#[derive(Debug, Clone, Deserialize)]
pub struct GpuInfo {
    /// Device name, if the agent reports one.
    pub name: Option<String>,
    #[allow(dead_code)]
    pub vendor: Option<String>,
    // Accept both the new and legacy keys
    #[serde(
        default,
        alias = "utilization_gpu_pct",
        alias = "gpu_util_pct",
        alias = "gpu_utilization"
    )]
    pub utilization: Option<f32>,
    // VRAM usage in bytes — presumably, given the *_bytes aliases; TODO confirm agent units.
    #[serde(default, alias = "mem_used_bytes", alias = "vram_used_bytes")]
    pub mem_used: Option<u64>,
    #[serde(default, alias = "mem_total_bytes", alias = "vram_total_bytes")]
    pub mem_total: Option<u64>,
    #[allow(dead_code)]
    #[serde(default, alias = "temp_c", alias = "temperature_c")]
    pub temperature: Option<f32>,
}
#[derive(Debug, Clone, Deserialize)]
pub struct Metrics { pub struct Metrics {
pub cpu_total: f32, pub cpu_total: f32,
pub cpu_per_core: Vec<f32>, pub cpu_per_core: Vec<f32>,
@ -40,11 +59,20 @@ pub struct Metrics {
pub mem_used: u64, pub mem_used: u64,
pub swap_total: u64, pub swap_total: u64,
pub swap_used: u64, pub swap_used: u64,
pub process_count: usize,
pub hostname: String, pub hostname: String,
pub cpu_temp_c: Option<f32>, pub cpu_temp_c: Option<f32>,
pub disks: Vec<Disk>, pub disks: Vec<DiskInfo>,
pub networks: Vec<Network>, pub networks: Vec<NetworkInfo>,
pub top_processes: Vec<ProcessInfo>,
pub gpus: Option<Vec<GpuInfo>>,
// New: keep the last reported total process count
#[serde(default)]
pub process_count: Option<usize>,
}
#[allow(dead_code)]
#[derive(Debug, Clone, Deserialize)]
pub struct ProcessesPayload {
pub process_count: usize,
pub top_processes: Vec<ProcessInfo>, pub top_processes: Vec<ProcessInfo>,
pub gpus: Option<Vec<GpuMetrics>>,
} }

View File

@ -1,10 +1,11 @@
//! CPU average sparkline + per-core mini bars. //! CPU average sparkline + per-core mini bars.
use crate::ui::theme::{SB_ARROW, SB_THUMB, SB_TRACK};
use crossterm::event::{KeyCode, KeyEvent, MouseButton, MouseEvent, MouseEventKind}; use crossterm::event::{KeyCode, KeyEvent, MouseButton, MouseEvent, MouseEventKind};
use ratatui::style::Modifier; use ratatui::style::Modifier;
use ratatui::style::{Color, Style};
use ratatui::{ use ratatui::{
layout::{Constraint, Direction, Layout, Rect}, layout::{Constraint, Direction, Layout, Rect},
style::{Color, Style},
text::{Line, Span}, text::{Line, Span},
widgets::{Block, Borders, Paragraph, Sparkline}, widgets::{Block, Borders, Paragraph, Sparkline},
}; };
@ -12,11 +13,6 @@ use ratatui::{
use crate::history::PerCoreHistory; use crate::history::PerCoreHistory;
use crate::types::Metrics; use crate::types::Metrics;
/// Subtle grey theme for the custom scrollbar
const SB_ARROW: Color = Color::Rgb(170, 170, 180);
const SB_TRACK: Color = Color::Rgb(170, 170, 180);
const SB_THUMB: Color = Color::Rgb(170, 170, 180);
/// State for dragging the scrollbar thumb /// State for dragging the scrollbar thumb
#[derive(Clone, Copy, Debug, Default)] #[derive(Clone, Copy, Debug, Default)]
pub struct PerCoreScrollDrag { pub struct PerCoreScrollDrag {

View File

@ -82,15 +82,13 @@ pub fn draw_gpu(f: &mut ratatui::Frame<'_>, area: Rect, m: Option<&Metrics>) {
let g = &gpus[i]; let g = &gpus[i];
// Row 1: GPU name // Row 1: GPU name
let name_text = g.name.clone(); let name_text = g.name.as_deref().unwrap_or("GPU");
f.render_widget( let name_p = Paragraph::new(Span::raw(name_text)).style(Style::default().fg(Color::Gray));
Paragraph::new(Span::raw(name_text)).style(Style::default().fg(Color::Gray)), f.render_widget(name_p, rows[i * 3]);
rows[i * 3],
);
// Row 2: Utilization bar + right label // Row 2: Utilization bar + right label
let util_cols = split_bar(rows[i * 3 + 1]); let util_cols = split_bar(rows[i * 3 + 1]);
let util = g.utilization_gpu_pct.min(100) as u16; let util = g.utilization.unwrap_or(0.0).clamp(0.0, 100.0) as u16;
let util_gauge = Gauge::default() let util_gauge = Gauge::default()
.gauge_style(Style::default().fg(Color::Green)) .gauge_style(Style::default().fg(Color::Green))
.label(Span::raw("")) .label(Span::raw(""))
@ -104,8 +102,8 @@ pub fn draw_gpu(f: &mut ratatui::Frame<'_>, area: Rect, m: Option<&Metrics>) {
// Row 3: VRAM bar + right label // Row 3: VRAM bar + right label
let mem_cols = split_bar(rows[i * 3 + 2]); let mem_cols = split_bar(rows[i * 3 + 2]);
let used = g.mem_used_bytes; let used = g.mem_used.unwrap_or(0);
let total = g.mem_total_bytes.max(1); let total = g.mem_total.unwrap_or(1);
let mem_ratio = used as f64 / total as f64; let mem_ratio = used as f64 / total as f64;
let mem_pct = (mem_ratio * 100.0).round() as u16; let mem_pct = (mem_ratio * 100.0).round() as u16;
@ -114,7 +112,6 @@ pub fn draw_gpu(f: &mut ratatui::Frame<'_>, area: Rect, m: Option<&Metrics>) {
.label(Span::raw("")) .label(Span::raw(""))
.ratio(mem_ratio); .ratio(mem_ratio);
f.render_widget(mem_gauge, mem_cols[0]); f.render_widget(mem_gauge, mem_cols[0]);
// Prepare strings to enable captured identifiers in format!
let used_s = fmt_bytes(used); let used_s = fmt_bytes(used);
let total_s = fmt_bytes(total); let total_s = fmt_bytes(total);
f.render_widget( f.render_widget(

View File

@ -8,4 +8,5 @@ pub mod mem;
pub mod net; pub mod net;
pub mod processes; pub mod processes;
pub mod swap; pub mod swap;
pub mod theme;
pub mod util; pub mod util;

View File

@ -1,42 +1,105 @@
//! Top processes table with per-cell coloring and zebra striping. //! Top processes table with per-cell coloring, zebra striping, sorting, and a scrollbar.
use crossterm::event::{MouseButton, MouseEvent, MouseEventKind};
use ratatui::style::Modifier; use ratatui::style::Modifier;
use ratatui::{ use ratatui::{
layout::{Constraint, Rect}, layout::{Constraint, Direction, Layout, Rect},
style::{Color, Style}, style::{Color, Style},
widgets::{Block, Borders, Cell, Row, Table}, text::{Line, Span},
widgets::{Block, Borders, Paragraph, Table},
}; };
use std::cmp::Ordering;
use crate::types::Metrics; use crate::types::Metrics;
use crate::ui::cpu::{per_core_clamp, per_core_handle_scrollbar_mouse};
use crate::ui::theme::{SB_ARROW, SB_THUMB, SB_TRACK};
use crate::ui::util::human; use crate::ui::util::human;
/// Sort order for the processes table (always descending on the chosen column).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum ProcSortBy {
    /// Sort by CPU usage, highest first (the default).
    #[default]
    CpuDesc,
    /// Sort by resident memory bytes, highest first.
    MemDesc,
}
// Keep the original header widths here so drawing and hit-testing match.
/// Column constraints shared by `draw_top_processes` (drawing) and
/// `processes_handle_mouse` (header click hit-testing).
const COLS: [Constraint; 5] = [
    Constraint::Length(8),      // PID
    Constraint::Percentage(40), // Name
    Constraint::Length(8),      // CPU %
    Constraint::Length(12),     // Mem
    Constraint::Length(8),      // Mem %
];
pub fn draw_top_processes(
f: &mut ratatui::Frame<'_>,
area: Rect,
m: Option<&Metrics>,
scroll_offset: usize,
sort_by: ProcSortBy,
) {
// Draw outer block and title
let Some(mm) = m else { return };
let total = mm.process_count.unwrap_or(mm.top_processes.len());
let block = Block::default()
.borders(Borders::ALL) .borders(Borders::ALL)
.title("Top Processes"), .title(format!("Top Processes ({total} total)"));
area, f.render_widget(block, area);
);
// Inner area and content area (reserve 2 columns for scrollbar)
let inner = Rect {
x: area.x + 1,
y: area.y + 1,
width: area.width.saturating_sub(2),
height: area.height.saturating_sub(2),
};
if inner.height < 1 || inner.width < 3 {
return; return;
}
let content = Rect {
x: inner.x,
y: inner.y,
width: inner.width.saturating_sub(2),
height: inner.height,
}; };
// Sort rows (by CPU% or Mem bytes), descending.
let mut idxs: Vec<usize> = (0..mm.top_processes.len()).collect();
match sort_by {
ProcSortBy::CpuDesc => idxs.sort_by(|&a, &b| {
let aa = mm.top_processes[a].cpu_usage;
let bb = mm.top_processes[b].cpu_usage;
bb.partial_cmp(&aa).unwrap_or(Ordering::Equal)
}),
ProcSortBy::MemDesc => idxs.sort_by(|&a, &b| {
let aa = mm.top_processes[a].mem_bytes;
let bb = mm.top_processes[b].mem_bytes;
bb.cmp(&aa)
}),
}
// Scrolling
let total_rows = idxs.len();
let header_rows = 1usize;
let viewport_rows = content.height.saturating_sub(header_rows as u16) as usize;
let max_off = total_rows.saturating_sub(viewport_rows);
let offset = scroll_offset.min(max_off);
let show_n = total_rows.saturating_sub(offset).min(viewport_rows);
// Build visible rows
let total_mem_bytes = mm.mem_total.max(1); let total_mem_bytes = mm.mem_total.max(1);
let title = format!("Top Processes ({} total)", mm.process_count);
let peak_cpu = mm let peak_cpu = mm
.top_processes .top_processes
.iter() .iter()
.map(|p| p.cpu_usage) .map(|p| p.cpu_usage)
.fold(0.0_f32, f32::max); .fold(0.0_f32, f32::max);
let rows: Vec<Row> = mm let rows_iter = idxs.iter().skip(offset).take(show_n).map(|&ix| {
.top_processes let p = &mm.top_processes[ix];
.iter()
.enumerate()
.map(|(i, p)| {
let mem_pct = (p.mem_bytes as f64 / total_mem_bytes as f64) * 100.0; let mem_pct = (p.mem_bytes as f64 / total_mem_bytes as f64) * 100.0;
let cpu_fg = match p.cpu_usage { let cpu_val = p.cpu_usage;
let cpu_fg = match cpu_val {
x if x < 25.0 => Color::Green, x if x < 25.0 => Color::Green,
x if x < 60.0 => Color::Yellow, x if x < 60.0 => Color::Yellow,
_ => Color::Red, _ => Color::Red,
@ -47,48 +110,157 @@ pub fn draw_top_processes(f: &mut ratatui::Frame<'_>, area: Rect, m: Option<&Met
_ => Color::Red, _ => Color::Red,
}; };
let zebra = if i % 2 == 0 { let emphasis = if (cpu_val - peak_cpu).abs() < f32::EPSILON {
Style::default().fg(Color::Gray)
} else {
Style::default()
};
let emphasis = if (p.cpu_usage - peak_cpu).abs() < f32::EPSILON {
Style::default().add_modifier(Modifier::BOLD) Style::default().add_modifier(Modifier::BOLD)
} else { } else {
Style::default() Style::default()
}; };
Row::new(vec![ let cpu_str = fmt_cpu_pct(cpu_val);
Cell::from(p.pid.to_string()).style(Style::default().fg(Color::DarkGray)),
Cell::from(p.name.clone()),
Cell::from(format!("{:.1}%", p.cpu_usage)).style(Style::default().fg(cpu_fg)),
Cell::from(human(p.mem_bytes)),
Cell::from(format!("{mem_pct:.2}%")).style(Style::default().fg(mem_fg)),
])
.style(zebra.patch(emphasis))
})
.collect();
let header = Row::new(vec!["PID", "Name", "CPU %", "Mem", "Mem %"]).style( ratatui::widgets::Row::new(vec![
ratatui::widgets::Cell::from(p.pid.to_string())
.style(Style::default().fg(Color::DarkGray)),
ratatui::widgets::Cell::from(p.name.clone()),
ratatui::widgets::Cell::from(cpu_str).style(Style::default().fg(cpu_fg)),
ratatui::widgets::Cell::from(human(p.mem_bytes)),
ratatui::widgets::Cell::from(format!("{mem_pct:.2}%"))
.style(Style::default().fg(mem_fg)),
])
.style(emphasis)
});
// Header with sort indicator
let cpu_hdr = match sort_by {
ProcSortBy::CpuDesc => "CPU % •",
_ => "CPU %",
};
let mem_hdr = match sort_by {
ProcSortBy::MemDesc => "Mem •",
_ => "Mem",
};
let header = ratatui::widgets::Row::new(vec!["PID", "Name", cpu_hdr, mem_hdr, "Mem %"]).style(
Style::default() Style::default()
.fg(Color::Cyan) .fg(Color::Cyan)
.add_modifier(Modifier::BOLD), .add_modifier(Modifier::BOLD),
); );
let table = Table::new( // Render table inside content area (no borders here; outer block already drawn)
rows, let table = Table::new(rows_iter, COLS.to_vec())
vec![
Constraint::Length(8),
Constraint::Percentage(40),
Constraint::Length(8),
Constraint::Length(12),
Constraint::Length(8),
],
)
.header(header) .header(header)
.column_spacing(1) .column_spacing(1);
.block(Block::default().borders(Borders::ALL).title(title)); f.render_widget(table, content);
f.render_widget(table, area); // Draw scrollbar like CPU pane
let scroll_area = Rect {
x: inner.x + inner.width.saturating_sub(1),
y: inner.y,
width: 1,
height: inner.height,
};
if scroll_area.height >= 3 {
let track = (scroll_area.height - 2) as usize;
let total = total_rows.max(1);
let view = viewport_rows.clamp(1, total);
let max_off = total.saturating_sub(view);
let thumb_len = (track * view).div_ceil(total).max(1).min(track);
let thumb_top = if max_off == 0 {
0
} else {
((track - thumb_len) * offset + max_off / 2) / max_off
};
// Build lines: top arrow, track (with thumb), bottom arrow
let mut lines: Vec<Line> = Vec::with_capacity(scroll_area.height as usize);
lines.push(Line::from(Span::styled("", Style::default().fg(SB_ARROW))));
for i in 0..track {
if i >= thumb_top && i < thumb_top + thumb_len {
lines.push(Line::from(Span::styled("", Style::default().fg(SB_THUMB))));
} else {
lines.push(Line::from(Span::styled("", Style::default().fg(SB_TRACK))));
}
}
lines.push(Line::from(Span::styled("", Style::default().fg(SB_ARROW))));
f.render_widget(Paragraph::new(lines), scroll_area);
}
}
/// Format a CPU percentage right-aligned in a five-character field with one
/// decimal place, clamping the value to the 0–100 range first.
fn fmt_cpu_pct(v: f32) -> String {
    let clamped = v.clamp(0.0, 100.0);
    format!("{clamped:>5.1}")
}
/// Handle keyboard scrolling (Up/Down/PageUp/PageDown/Home/End) for the
/// processes table.
///
/// Delegates to the CPU pane's shared key handler so both panes respond to
/// the same keys identically; `page_size` is the number of visible rows used
/// for PageUp/PageDown steps.
pub fn processes_handle_key(
    scroll_offset: &mut usize,
    key: crossterm::event::KeyEvent,
    page_size: usize,
) {
    crate::ui::cpu::per_core_handle_key(scroll_offset, key, page_size);
}
/// Handle mouse for content scrolling and scrollbar dragging.
/// Returns Some(new_sort) if the header "CPU %" or "Mem" was clicked.
pub fn processes_handle_mouse(
scroll_offset: &mut usize,
drag: &mut Option<crate::ui::cpu::PerCoreScrollDrag>,
mouse: MouseEvent,
area: Rect,
total_rows: usize,
) -> Option<ProcSortBy> {
// Inner and content areas (match draw_top_processes)
let inner = Rect {
x: area.x + 1,
y: area.y + 1,
width: area.width.saturating_sub(2),
height: area.height.saturating_sub(2),
};
if inner.height == 0 || inner.width <= 2 {
return None;
}
let content = Rect {
x: inner.x,
y: inner.y,
width: inner.width.saturating_sub(2),
height: inner.height,
};
// Scrollbar interactions (click arrows/page/drag)
per_core_handle_scrollbar_mouse(scroll_offset, drag, mouse, area, total_rows);
// Wheel scrolling when inside the content
crate::ui::cpu::per_core_handle_mouse(scroll_offset, mouse, content, content.height as usize);
// Header click to change sort
let header_area = Rect {
x: content.x,
y: content.y,
width: content.width,
height: 1,
};
let inside_header = mouse.row == header_area.y
&& mouse.column >= header_area.x
&& mouse.column < header_area.x + header_area.width;
if inside_header && matches!(mouse.kind, MouseEventKind::Down(MouseButton::Left)) {
// Split header into the same columns
let cols = Layout::default()
.direction(Direction::Horizontal)
.constraints(COLS.to_vec())
.split(header_area);
if mouse.column >= cols[2].x && mouse.column < cols[2].x + cols[2].width {
return Some(ProcSortBy::CpuDesc);
}
if mouse.column >= cols[3].x && mouse.column < cols[3].x + cols[3].width {
return Some(ProcSortBy::MemDesc);
}
}
// Clamp to valid range
per_core_clamp(
scroll_offset,
total_rows,
(content.height.saturating_sub(1)) as usize,
);
None
} }

8
socktop/src/ui/theme.rs Normal file
View File

@ -0,0 +1,8 @@
//! Shared UI theme constants.

use ratatui::style::Color;

// Scrollbar colors (same look as before).
// NOTE(review): all three are currently the same grey; the separate names
// exist so arrow/track/thumb can be differentiated later without touching
// call sites.
pub const SB_ARROW: Color = Color::Rgb(170, 170, 180);
pub const SB_TRACK: Color = Color::Rgb(170, 170, 180);
pub const SB_THUMB: Color = Color::Rgb(170, 170, 180);

View File

@ -1,9 +1,13 @@
//! Minimal WebSocket client helpers for requesting metrics from the agent. //! Minimal WebSocket client helpers for requesting metrics from the agent.
use flate2::bufread::GzDecoder;
use futures_util::{SinkExt, StreamExt};
use std::io::Read;
use tokio::net::TcpStream; use tokio::net::TcpStream;
use tokio::time::{interval, Duration};
use tokio_tungstenite::{connect_async, tungstenite::Message, MaybeTlsStream, WebSocketStream}; use tokio_tungstenite::{connect_async, tungstenite::Message, MaybeTlsStream, WebSocketStream};
use crate::types::Metrics; use crate::types::{DiskInfo, Metrics, ProcessesPayload};
pub type WsStream = WebSocketStream<MaybeTlsStream<TcpStream>>; pub type WsStream = WebSocketStream<MaybeTlsStream<TcpStream>>;
@ -19,10 +23,134 @@ pub async fn request_metrics(ws: &mut WsStream) -> Option<Metrics> {
return None; return None;
} }
match ws.next().await { match ws.next().await {
Some(Ok(Message::Binary(b))) => {
gunzip_to_string(&b).and_then(|s| serde_json::from_str::<Metrics>(&s).ok())
}
Some(Ok(Message::Text(json))) => serde_json::from_str::<Metrics>(&json).ok(), Some(Ok(Message::Text(json))) => serde_json::from_str::<Metrics>(&json).ok(),
_ => None, _ => None,
} }
} }
// Re-export SinkExt/StreamExt for call sites // Decompress a gzip-compressed binary frame into a String.
use futures_util::{SinkExt, StreamExt}; fn gunzip_to_string(bytes: &[u8]) -> Option<String> {
let mut dec = GzDecoder::new(bytes);
let mut out = String::new();
dec.read_to_string(&mut out).ok()?;
Some(out)
}
// Suppress dead_code until these are wired into the app
/// One decoded agent reply, tagged by which request shape it matched
/// (see `parse_any_payload`).
#[allow(dead_code)]
pub enum Payload {
    /// Fast metrics snapshot.
    Metrics(Metrics),
    /// Disk list, as returned for "get_disks".
    Disks(Vec<DiskInfo>),
    /// Top-processes payload, as returned for "get_processes".
    Processes(ProcessesPayload),
}
#[allow(dead_code)]
fn parse_any_payload(json: &str) -> Result<Payload, serde_json::Error> {
if let Ok(m) = serde_json::from_str::<Metrics>(json) {
return Ok(Payload::Metrics(m));
}
if let Ok(d) = serde_json::from_str::<Vec<DiskInfo>>(json) {
return Ok(Payload::Disks(d));
}
if let Ok(p) = serde_json::from_str::<ProcessesPayload>(json) {
return Ok(Payload::Processes(p));
}
Err(serde_json::Error::io(std::io::Error::new(
std::io::ErrorKind::InvalidData,
"unknown payload",
)))
}
/// Send a "get_disks" request and await the JSON `Vec<DiskInfo>` reply.
///
/// Accepts either a gzip-compressed binary frame or a plain text frame;
/// returns `None` on send failure, connection loss, or decode failure.
pub async fn request_disks(ws: &mut WsStream) -> Option<Vec<DiskInfo>> {
    ws.send(Message::Text("get_disks".into())).await.ok()?;
    match ws.next().await {
        Some(Ok(Message::Binary(bytes))) => {
            let json = gunzip_to_string(&bytes)?;
            serde_json::from_str::<Vec<DiskInfo>>(&json).ok()
        }
        Some(Ok(Message::Text(json))) => serde_json::from_str::<Vec<DiskInfo>>(&json).ok(),
        _ => None,
    }
}
/// Send a "get_processes" request and await the JSON `ProcessesPayload` reply.
///
/// Accepts either a gzip-compressed binary frame or a plain text frame;
/// returns `None` on send failure, connection loss, or decode failure.
pub async fn request_processes(ws: &mut WsStream) -> Option<ProcessesPayload> {
    ws.send(Message::Text("get_processes".into())).await.ok()?;
    match ws.next().await {
        Some(Ok(Message::Binary(bytes))) => {
            let json = gunzip_to_string(&bytes)?;
            serde_json::from_str::<ProcessesPayload>(&json).ok()
        }
        Some(Ok(Message::Text(json))) => serde_json::from_str::<ProcessesPayload>(&json).ok(),
        _ => None,
    }
}
/// Example request-driven polling loop: sends "get_metrics" every 500ms,
/// "get_processes" every 2s, and "get_disks" every 5s, and decodes whatever
/// replies arrive (gzip binary or plain text).
///
/// NOTE(review): the payload match arms are placeholders — this loop is not
/// wired into the app yet (hence `#[allow(dead_code)]`); App::run drives the
/// same cadence via `request_*` calls instead.
#[allow(dead_code)]
pub async fn start_ws_polling(mut ws: WsStream) {
    let mut t_fast = interval(Duration::from_millis(500));
    let mut t_procs = interval(Duration::from_secs(2));
    let mut t_disks = interval(Duration::from_secs(5));
    // Prime one of each request immediately so the UI has data on startup.
    let _ = ws.send(Message::Text("get_metrics".into())).await;
    let _ = ws.send(Message::Text("get_processes".into())).await;
    let _ = ws.send(Message::Text("get_disks".into())).await;
    loop {
        tokio::select! {
            _ = t_fast.tick() => {
                let _ = ws.send(Message::Text("get_metrics".into())).await;
            }
            _ = t_procs.tick() => {
                let _ = ws.send(Message::Text("get_processes".into())).await;
            }
            _ = t_disks.tick() => {
                let _ = ws.send(Message::Text("get_disks".into())).await;
            }
            maybe = ws.next() => {
                // Stream end or transport error both terminate the loop.
                let Some(result) = maybe else { break; };
                let Ok(msg) = result else { break; };
                match msg {
                    Message::Binary(b) => {
                        // Binary frames are gzip-compressed JSON.
                        if let Some(json) = gunzip_to_string(&b) {
                            if let Ok(payload) = parse_any_payload(&json) {
                                match payload {
                                    Payload::Metrics(_m) => {
                                        // update your app state with fast metrics
                                    }
                                    Payload::Disks(_d) => {
                                        // update your app state with disks
                                    }
                                    Payload::Processes(_p) => {
                                        // update your app state with processes
                                    }
                                }
                            }
                        }
                    }
                    Message::Text(s) => {
                        if let Ok(payload) = parse_any_payload(&s) {
                            match payload {
                                Payload::Metrics(_m) => {}
                                Payload::Disks(_d) => {}
                                Payload::Processes(_p) => {}
                            }
                        }
                    }
                    Message::Close(_) => break,
                    // Ping/Pong/other control frames are ignored.
                    _ => {}
                }
            }
        }
    }
}

View File

@ -12,9 +12,12 @@ axum = { version = "0.7", features = ["ws", "macros"] }
sysinfo = { version = "0.37", features = ["network", "disk", "component"] } sysinfo = { version = "0.37", features = ["network", "disk", "component"] }
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
serde_json = "1" serde_json = "1"
flate2 = { version = "1", default-features = false, features = ["rust_backend"] }
futures = "0.3" futures = "0.3"
futures-util = "0.3.31" futures-util = "0.3.31"
tracing = "0.1" tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-subscriber = { version = "0.3", features = ["env-filter"] }
nvml-wrapper = "0.10" nvml-wrapper = "0.10"
gfxinfo = "0.1.2" gfxinfo = "0.1.2"
tungstenite = "0.27.0"
once_cell = "1.19"

View File

@ -9,64 +9,25 @@ mod types;
mod ws; mod ws;
use axum::{routing::get, Router}; use axum::{routing::get, Router};
use std::{net::SocketAddr, sync::atomic::AtomicUsize, sync::Arc, time::Duration}; use std::net::SocketAddr;
use sysinfo::{ use crate::sampler::{spawn_disks_sampler, spawn_process_sampler, spawn_sampler};
Components, CpuRefreshKind, Disks, MemoryRefreshKind, Networks, ProcessRefreshKind,
RefreshKind, System,
};
use tokio::sync::{Mutex, Notify, RwLock};
use tracing_subscriber::EnvFilter;
use sampler::spawn_sampler;
use state::AppState; use state::AppState;
use ws::ws_handler; use ws::ws_handler;
#[tokio::main] #[tokio::main]
async fn main() { async fn main() {
// Init logging; configure with RUST_LOG (e.g., RUST_LOG=info). tracing_subscriber::fmt::init();
tracing_subscriber::fmt()
.with_env_filter(EnvFilter::from_default_env())
.with_target(false)
.compact()
.init();
// sysinfo build specifics (scopes what refresh_all() will touch internally) let state = AppState::new();
let refresh_kind = RefreshKind::nothing()
.with_cpu(CpuRefreshKind::everything())
.with_memory(MemoryRefreshKind::everything())
.with_processes(ProcessRefreshKind::everything());
// Initialize sysinfo handles once and keep them alive
let mut sys = System::new_with_specifics(refresh_kind);
sys.refresh_all();
let mut nets = Networks::new();
nets.refresh(true);
let mut components = Components::new();
components.refresh(true);
let mut disks = Disks::new();
disks.refresh(true);
// Shared state across requests
let state = AppState {
sys: Arc::new(Mutex::new(sys)),
last_json: Arc::new(RwLock::new(String::new())),
components: Arc::new(Mutex::new(components)),
disks: Arc::new(Mutex::new(disks)),
networks: Arc::new(Mutex::new(nets)),
// new: adaptive sampling controls
client_count: Arc::new(AtomicUsize::new(0)),
wake_sampler: Arc::new(Notify::new()),
auth_token: std::env::var("SOCKTOP_TOKEN")
.ok()
.filter(|s| !s.is_empty()),
};
// Start background sampler (adjust cadence as needed) // Start background sampler (adjust cadence as needed)
let _sampler = spawn_sampler(state.clone(), Duration::from_millis(500)); // 500ms fast metrics
let _h_fast = spawn_sampler(state.clone(), std::time::Duration::from_millis(500));
// 2s processes (top 50)
let _h_procs = spawn_process_sampler(state.clone(), std::time::Duration::from_secs(2), 50);
// 5s disks
let _h_disks = spawn_disks_sampler(state.clone(), std::time::Duration::from_secs(5));
// Web app // Web app
let port = resolve_port(); let port = resolve_port();

View File

@ -2,43 +2,122 @@
use crate::gpu::collect_all_gpus; use crate::gpu::collect_all_gpus;
use crate::state::AppState; use crate::state::AppState;
use crate::types::{DiskInfo, Metrics, NetworkInfo, ProcessInfo}; use crate::types::{DiskInfo, Metrics, NetworkInfo, ProcessInfo, ProcessesPayload};
use once_cell::sync::OnceCell;
use std::cmp::Ordering; use std::collections::HashMap;
use sysinfo::{ProcessesToUpdate, System}; use std::fs;
use std::io;
use std::sync::Mutex;
use std::time::{Duration, Instant};
use sysinfo::{ProcessRefreshKind, ProcessesToUpdate, System};
use tracing::warn; use tracing::warn;
pub async fn collect_metrics(state: &AppState) -> Metrics { // Runtime toggles (read once)
let mut sys = state.sys.lock().await; fn gpu_enabled() -> bool {
static ON: OnceCell<bool> = OnceCell::new();
*ON.get_or_init(|| {
std::env::var("SOCKTOP_AGENT_GPU")
.map(|v| v != "0")
.unwrap_or(true)
})
}
/// Whether CPU temperature collection is enabled.
///
/// Reads `SOCKTOP_AGENT_TEMP` once and memoizes the answer: only an explicit
/// value of "0" disables collection; unset or any other value enables it.
fn temp_enabled() -> bool {
    static ON: OnceCell<bool> = OnceCell::new();
    *ON.get_or_init(|| !matches!(std::env::var("SOCKTOP_AGENT_TEMP").as_deref(), Ok("0")))
}
// Targeted refresh: CPU/mem/processes only // Tiny TTL caches to avoid rescanning sensors every 500ms
/// How long a cached sensor/GPU reading stays fresh before a caller is asked
/// to refresh it (chosen to cover three 500ms fast-metrics ticks).
const TTL: Duration = Duration::from_millis(1500);

/// Cached CPU temperature reading and the time it was recorded.
struct TempCache {
    at: Option<Instant>, // time of last write; None until first use
    v: Option<f32>,      // last temperature value, if any (°C per cpu_temp_c — TODO confirm)
}

static TEMP: OnceCell<Mutex<TempCache>> = OnceCell::new();

/// Cached GPU metrics and the time they were recorded.
struct GpuCache {
    at: Option<Instant>,
    v: Option<Vec<crate::gpu::GpuMetrics>>,
}

static GPUC: OnceCell<Mutex<GpuCache>> = OnceCell::new();
/// Read the cached CPU temperature, marking the slot stale when the TTL has
/// elapsed.
///
/// NOTE(review): on a stale (or first) read this bumps `at` to now, clears
/// `v`, and returns `None`; the caller is then expected to refresh the sensor
/// and store the result via `set_temp`. If the caller skips that, reads
/// within the next TTL window will keep returning `None`.
fn cached_temp() -> Option<f32> {
    // Respect the SOCKTOP_AGENT_TEMP=0 opt-out.
    if !temp_enabled() {
        return None;
    }
    let now = Instant::now();
    let lock = TEMP.get_or_init(|| Mutex::new(TempCache { at: None, v: None }));
    // A poisoned mutex yields None rather than panicking.
    let mut c = lock.lock().ok()?;
    if c.at.is_none_or(|t| now.duration_since(t) >= TTL) {
        c.at = Some(now);
        // caller will fill this; we just hold a slot
        c.v = None;
    }
    c.v
}
/// Store a freshly-read CPU temperature in the cache and stamp it with the
/// current time. Does nothing if the cache slot was never initialized by a
/// prior `cached_temp` call, or if the mutex is poisoned.
fn set_temp(v: Option<f32>) {
    let Some(lock) = TEMP.get() else { return };
    let Ok(mut cache) = lock.lock() else { return };
    cache.v = v;
    cache.at = Some(Instant::now());
}
/// Read the cached GPU metrics, marking the slot stale when the TTL has
/// elapsed.
///
/// NOTE(review): same contract as `cached_temp` — a stale read bumps `at`,
/// clears `v`, and returns `None`, expecting the caller to refresh via
/// `set_gpus`. Returns a clone of the cached Vec on a fresh hit.
fn cached_gpus() -> Option<Vec<crate::gpu::GpuMetrics>> {
    // Respect the SOCKTOP_AGENT_GPU=0 opt-out.
    if !gpu_enabled() {
        return None;
    }
    let now = Instant::now();
    let lock = GPUC.get_or_init(|| Mutex::new(GpuCache { at: None, v: None }));
    // A poisoned mutex yields None rather than panicking.
    let mut c = lock.lock().ok()?;
    if c.at.is_none_or(|t| now.duration_since(t) >= TTL) {
        // mark stale; caller will refresh
        c.at = Some(now);
        c.v = None;
    }
    c.v.clone()
}
/// Store freshly-collected GPU metrics in the cache and stamp them with the
/// current time. Does nothing if the cache slot was never initialized by a
/// prior `cached_gpus` call, or if the mutex is poisoned.
fn set_gpus(v: Option<Vec<crate::gpu::GpuMetrics>>) {
    if let Some(lock) = GPUC.get() {
        if let Ok(mut c) = lock.lock() {
            // Move `v` into the cache — it is owned and unused afterwards,
            // so the previous `v.clone()` was a redundant Vec copy.
            c.v = v;
            c.at = Some(Instant::now());
        }
    }
}
// Collect only fast-changing metrics (CPU/mem/net + optional temps/gpus).
pub async fn collect_fast_metrics(state: &AppState) -> Metrics {
let mut sys = state.sys.lock().await;
if let Err(e) = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { if let Err(e) = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
sys.refresh_cpu_usage(); sys.refresh_cpu_usage();
sys.refresh_memory(); sys.refresh_memory();
sys.refresh_processes(ProcessesToUpdate::All, true);
})) { })) {
warn!("sysinfo selective refresh panicked: {e:?}"); warn!("sysinfo selective refresh panicked: {e:?}");
} }
// Hostname
let hostname = System::host_name().unwrap_or_else(|| "unknown".to_string()); let hostname = System::host_name().unwrap_or_else(|| "unknown".to_string());
// CPU usage
let cpu_total = sys.global_cpu_usage(); let cpu_total = sys.global_cpu_usage();
let cpu_per_core: Vec<f32> = sys.cpus().iter().map(|c| c.cpu_usage()).collect(); let cpu_per_core: Vec<f32> = sys.cpus().iter().map(|c| c.cpu_usage()).collect();
// Memory / swap
let mem_total = sys.total_memory(); let mem_total = sys.total_memory();
let mem_used = mem_total.saturating_sub(sys.available_memory()); let mem_used = mem_total.saturating_sub(sys.available_memory());
let swap_total = sys.total_swap(); let swap_total = sys.total_swap();
let swap_used = sys.used_swap(); let swap_used = sys.used_swap();
drop(sys);
drop(sys); // release quickly before touching other locks // CPU temperature: only refresh sensors if cache is stale
let cpu_temp_c = if cached_temp().is_some() {
// Components (cached): just refresh temps cached_temp()
let cpu_temp_c = { } else if temp_enabled() {
let val = {
let mut components = state.components.lock().await; let mut components = state.components.lock().await;
components.refresh(true); components.refresh(false);
components.iter().find_map(|c| { components.iter().find_map(|c| {
let l = c.label().to_ascii_lowercase(); let l = c.label().to_ascii_lowercase();
if l.contains("cpu") if l.contains("cpu")
@ -52,25 +131,16 @@ pub async fn collect_metrics(state: &AppState) -> Metrics {
} }
}) })
}; };
set_temp(val);
// Disks (cached): refresh sizes/usage, reuse enumeration val
let disks: Vec<DiskInfo> = { } else {
let mut disks_list = state.disks.lock().await; None
disks_list.refresh(true);
disks_list
.iter()
.map(|d| DiskInfo {
name: d.name().to_string_lossy().into_owned(),
total: d.total_space(),
available: d.available_space(),
})
.collect()
}; };
// Networks (cached): refresh counters // Networks
let networks: Vec<NetworkInfo> = { let networks: Vec<NetworkInfo> = {
let mut nets = state.networks.lock().await; let mut nets = state.networks.lock().await;
nets.refresh(true); nets.refresh(false);
nets.iter() nets.iter()
.map(|(name, data)| NetworkInfo { .map(|(name, data)| NetworkInfo {
name: name.to_string(), name: name.to_string(),
@ -80,42 +150,11 @@ pub async fn collect_metrics(state: &AppState) -> Metrics {
.collect() .collect()
}; };
// Processes: only collect fields we use (pid, name, cpu, mem), keep top K efficiently // GPUs: refresh only when cache is stale
const TOP_K: usize = 30; let gpus = if cached_gpus().is_some() {
let mut procs: Vec<ProcessInfo> = { cached_gpus()
let sys = state.sys.lock().await; // re-lock briefly to read processes } else if gpu_enabled() {
sys.processes() let v = match collect_all_gpus() {
.values()
.map(|p| ProcessInfo {
pid: p.pid().as_u32(),
name: p.name().to_string_lossy().into_owned(),
cpu_usage: p.cpu_usage(),
mem_bytes: p.memory(),
})
.collect()
};
if procs.len() > TOP_K {
procs.select_nth_unstable_by(TOP_K, |a, b| {
b.cpu_usage
.partial_cmp(&a.cpu_usage)
.unwrap_or(Ordering::Equal)
});
procs.truncate(TOP_K);
}
procs.sort_by(|a, b| {
b.cpu_usage
.partial_cmp(&a.cpu_usage)
.unwrap_or(Ordering::Equal)
});
let process_count = {
let sys = state.sys.lock().await;
sys.processes().len()
};
// GPU(s)
let gpus = match collect_all_gpus() {
Ok(v) if !v.is_empty() => Some(v), Ok(v) if !v.is_empty() => Some(v),
Ok(_) => None, Ok(_) => None,
Err(e) => { Err(e) => {
@ -123,6 +162,11 @@ pub async fn collect_metrics(state: &AppState) -> Metrics {
None None
} }
}; };
set_gpus(v.clone());
v
} else {
None
};
Metrics { Metrics {
cpu_total, cpu_total,
@ -131,12 +175,153 @@ pub async fn collect_metrics(state: &AppState) -> Metrics {
mem_used, mem_used,
swap_total, swap_total,
swap_used, swap_used,
process_count,
hostname, hostname,
cpu_temp_c, cpu_temp_c,
disks, disks: Vec::new(),
networks, networks,
top_processes: procs, top_processes: Vec::new(),
gpus, gpus,
} }
} }
// Cached disks
/// Snapshot all disks (name, total bytes, available bytes) from the shared
/// `Disks` handle.
///
/// Refreshes usage numbers in place; `refresh(false)` keeps entries for
/// disks that temporarily disappear from the listing.
pub async fn collect_disks(state: &AppState) -> Vec<DiskInfo> {
    let mut disks = state.disks.lock().await;
    disks.refresh(false); // don't drop missing disks
    let mut out = Vec::new();
    for d in disks.iter() {
        out.push(DiskInfo {
            name: d.name().to_string_lossy().into_owned(),
            total: d.total_space(),
            available: d.available_space(),
        });
    }
    out
}
/// Total CPU jiffies across all cores, read from the first line of
/// `/proc/stat`. This is the denominator for per-process CPU%.
///
/// # Errors
/// Propagates the read error, or returns an error when the file has no
/// first line.
#[inline]
fn read_total_jiffies() -> io::Result<u64> {
    let s = fs::read_to_string("/proc/stat")?;
    parse_total_jiffies(&s).ok_or_else(|| io::Error::other("no cpu line"))
}

/// Parse the aggregate "cpu ..." first line of `/proc/stat` content.
///
/// Sums the first 8 numeric fields after the "cpu" label (user, nice,
/// system, idle, iowait, irq, softirq, steal). Unparsable tokens are
/// skipped, matching the original behavior; `None` only when the input has
/// no first line.
fn parse_total_jiffies(stat: &str) -> Option<u64> {
    let line = stat.lines().next()?;
    let mut fields = line.split_whitespace();
    let _label = fields.next(); // the literal "cpu"
    let mut sum: u64 = 0;
    for tok in fields.take(8) {
        if let Ok(v) = tok.parse::<u64>() {
            sum = sum.saturating_add(v);
        }
    }
    Some(sum)
}
/// `utime + stime` jiffies for `pid`, read from `/proc/<pid>/stat`.
/// Returns `None` if the process vanished or the line is malformed.
#[inline]
fn read_proc_jiffies(pid: u32) -> Option<u64> {
    let s = fs::read_to_string(format!("/proc/{pid}/stat")).ok()?;
    parse_proc_jiffies(&s)
}

/// Parse `utime + stime` from a `/proc/<pid>/stat` line.
///
/// The comm field is enclosed in parentheses and may itself contain spaces
/// or parentheses, so fields are located relative to the LAST ')': the text
/// after ") " is space-separated starting at "state"; utime is the 12th and
/// stime the 13th of those fields (fields 14 and 15 of the full line).
fn parse_proc_jiffies(stat: &str) -> Option<u64> {
    let rpar = stat.rfind(')')?;
    let after = stat.get(rpar + 2..)?; // skip ") "
    let mut fields = after.split_whitespace();
    let utime = fields.nth(11)?.parse::<u64>().ok()?;
    let stime = fields.next()?.parse::<u64>().ok()?;
    Some(utime.saturating_add(stime))
}
/// Collect the top-`k` processes by CPU% plus the total process count.
///
/// CPU% is computed from /proc jiffies deltas against the previous call:
/// `cpu = (delta(utime+stime) / delta(total jiffies)) * 100`, i.e. a share
/// of the whole machine, clamped to 0..=100. The interval is whatever time
/// elapsed since the previous call (the client polls on its own cadence).
/// The first call — no prior sample — reports 0% for every process.
pub async fn collect_processes_top_k(state: &AppState, k: usize) -> ProcessesPayload {
    // Fresh System each call: avoids lingering dead entries, and
    // `without_tasks()` excludes per-thread rows so only top-level
    // processes are listed.
    let mut sys = System::new();
    sys.refresh_processes_specifics(
        ProcessesToUpdate::All,
        false,
        ProcessRefreshKind::everything().without_tasks(),
    );
    let total_count = sys.processes().len();
    // Snapshot current per-pid jiffies straight from /proc (a process may
    // exit between enumeration and read; those pids are simply skipped).
    let mut current: HashMap<u32, u64> = HashMap::with_capacity(total_count);
    for p in sys.processes().values() {
        let pid = p.pid().as_u32();
        if let Some(j) = read_proc_jiffies(pid) {
            current.insert(pid, j);
        }
    }
    let total_now = read_total_jiffies().unwrap_or(0);
    // Atomically swap the tracker state: take the previous sample out and
    // store the new one, all under one short lock.
    let (last_total, mut last_map) = {
        let mut t = state.proc_cpu.lock().await;
        let lt = t.last_total;
        let lm = std::mem::take(&mut t.last_per_pid);
        t.last_total = total_now;
        // `current` is still needed below for the delta computation.
        t.last_per_pid = current.clone();
        (lt, lm)
    };
    // First run (last_total == 0) or a non-advancing total (clock went
    // nowhere, or read_total_jiffies failed and returned 0): report 0% to
    // avoid dividing by a zero/negative interval.
    if last_total == 0 || total_now <= last_total {
        let procs: Vec<ProcessInfo> = sys
            .processes()
            .values()
            .map(|p| ProcessInfo {
                pid: p.pid().as_u32(),
                name: p.name().to_string_lossy().into_owned(),
                cpu_usage: 0.0,
                mem_bytes: p.memory(),
            })
            .collect();
        return ProcessesPayload {
            process_count: total_count,
            top_processes: top_k_sorted(procs, k),
        };
    }
    // Guarded >0 above; `.max(1)` is belt-and-braces against division by 0.
    let dt = total_now.saturating_sub(last_total).max(1) as f32;
    let procs: Vec<ProcessInfo> = sys
        .processes()
        .values()
        .map(|p| {
            let pid = p.pid().as_u32();
            let now = current.get(&pid).copied().unwrap_or(0);
            // New pids have no previous sample -> delta counts from 0,
            // which over-reports their first tick; clamped below.
            let prev = last_map.remove(&pid).unwrap_or(0);
            let du = now.saturating_sub(prev) as f32;
            let cpu = ((du / dt) * 100.0).clamp(0.0, 100.0);
            ProcessInfo {
                pid,
                name: p.name().to_string_lossy().into_owned(),
                cpu_usage: cpu,
                mem_bytes: p.memory(),
            }
        })
        .collect();
    ProcessesPayload {
        process_count: total_count,
        top_processes: top_k_sorted(procs, k),
    }
}
/// Select the `k` highest-CPU processes and return them sorted by CPU%
/// descending.
///
/// `select_nth_unstable_by` partitions in O(n) so only the surviving `k`
/// entries pay the final sort. `sort_unstable_by` replaces the previous
/// stable `sort_by`: it avoids the stable sort's allocation, and stability
/// buys nothing here — ties arrive in nondeterministic order anyway (the
/// input is built from a HashMap).
fn top_k_sorted(mut v: Vec<ProcessInfo>, k: usize) -> Vec<ProcessInfo> {
    // Shared descending-by-CPU comparator; NaN compares as equal.
    let by_cpu_desc = |a: &ProcessInfo, b: &ProcessInfo| {
        b.cpu_usage
            .partial_cmp(&a.cpu_usage)
            .unwrap_or(std::cmp::Ordering::Equal)
    };
    if v.len() > k {
        // O(n) partition: everything at index <= k is >= everything after.
        v.select_nth_unstable_by(k, by_cpu_desc);
        v.truncate(k);
    }
    v.sort_unstable_by(by_cpu_desc);
    v
}

View File

@ -1,39 +1,34 @@
//! Background sampler: periodically collects metrics and updates a JSON cache, //! Background sampler: periodically collects metrics and updates precompressed caches,
//! so WS replies are just a read of the cached string. //! so WS replies just read and send cached bytes.
use crate::metrics::collect_metrics;
use crate::state::AppState; use crate::state::AppState;
//use serde_json::to_string;
use tokio::task::JoinHandle; use tokio::task::JoinHandle;
use tokio::time::{interval, Duration, MissedTickBehavior}; use tokio::time::{sleep, Duration};
pub fn spawn_sampler(state: AppState, period: Duration) -> JoinHandle<()> { // 500ms: fast path (cpu/mem/net/temp/gpu)
pub fn spawn_sampler(_state: AppState, _period: Duration) -> JoinHandle<()> {
tokio::spawn(async move { tokio::spawn(async move {
let idle_period = Duration::from_secs(10); // no-op background sampler (request-driven collection elsewhere)
loop { loop {
let active = state sleep(Duration::from_secs(3600)).await;
.client_count }
.load(std::sync::atomic::Ordering::Relaxed) })
> 0; }
let mut ticker = interval(if active { period } else { idle_period });
ticker.set_missed_tick_behavior(MissedTickBehavior::Skip); // 2s: processes top-k
ticker.tick().await; pub fn spawn_process_sampler(_state: AppState, _period: Duration, _top_k: usize) -> JoinHandle<()> {
tokio::spawn(async move {
if !active { loop {
tokio::select! { sleep(Duration::from_secs(3600)).await;
_ = ticker.tick() => {}, }
_ = state.wake_sampler.notified() => continue, })
} }
}
// 5s: disks
if let Ok(json) = async { pub fn spawn_disks_sampler(_state: AppState, _period: Duration) -> JoinHandle<()> {
let m = collect_metrics(&state).await; tokio::spawn(async move {
serde_json::to_string(&m) loop {
} sleep(Duration::from_secs(3600)).await;
.await
{
*state.last_json.write().await = json;
}
} }
}) })
} }

View File

@ -1,49 +1,52 @@
//! Shared agent state: sysinfo handles and hot JSON cache. //! Shared agent state: sysinfo handles and hot JSON cache.
use std::collections::HashMap;
use std::sync::atomic::AtomicUsize; use std::sync::atomic::AtomicUsize;
use std::sync::Arc; use std::sync::Arc;
use sysinfo::{Components, Disks, Networks, System}; use sysinfo::{Components, Disks, Networks, System};
use tokio::sync::{Mutex, Notify, RwLock}; use tokio::sync::Mutex;
pub type SharedSystem = Arc<Mutex<System>>; pub type SharedSystem = Arc<Mutex<System>>;
pub type SharedComponents = Arc<Mutex<Components>>; pub type SharedComponents = Arc<Mutex<Components>>;
pub type SharedDisks = Arc<Mutex<Disks>>; pub type SharedDisks = Arc<Mutex<Disks>>;
pub type SharedNetworks = Arc<Mutex<Networks>>; pub type SharedNetworks = Arc<Mutex<Networks>>;
#[derive(Default)]
pub struct ProcCpuTracker {
pub last_total: u64,
pub last_per_pid: HashMap<u32, u64>,
}
#[derive(Clone)] #[derive(Clone)]
pub struct AppState { pub struct AppState {
// Persistent sysinfo handles
pub sys: SharedSystem, pub sys: SharedSystem,
// Last serialized JSON snapshot for fast WS responses
pub last_json: Arc<RwLock<String>>,
// Adaptive sampling controls
pub client_count: Arc<AtomicUsize>,
pub wake_sampler: Arc<Notify>,
pub auth_token: Option<String>,
// Cached containers (enumerated once; refreshed per tick)
pub components: SharedComponents, pub components: SharedComponents,
pub disks: SharedDisks, pub disks: SharedDisks,
pub networks: SharedNetworks, pub networks: SharedNetworks,
// For correct per-process CPU% using /proc deltas
pub proc_cpu: Arc<Mutex<ProcCpuTracker>>,
// Connection tracking (to allow future idle sleeps if desired)
pub client_count: Arc<AtomicUsize>,
pub auth_token: Option<String>,
} }
impl AppState { impl AppState {
#[allow(dead_code)]
pub fn new() -> Self { pub fn new() -> Self {
let sys = System::new(); // targeted refreshes per tick let sys = System::new();
let components = Components::new_with_refreshed_list(); // enumerate once let components = Components::new_with_refreshed_list();
let disks = Disks::new_with_refreshed_list(); let disks = Disks::new_with_refreshed_list();
let networks = Networks::new_with_refreshed_list(); let networks = Networks::new_with_refreshed_list();
Self { Self {
sys: Arc::new(Mutex::new(sys)), sys: Arc::new(Mutex::new(sys)),
components: Arc::new(Mutex::new(components)), components: Arc::new(Mutex::new(components)),
disks: Arc::new(Mutex::new(disks)), disks: Arc::new(Mutex::new(disks)),
networks: Arc::new(Mutex::new(networks)), networks: Arc::new(Mutex::new(networks)),
last_json: Arc::new(RwLock::new(String::new())), proc_cpu: Arc::new(Mutex::new(ProcCpuTracker::default())),
client_count: Arc::new(AtomicUsize::new(0)), client_count: Arc::new(AtomicUsize::new(0)),
wake_sampler: Arc::new(Notify::new()),
auth_token: std::env::var("SOCKTOP_TOKEN") auth_token: std::env::var("SOCKTOP_TOKEN")
.ok() .ok()
.filter(|s| !s.is_empty()), .filter(|s| !s.is_empty()),

View File

@ -4,7 +4,21 @@
use crate::gpu::GpuMetrics; use crate::gpu::GpuMetrics;
use serde::Serialize; use serde::Serialize;
#[derive(Debug, Serialize, Clone)] #[derive(Debug, Clone, Serialize)]
pub struct DiskInfo {
pub name: String,
pub total: u64,
pub available: u64,
}
#[derive(Debug, Clone, Serialize)]
pub struct NetworkInfo {
pub name: String,
pub received: u64,
pub transmitted: u64,
}
#[derive(Debug, Clone, Serialize)]
pub struct ProcessInfo { pub struct ProcessInfo {
pub pid: u32, pub pid: u32,
pub name: String, pub name: String,
@ -12,22 +26,7 @@ pub struct ProcessInfo {
pub mem_bytes: u64, pub mem_bytes: u64,
} }
#[derive(Debug, Serialize, Clone)] #[derive(Debug, Clone, Serialize)]
pub struct DiskInfo {
pub name: String,
pub total: u64,
pub available: u64,
}
#[derive(Debug, Serialize, Clone)]
pub struct NetworkInfo {
pub name: String,
// cumulative totals since the agent started (client should diff to get rates)
pub received: u64,
pub transmitted: u64,
}
#[derive(Serialize)]
pub struct Metrics { pub struct Metrics {
pub cpu_total: f32, pub cpu_total: f32,
pub cpu_per_core: Vec<f32>, pub cpu_per_core: Vec<f32>,
@ -35,7 +34,6 @@ pub struct Metrics {
pub mem_used: u64, pub mem_used: u64,
pub swap_total: u64, pub swap_total: u64,
pub swap_used: u64, pub swap_used: u64,
pub process_count: usize,
pub hostname: String, pub hostname: String,
pub cpu_temp_c: Option<f32>, pub cpu_temp_c: Option<f32>,
pub disks: Vec<DiskInfo>, pub disks: Vec<DiskInfo>,
@ -43,3 +41,9 @@ pub struct Metrics {
pub top_processes: Vec<ProcessInfo>, pub top_processes: Vec<ProcessInfo>,
pub gpus: Option<Vec<GpuMetrics>>, pub gpus: Option<Vec<GpuMetrics>>,
} }
#[derive(Debug, Clone, Serialize)]
pub struct ProcessesPayload {
pub process_count: usize,
pub top_processes: Vec<ProcessInfo>,
}

View File

@ -1,66 +1,69 @@
//! WebSocket upgrade and per-connection handler. Serves cached JSON quickly. //! WebSocket upgrade and per-connection handler (request-driven).
use axum::{ use axum::{
extract::{ extract::ws::{Message, WebSocket},
ws::{Message, WebSocket, WebSocketUpgrade}, extract::{Query, State, WebSocketUpgrade},
Query, State, response::Response,
},
http::StatusCode,
response::{IntoResponse, Response},
}; };
use futures_util::stream::StreamExt; use flate2::{write::GzEncoder, Compression};
use futures_util::StreamExt;
use crate::metrics::collect_metrics;
use crate::state::AppState;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::atomic::Ordering; use std::io::Write;
use crate::metrics::{collect_disks, collect_fast_metrics, collect_processes_top_k};
use crate::state::AppState;
pub async fn ws_handler( pub async fn ws_handler(
ws: WebSocketUpgrade, ws: WebSocketUpgrade,
State(state): State<AppState>, State(state): State<AppState>,
Query(q): Query<HashMap<String, String>>, Query(q): Query<HashMap<String, String>>,
) -> Response { ) -> Response {
// optional auth
if let Some(expected) = state.auth_token.as_ref() { if let Some(expected) = state.auth_token.as_ref() {
match q.get("token") { if q.get("token") != Some(expected) {
Some(t) if t == expected => {} return ws.on_upgrade(|socket| async move {
_ => return StatusCode::UNAUTHORIZED.into_response(), let _ = socket.close().await;
});
} }
} }
ws.on_upgrade(move |socket| handle_socket(socket, state)) ws.on_upgrade(move |socket| handle_socket(socket, state))
} }
async fn handle_socket(mut socket: WebSocket, state: AppState) { async fn handle_socket(mut socket: WebSocket, state: AppState) {
// Bump client count on connect and wake the sampler. state
state.client_count.fetch_add(1, Ordering::Relaxed); .client_count
state.wake_sampler.notify_waiters(); .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
// Ensure we decrement on disconnect (drop).
struct ClientGuard(AppState);
impl Drop for ClientGuard {
fn drop(&mut self) {
self.0.client_count.fetch_sub(1, Ordering::Relaxed);
self.0.wake_sampler.notify_waiters();
}
}
let _guard = ClientGuard(state.clone());
while let Some(Ok(msg)) = socket.next().await { while let Some(Ok(msg)) = socket.next().await {
match msg { match msg {
Message::Text(text) if text == "get_metrics" => { Message::Text(ref text) if text == "get_metrics" => {
// Serve the cached JSON quickly; if empty (cold start), collect once. let m = collect_fast_metrics(&state).await;
let cached = state.last_json.read().await.clone(); let _ = send_json(&mut socket, &m).await;
if !cached.is_empty() {
let _ = socket.send(Message::Text(cached)).await;
} else {
let metrics = collect_metrics(&state).await;
if let Ok(js) = serde_json::to_string(&metrics) {
let _ = socket.send(Message::Text(js)).await;
} }
Message::Text(ref text) if text == "get_disks" => {
let d = collect_disks(&state).await;
let _ = send_json(&mut socket, &d).await;
} }
Message::Text(ref text) if text == "get_processes" => {
let p = collect_processes_top_k(&state, 50).await;
let _ = send_json(&mut socket, &p).await;
} }
Message::Close(_) => break, Message::Close(_) => break,
_ => {} _ => {}
} }
} }
state
.client_count
.fetch_sub(1, std::sync::atomic::Ordering::Relaxed);
}
// Small, cheap gzip for larger payloads; send text for small.
async fn send_json<T: serde::Serialize>(ws: &mut WebSocket, value: &T) -> Result<(), axum::Error> {
let json = serde_json::to_string(value).expect("serialize");
if json.len() <= 768 {
return ws.send(Message::Text(json)).await;
}
let mut enc = GzEncoder::new(Vec::new(), Compression::fast());
enc.write_all(json.as_bytes()).ok();
let bin = enc.finish().unwrap_or_else(|_| json.into_bytes());
ws.send(Message::Binary(bin)).await
} }