Compare commits
100 Commits
feature/ho
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
| 3024816525 | |||
| 1d7bc42d59 | |||
| 518ae8c2bf | |||
| 6eb1809309 | |||
| 1c01902a71 | |||
| 9d302ad475 | |||
| 7875f132f7 | |||
| 0d789fb97c | |||
| 5ddaed298b | |||
| 1528568c30 | |||
| 6f238cdf25 | |||
| ffe451edaa | |||
| c9bde52cb1 | |||
| 0603746d7c | |||
| 25632f3427 | |||
| e51cdb0c50 | |||
| 1cb05d404b | |||
| 4196066e57 | |||
| 47e96c7d92 | |||
| bae2ecb79a | |||
| bd0d15a1ae | |||
| 689498c5f4 | |||
| 34e260a612 | |||
| 47eff3a75c | |||
| 0210b49219 | |||
| 70a150152c | |||
| f4b54db399 | |||
| e857cfc665 | |||
| e66008f341 | |||
| a238ce320b | |||
| b635f5d7f4 | |||
| 18b41c1b45 | |||
| b4ed036357 | |||
| ec0e409488 | |||
| 08f248c696 | |||
| cea133b7da | |||
| e679896ca0 | |||
| 5e5fde190a | |||
| 8286d21a2a | |||
| b91fc7b016 | |||
| f936767835 | |||
| 5f2777cdb2 | |||
| 49164da105 | |||
| 22c1f80e70 | |||
| a486225008 | |||
| d97f7507e8 | |||
| e4186a7ec0 | |||
| f59c28d966 | |||
| 06cd6d0c82 | |||
| ffc246b705 | |||
| cd2816915d | |||
| 7cd5941434 | |||
| 76c7fe1d6f | |||
| eed04f1d5c | |||
| 764c25846f | |||
| a9bf4208ab | |||
| 9c1416eabf | |||
| e7350f8908 | |||
| 2647b611d2 | |||
| a359f17367 | |||
| d93b7aca5a | |||
| e51054811c | |||
| b74242e6d9 | |||
| 4e378b882a | |||
| 622767a605 | |||
| 0c5a1d7553 | |||
| 0bd709d2a7 | |||
| 31f5f9ce76 | |||
| df2308e6e9 | |||
| 7592709a43 | |||
| 61fe1cc38e | |||
| eed346abb6 | |||
| ab3bb33711 | |||
| 7caf2f4bfb | |||
| b249c7ba99 | |||
| f0858525e8 | |||
| 2fe005ed90 | |||
| ca6a5cbdfa | |||
| 56301d61fd | |||
| 55e5c708fe | |||
| 2d17cf1598 | |||
| 353c08c35e | |||
| f13ea45360 | |||
| 8ce00a5dad | |||
| f37b8d9ff4 | |||
| 322981ada7 | |||
| 3394beab67 | |||
| c9ebea92f5 | |||
| e2dc5e8ac9 | |||
| beddba0072 | |||
| cacc4cba9f | |||
| 66270c16b7 | |||
| 00d5777d05 | |||
| f62b5274d2 | |||
| bbbe35111a | |||
| a4356b5ece | |||
| b6e656738b | |||
| f83cb07d57 | |||
| 7697c7dc2b | |||
| 1043fffc8d |
15
.githooks/pre-commit
Normal file → Executable file
15
.githooks/pre-commit
Normal file → Executable file
@ -1,11 +1,22 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
|
# This repository uses a custom hooks directory (.githooks). To enable this pre-commit hook run:
|
||||||
|
# git config core.hooksPath .githooks
|
||||||
|
# Ensure this file is executable: chmod +x .githooks/pre-commit
|
||||||
set -euo pipefail
|
set -euo pipefail
|
||||||
|
|
||||||
echo "[pre-commit] Running cargo fmt --all" >&2
|
echo "[pre-commit] Running cargo fmt --all" >&2
|
||||||
|
|
||||||
if ! command -v cargo >/dev/null 2>&1; then
|
if ! command -v cargo >/dev/null 2>&1; then
|
||||||
echo "[pre-commit] cargo not found in PATH" >&2
|
# Try loading rustup environment (common install path)
|
||||||
exit 1
|
if [ -f "$HOME/.cargo/env" ]; then
|
||||||
|
# shellcheck source=/dev/null
|
||||||
|
. "$HOME/.cargo/env"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! command -v cargo >/dev/null 2>&1; then
|
||||||
|
echo "[pre-commit] cargo not found in PATH; skipping fmt (install Rust or adjust PATH)." >&2
|
||||||
|
exit 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
cargo fmt --all
|
cargo fmt --all
|
||||||
|
|||||||
4
.github/workflows/ci.yml
vendored
4
.github/workflows/ci.yml
vendored
@ -42,7 +42,7 @@ jobs:
|
|||||||
kill $AGENT_PID || true
|
kill $AGENT_PID || true
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
SOCKTOP_WS=ws://127.0.0.1:3000/ws cargo test -p socktop --test ws_probe -- --nocapture
|
SOCKTOP_WS=ws://127.0.0.1:3000/ws cargo test -p socktop_connector --test integration_test -- --nocapture
|
||||||
kill $AGENT_PID || true
|
kill $AGENT_PID || true
|
||||||
|
|
||||||
- name: "Windows: start agent and run WS probe"
|
- name: "Windows: start agent and run WS probe"
|
||||||
@ -79,7 +79,7 @@ jobs:
|
|||||||
}
|
}
|
||||||
$env:SOCKTOP_WS = "ws://127.0.0.1:3000/ws"
|
$env:SOCKTOP_WS = "ws://127.0.0.1:3000/ws"
|
||||||
try {
|
try {
|
||||||
cargo test -p socktop --test ws_probe -- --nocapture
|
cargo test -p socktop_connector --test integration_test -- --nocapture
|
||||||
} finally {
|
} finally {
|
||||||
if ($p -and !$p.HasExited) { Stop-Process -Id $p.Id -Force -ErrorAction SilentlyContinue }
|
if ($p -and !$p.HasExited) { Stop-Process -Id $p.Id -Force -ErrorAction SilentlyContinue }
|
||||||
}
|
}
|
||||||
|
|||||||
5
.gitignore
vendored
5
.gitignore
vendored
@ -1,2 +1,7 @@
|
|||||||
/target
|
/target
|
||||||
.vscode/
|
.vscode/
|
||||||
|
/socktop-wasm-test/target
|
||||||
|
|
||||||
|
# Documentation files from development sessions (context-specific, not for public repo)
|
||||||
|
/OPTIMIZATION_PROCESS_DETAILS.md
|
||||||
|
/THREAD_SUPPORT.md
|
||||||
|
|||||||
1050
Cargo.lock
generated
1050
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
11
Cargo.toml
11
Cargo.toml
@ -2,7 +2,8 @@
|
|||||||
resolver = "2"
|
resolver = "2"
|
||||||
members = [
|
members = [
|
||||||
"socktop",
|
"socktop",
|
||||||
"socktop_agent"
|
"socktop_agent",
|
||||||
|
"socktop_connector"
|
||||||
]
|
]
|
||||||
|
|
||||||
[workspace.dependencies]
|
[workspace.dependencies]
|
||||||
@ -26,7 +27,6 @@ sysinfo = "0.37"
|
|||||||
ratatui = "0.28"
|
ratatui = "0.28"
|
||||||
crossterm = "0.27"
|
crossterm = "0.27"
|
||||||
|
|
||||||
|
|
||||||
# web server (remote-agent)
|
# web server (remote-agent)
|
||||||
axum = { version = "0.7", features = ["ws"] }
|
axum = { version = "0.7", features = ["ws"] }
|
||||||
|
|
||||||
@ -34,6 +34,13 @@ axum = { version = "0.7", features = ["ws"] }
|
|||||||
prost = "0.13"
|
prost = "0.13"
|
||||||
dirs-next = "2"
|
dirs-next = "2"
|
||||||
|
|
||||||
|
# compression
|
||||||
|
flate2 = "1.0"
|
||||||
|
|
||||||
|
# TLS
|
||||||
|
rustls = { version = "0.23", features = ["ring"] }
|
||||||
|
rustls-pemfile = "2.1"
|
||||||
|
|
||||||
[profile.release]
|
[profile.release]
|
||||||
# Favor smaller, simpler binaries with good runtime perf
|
# Favor smaller, simpler binaries with good runtime perf
|
||||||
lto = "thin"
|
lto = "thin"
|
||||||
|
|||||||
21
LICENSE
Normal file
21
LICENSE
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2025 Witty One Off
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
17
README.md
17
README.md
@ -5,7 +5,7 @@ socktop is a remote system monitor with a rich TUI, inspired by top/btop, talkin
|
|||||||
- Linux agent: near-zero CPU when idle (request-driven, no always-on sampler)
|
- Linux agent: near-zero CPU when idle (request-driven, no always-on sampler)
|
||||||
- TUI: smooth graphs, sortable process table, scrollbars, readable colors
|
- TUI: smooth graphs, sortable process table, scrollbars, readable colors
|
||||||
|
|
||||||

|
<img src="./docs/socktop_demo.apng" width="100%">
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@ -60,6 +60,8 @@ sudo apt-get update
|
|||||||
sudo apt-get install libdrm-dev libdrm-amdgpu1
|
sudo apt-get install libdrm-dev libdrm-amdgpu1
|
||||||
```
|
```
|
||||||
|
|
||||||
|
_Additional note for Raspberry Pi users. Please update your system to use the newest kernel available through app, kernel version 6.6+ will use considerably less overall CPU to run the agent. For example on a rpi4 the kernel < 6.6 the agent will consume .8 cpu but on the same hardware on > 6.6 the agent will consume only .2 cpu. (these numbers indicate continuous polling at web socket endpoints, when not in use the usage is 0)_
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Architecture
|
## Architecture
|
||||||
@ -94,6 +96,12 @@ cargo build --release
|
|||||||
./target/release/socktop ws://REMOTE_HOST:3000/ws
|
./target/release/socktop ws://REMOTE_HOST:3000/ws
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Cross-compiling for Raspberry Pi
|
||||||
|
|
||||||
|
For Raspberry Pi and other ARM devices, you can cross-compile the agent from a more powerful machine:
|
||||||
|
|
||||||
|
- [Cross-compilation guide](./docs/cross-compiling.md) - Instructions for cross-compiling from Linux, macOS, or Windows hosts
|
||||||
|
|
||||||
### Quick demo (no agent setup)
|
### Quick demo (no agent setup)
|
||||||
|
|
||||||
Spin up a temporary local agent on port 3231 and connect automatically:
|
Spin up a temporary local agent on port 3231 and connect automatically:
|
||||||
@ -199,7 +207,7 @@ socktop -t /path/to/cert.pem wss://HOST:8443/ws
|
|||||||
|
|
||||||
Intervals (client-driven):
|
Intervals (client-driven):
|
||||||
- Fast metrics: ~500 ms
|
- Fast metrics: ~500 ms
|
||||||
- Processes: ~2 s (top 50)
|
- Processes: ~2 s
|
||||||
- Disks: ~5 s
|
- Disks: ~5 s
|
||||||
|
|
||||||
The agent stays idle unless queried. When queried, it collects just what’s needed.
|
The agent stays idle unless queried. When queried, it collects just what’s needed.
|
||||||
@ -525,10 +533,13 @@ Every commit will then format Rust sources and restage them automatically.
|
|||||||
- [x] Agent authentication (token)
|
- [x] Agent authentication (token)
|
||||||
- [x] Hide per-thread entries; only show processes
|
- [x] Hide per-thread entries; only show processes
|
||||||
- [x] Sort top processes in the TUI
|
- [x] Sort top processes in the TUI
|
||||||
- [ ] Configurable refresh intervals (client)
|
- [x] Configurable refresh intervals (client)
|
||||||
- [ ] Export metrics to file
|
- [ ] Export metrics to file
|
||||||
- [x] TLS / WSS support (self‑signed server cert + client pinning)
|
- [x] TLS / WSS support (self‑signed server cert + client pinning)
|
||||||
- [x] Split processes/disks to separate WS calls with independent cadences (already logical on client; formalize API)
|
- [x] Split processes/disks to separate WS calls with independent cadences (already logical on client; formalize API)
|
||||||
|
- [ ] Outage notifications and reconnect.
|
||||||
|
- [ ] Per process detailed statistics pane
|
||||||
|
- [ ] cleanup of Disks section, properly display physical disks / partitions, remove duplicate entries
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
204
docs/cross-compiling.md
Normal file
204
docs/cross-compiling.md
Normal file
@ -0,0 +1,204 @@
|
|||||||
|
# Cross-Compiling socktop_agent for Raspberry Pi
|
||||||
|
|
||||||
|
This guide explains how to cross-compile the socktop_agent on various host systems and deploy it to a Raspberry Pi. Cross-compiling is particularly useful for older or resource-constrained Pi models where native compilation might be slow.
|
||||||
|
|
||||||
|
## Cross-Compilation Host Setup
|
||||||
|
|
||||||
|
Choose your host operating system:
|
||||||
|
|
||||||
|
- [Debian/Ubuntu](#debianubuntu-based-systems)
|
||||||
|
- [Arch Linux](#arch-linux-based-systems)
|
||||||
|
- [macOS](#macos)
|
||||||
|
- [Windows](#windows)
|
||||||
|
|
||||||
|
## Debian/Ubuntu Based Systems
|
||||||
|
|
||||||
|
### Prerequisites
|
||||||
|
|
||||||
|
Install the cross-compilation toolchain for your target Raspberry Pi architecture:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# For 64-bit Raspberry Pi (aarch64)
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install gcc-aarch64-linux-gnu libc6-dev-arm64-cross libdrm-dev:arm64
|
||||||
|
|
||||||
|
# For 32-bit Raspberry Pi (armv7)
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install gcc-arm-linux-gnueabihf libc6-dev-armhf-cross libdrm-dev:armhf
|
||||||
|
```
|
||||||
|
|
||||||
|
### Setup Rust Cross-Compilation Targets
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# For 64-bit Raspberry Pi
|
||||||
|
rustup target add aarch64-unknown-linux-gnu
|
||||||
|
|
||||||
|
# For 32-bit Raspberry Pi
|
||||||
|
rustup target add armv7-unknown-linux-gnueabihf
|
||||||
|
```
|
||||||
|
|
||||||
|
### Configure Cargo for Cross-Compilation
|
||||||
|
|
||||||
|
Create or edit `~/.cargo/config.toml`:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[target.aarch64-unknown-linux-gnu]
|
||||||
|
linker = "aarch64-linux-gnu-gcc"
|
||||||
|
|
||||||
|
[target.armv7-unknown-linux-gnueabihf]
|
||||||
|
linker = "arm-linux-gnueabihf-gcc"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Arch Linux Based Systems
|
||||||
|
|
||||||
|
### Prerequisites
|
||||||
|
|
||||||
|
Install the cross-compilation toolchain using pacman and AUR:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install base dependencies
|
||||||
|
sudo pacman -S base-devel
|
||||||
|
|
||||||
|
# For 64-bit Raspberry Pi (aarch64)
|
||||||
|
sudo pacman -S aarch64-linux-gnu-gcc
|
||||||
|
# Install libdrm for aarch64 using an AUR helper (e.g., yay, paru)
|
||||||
|
yay -S aarch64-linux-gnu-libdrm
|
||||||
|
|
||||||
|
# For 32-bit Raspberry Pi (armv7)
|
||||||
|
sudo pacman -S arm-linux-gnueabihf-gcc
|
||||||
|
# Install libdrm for armv7 using an AUR helper
|
||||||
|
yay -S arm-linux-gnueabihf-libdrm
|
||||||
|
```
|
||||||
|
|
||||||
|
### Setup Rust Cross-Compilation Targets
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# For 64-bit Raspberry Pi
|
||||||
|
rustup target add aarch64-unknown-linux-gnu
|
||||||
|
|
||||||
|
# For 32-bit Raspberry Pi
|
||||||
|
rustup target add armv7-unknown-linux-gnueabihf
|
||||||
|
```
|
||||||
|
|
||||||
|
### Configure Cargo for Cross-Compilation
|
||||||
|
|
||||||
|
Create or edit `~/.cargo/config.toml`:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[target.aarch64-unknown-linux-gnu]
|
||||||
|
linker = "aarch64-linux-gnu-gcc"
|
||||||
|
|
||||||
|
[target.armv7-unknown-linux-gnueabihf]
|
||||||
|
linker = "arm-linux-gnueabihf-gcc"
|
||||||
|
```
|
||||||
|
|
||||||
|
## macOS
|
||||||
|
|
||||||
|
The recommended approach for cross-compiling from macOS is to use Docker:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install Docker
|
||||||
|
brew install --cask docker
|
||||||
|
|
||||||
|
# Pull a cross-compilation Docker image
|
||||||
|
docker pull messense/rust-musl-cross:armv7-musleabihf # For 32-bit Pi
|
||||||
|
docker pull messense/rust-musl-cross:aarch64-musl # For 64-bit Pi
|
||||||
|
```
|
||||||
|
|
||||||
|
### Using Docker for Cross-Compilation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Navigate to your socktop project directory
|
||||||
|
cd path/to/socktop
|
||||||
|
|
||||||
|
# For 64-bit Raspberry Pi
|
||||||
|
docker run --rm -it -v "$(pwd)":/home/rust/src messense/rust-musl-cross:aarch64-musl cargo build --release --target aarch64-unknown-linux-musl -p socktop_agent
|
||||||
|
|
||||||
|
# For 32-bit Raspberry Pi
|
||||||
|
docker run --rm -it -v "$(pwd)":/home/rust/src messense/rust-musl-cross:armv7-musleabihf cargo build --release --target armv7-unknown-linux-musleabihf -p socktop_agent
|
||||||
|
```
|
||||||
|
|
||||||
|
The compiled binaries will be available in your local target directory.
|
||||||
|
|
||||||
|
## Windows
|
||||||
|
|
||||||
|
The recommended approach for Windows is to use Windows Subsystem for Linux (WSL2):
|
||||||
|
|
||||||
|
1. Install WSL2 with a Debian/Ubuntu distribution by following the [official Microsoft documentation](https://docs.microsoft.com/en-us/windows/wsl/install).
|
||||||
|
|
||||||
|
2. Once WSL2 is set up with a Debian/Ubuntu distribution, open your WSL terminal and follow the [Debian/Ubuntu instructions](#debianubuntu-based-systems) above.
|
||||||
|
|
||||||
|
## Cross-Compile the Agent
|
||||||
|
|
||||||
|
After setting up your environment, build the socktop_agent for your target Raspberry Pi:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# For 64-bit Raspberry Pi
|
||||||
|
cargo build --release --target aarch64-unknown-linux-gnu -p socktop_agent
|
||||||
|
|
||||||
|
# For 32-bit Raspberry Pi
|
||||||
|
cargo build --release --target armv7-unknown-linux-gnueabihf -p socktop_agent
|
||||||
|
```
|
||||||
|
|
||||||
|
## Transfer the Binary to Your Raspberry Pi
|
||||||
|
|
||||||
|
Use SCP to transfer the compiled binary to your Raspberry Pi:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# For 64-bit Raspberry Pi
|
||||||
|
scp target/aarch64-unknown-linux-gnu/release/socktop_agent pi@raspberry-pi-ip:~/
|
||||||
|
|
||||||
|
# For 32-bit Raspberry Pi
|
||||||
|
scp target/armv7-unknown-linux-gnueabihf/release/socktop_agent pi@raspberry-pi-ip:~/
|
||||||
|
```
|
||||||
|
|
||||||
|
Replace `raspberry-pi-ip` with your Raspberry Pi's IP address and `pi` with your username.
|
||||||
|
|
||||||
|
## Install Dependencies on the Raspberry Pi
|
||||||
|
|
||||||
|
SSH into your Raspberry Pi and install the required dependencies:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ssh pi@raspberry-pi-ip
|
||||||
|
|
||||||
|
# For Raspberry Pi OS (Debian-based)
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install libdrm-dev libdrm-amdgpu1
|
||||||
|
|
||||||
|
# For Arch Linux ARM
|
||||||
|
sudo pacman -Syu
|
||||||
|
sudo pacman -S libdrm
|
||||||
|
```
|
||||||
|
|
||||||
|
## Make the Binary Executable and Install
|
||||||
|
|
||||||
|
```bash
|
||||||
|
chmod +x ~/socktop_agent
|
||||||
|
|
||||||
|
# Optional: Install system-wide
|
||||||
|
sudo install -o root -g root -m 0755 ~/socktop_agent /usr/local/bin/socktop_agent
|
||||||
|
|
||||||
|
# Optional: Set up as a systemd service
|
||||||
|
sudo install -o root -g root -m 0644 ~/socktop-agent.service /etc/systemd/system/socktop-agent.service
|
||||||
|
sudo systemctl daemon-reload
|
||||||
|
sudo systemctl enable --now socktop-agent
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
If you encounter issues with the cross-compiled binary:
|
||||||
|
|
||||||
|
1. **Incorrect Architecture**: Ensure you've chosen the correct target for your Raspberry Pi model:
|
||||||
|
- For Raspberry Pi 2: use `armv7-unknown-linux-gnueabihf`
|
||||||
|
- For Raspberry Pi 3/4/5 in 64-bit mode: use `aarch64-unknown-linux-gnu`
|
||||||
|
- For Raspberry Pi 3/4/5 in 32-bit mode: use `armv7-unknown-linux-gnueabihf`
|
||||||
|
|
||||||
|
2. **Dependency Issues**: Check for missing libraries:
|
||||||
|
```bash
|
||||||
|
ldd ~/socktop_agent
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Run with Backtrace**: Get detailed error information:
|
||||||
|
```bash
|
||||||
|
RUST_BACKTRACE=1 ~/socktop_agent
|
||||||
|
```
|
||||||
BIN
docs/socktop_demo.apng
Normal file
BIN
docs/socktop_demo.apng
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 47 MiB |
@ -1,8 +0,0 @@
|
|||||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8433/ws
|
|
||||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8433/ws
|
|
||||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8433/ws
|
|
||||||
Error: Address already in use (os error 98)
|
|
||||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8433/ws
|
|
||||||
Error: Address already in use (os error 98)
|
|
||||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8443/ws
|
|
||||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8443/ws
|
|
||||||
47
scripts/check-windows.sh
Normal file
47
scripts/check-windows.sh
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# Cross-check Windows build from Linux using the GNU (MinGW) toolchain.
|
||||||
|
# - Ensures target `x86_64-pc-windows-gnu` is installed
|
||||||
|
# - Verifies MinGW cross-compiler is available (x86_64-w64-mingw32-gcc)
|
||||||
|
# - Runs cargo clippy with warnings-as-errors for the Windows target
|
||||||
|
# - Builds release binaries for the Windows target
|
||||||
|
|
||||||
|
echo "[socktop] Windows cross-check: clippy + build (GNU target)"
|
||||||
|
|
||||||
|
have() { command -v "$1" >/dev/null 2>&1; }
|
||||||
|
|
||||||
|
if ! have rustup; then
|
||||||
|
echo "error: rustup not found. Install Rust via rustup first (see README)." >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! rustup target list --installed | grep -q '^x86_64-pc-windows-gnu$'; then
|
||||||
|
echo "+ rustup target add x86_64-pc-windows-gnu"
|
||||||
|
rustup target add x86_64-pc-windows-gnu
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! have x86_64-w64-mingw32-gcc; then
|
||||||
|
echo "error: Missing MinGW cross-compiler (x86_64-w64-mingw32-gcc)." >&2
|
||||||
|
if have pacman; then
|
||||||
|
echo "Arch Linux: sudo pacman -S --needed mingw-w64-gcc" >&2
|
||||||
|
elif have apt-get; then
|
||||||
|
echo "Debian/Ubuntu: sudo apt-get install -y mingw-w64" >&2
|
||||||
|
elif have dnf; then
|
||||||
|
echo "Fedora: sudo dnf install -y mingw64-gcc" >&2
|
||||||
|
else
|
||||||
|
echo "Install the mingw-w64 toolchain for your distro, then re-run." >&2
|
||||||
|
fi
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
CARGO_FLAGS=(--workspace --all-targets --all-features --target x86_64-pc-windows-gnu)
|
||||||
|
|
||||||
|
echo "+ cargo clippy ${CARGO_FLAGS[*]} -- -D warnings"
|
||||||
|
cargo clippy "${CARGO_FLAGS[@]}" -- -D warnings
|
||||||
|
|
||||||
|
echo "+ cargo build --release ${CARGO_FLAGS[*]}"
|
||||||
|
cargo build --release "${CARGO_FLAGS[@]}"
|
||||||
|
|
||||||
|
echo "✅ Windows clippy and build completed successfully."
|
||||||
|
|
||||||
43
scripts/publish_socktop_agent.sh
Normal file
43
scripts/publish_socktop_agent.sh
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# Publish job: "publish new socktop agent version"
|
||||||
|
# Usage: ./scripts/publish_socktop_agent.sh <new_version>
|
||||||
|
|
||||||
|
if [[ ${1:-} == "" ]]; then
|
||||||
|
echo "Usage: $0 <new_version>" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
NEW_VERSION="$1"
|
||||||
|
ROOT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")/.." && pwd)
|
||||||
|
CRATE_DIR="$ROOT_DIR/socktop_agent"
|
||||||
|
|
||||||
|
echo "==> Formatting socktop_agent"
|
||||||
|
(cd "$ROOT_DIR" && cargo fmt -p socktop_agent)
|
||||||
|
|
||||||
|
echo "==> Running tests for socktop_agent"
|
||||||
|
(cd "$ROOT_DIR" && cargo test -p socktop_agent)
|
||||||
|
|
||||||
|
echo "==> Running clippy (warnings as errors) for socktop_agent"
|
||||||
|
(cd "$ROOT_DIR" && cargo clippy -p socktop_agent -- -D warnings)
|
||||||
|
|
||||||
|
echo "==> Building release for socktop_agent"
|
||||||
|
(cd "$ROOT_DIR" && cargo build -p socktop_agent --release)
|
||||||
|
|
||||||
|
echo "==> Bumping version to $NEW_VERSION in socktop_agent/Cargo.toml"
|
||||||
|
sed -i.bak -E "s/^version = \"[0-9]+\.[0-9]+\.[0-9]+\"/version = \"$NEW_VERSION\"/" "$CRATE_DIR/Cargo.toml"
|
||||||
|
rm -f "$CRATE_DIR/Cargo.toml.bak"
|
||||||
|
|
||||||
|
echo "==> Committing version bump"
|
||||||
|
(cd "$ROOT_DIR" && git add -A && git commit -m "socktop_agent: bump version to $NEW_VERSION")
|
||||||
|
|
||||||
|
CURRENT_BRANCH=$(cd "$ROOT_DIR" && git rev-parse --abbrev-ref HEAD)
|
||||||
|
echo "==> Pushing to origin $CURRENT_BRANCH"
|
||||||
|
(cd "$ROOT_DIR" && git push origin "$CURRENT_BRANCH")
|
||||||
|
|
||||||
|
echo "==> Publishing socktop_agent $NEW_VERSION to crates.io"
|
||||||
|
(cd "$ROOT_DIR" && cargo publish -p socktop_agent)
|
||||||
|
|
||||||
|
echo "==> Done: socktop_agent $NEW_VERSION published"
|
||||||
|
|
||||||
@ -1,14 +1,17 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "socktop"
|
name = "socktop"
|
||||||
version = "0.1.3"
|
version = "1.50.0"
|
||||||
authors = ["Jason Witty <jasonpwitty+socktop@proton.me>"]
|
authors = ["Jason Witty <jasonpwitty+socktop@proton.me>"]
|
||||||
description = "Remote system monitor over WebSocket, TUI like top"
|
description = "Remote system monitor over WebSocket, TUI like top"
|
||||||
edition = "2021"
|
edition = "2024"
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
|
readme = "README.md"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
# socktop connector for agent communication
|
||||||
|
socktop_connector = "1.50.0"
|
||||||
|
|
||||||
tokio = { workspace = true }
|
tokio = { workspace = true }
|
||||||
tokio-tungstenite = { workspace = true }
|
|
||||||
futures-util = { workspace = true }
|
futures-util = { workspace = true }
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
@ -16,17 +19,9 @@ url = { workspace = true }
|
|||||||
ratatui = { workspace = true }
|
ratatui = { workspace = true }
|
||||||
crossterm = { workspace = true }
|
crossterm = { workspace = true }
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
flate2 = { version = "1", default-features = false, features = ["rust_backend"] }
|
|
||||||
dirs-next = { workspace = true }
|
dirs-next = { workspace = true }
|
||||||
sysinfo = { workspace = true }
|
sysinfo = { workspace = true }
|
||||||
rustls = "0.23"
|
|
||||||
rustls-pemfile = "2.1"
|
|
||||||
prost = { workspace = true }
|
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
assert_cmd = "2.0"
|
assert_cmd = "2.0"
|
||||||
tempfile = "3"
|
tempfile = "3"
|
||||||
|
|
||||||
[build-dependencies]
|
|
||||||
prost-build = "0.13"
|
|
||||||
protoc-bin-vendored = "3"
|
|
||||||
26
socktop/README.md
Normal file
26
socktop/README.md
Normal file
@ -0,0 +1,26 @@
|
|||||||
|
# socktop (client)
|
||||||
|
|
||||||
|
Minimal TUI client for the socktop remote monitoring agent.
|
||||||
|
|
||||||
|
Features:
|
||||||
|
- Connects to a socktop_agent over WebSocket / secure WebSocket
|
||||||
|
- Displays CPU, memory, swap, disks, network, processes, (optional) GPU metrics
|
||||||
|
- Self‑signed TLS cert pinning via --tls-ca
|
||||||
|
- Profile management with saved intervals
|
||||||
|
- Low CPU usage (request-driven updates)
|
||||||
|
|
||||||
|
Quick start:
|
||||||
|
```
|
||||||
|
cargo install socktop
|
||||||
|
socktop ws://HOST:3000/ws
|
||||||
|
```
|
||||||
|
With TLS (copy agent cert first):
|
||||||
|
```
|
||||||
|
socktop --tls-ca cert.pem wss://HOST:8443/ws
|
||||||
|
```
|
||||||
|
Demo mode (spawns a local agent automatically on first run prompt):
|
||||||
|
```
|
||||||
|
socktop --demo
|
||||||
|
```
|
||||||
|
Full documentation, screenshots, and advanced usage:
|
||||||
|
https://github.com/jasonwitty/socktop
|
||||||
@ -1,14 +0,0 @@
|
|||||||
fn main() {
|
|
||||||
// Vendored protoc for reproducible builds (works on crates.io build machines)
|
|
||||||
let protoc = protoc_bin_vendored::protoc_bin_path().expect("protoc");
|
|
||||||
std::env::set_var("PROTOC", &protoc);
|
|
||||||
|
|
||||||
// Tell Cargo when to re-run
|
|
||||||
println!("cargo:rerun-if-changed=proto/processes.proto");
|
|
||||||
|
|
||||||
let mut cfg = prost_build::Config::new();
|
|
||||||
cfg.out_dir(std::env::var("OUT_DIR").unwrap());
|
|
||||||
// Use in-crate relative path so `cargo package` includes the file
|
|
||||||
cfg.compile_protos(&["proto/processes.proto"], &["proto"]) // paths relative to CARGO_MANIFEST_DIR
|
|
||||||
.expect("compile protos");
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
@ -1,4 +1,6 @@
|
|||||||
//! Library surface for integration tests and reuse.
|
//! Library surface for integration tests and reuse.
|
||||||
|
|
||||||
pub mod types;
|
pub mod types;
|
||||||
pub mod ws;
|
|
||||||
|
// Re-export connector functionality
|
||||||
|
pub use socktop_connector::{SocktopConnector, connect_to_socktop_agent};
|
||||||
|
|||||||
@ -3,12 +3,12 @@
|
|||||||
mod app;
|
mod app;
|
||||||
mod history;
|
mod history;
|
||||||
mod profiles;
|
mod profiles;
|
||||||
|
mod retry;
|
||||||
mod types;
|
mod types;
|
||||||
mod ui;
|
mod ui; // pure retry timing logic
|
||||||
mod ws;
|
|
||||||
|
|
||||||
use app::App;
|
use app::App;
|
||||||
use profiles::{load_profiles, save_profiles, ProfileEntry, ProfileRequest, ResolveProfile};
|
use profiles::{ProfileEntry, ProfileRequest, ResolveProfile, load_profiles, save_profiles};
|
||||||
use std::env;
|
use std::env;
|
||||||
use std::io::{self, Write};
|
use std::io::{self, Write};
|
||||||
|
|
||||||
@ -39,7 +39,9 @@ pub(crate) fn parse_args<I: IntoIterator<Item = String>>(args: I) -> Result<Pars
|
|||||||
while let Some(arg) = it.next() {
|
while let Some(arg) = it.next() {
|
||||||
match arg.as_str() {
|
match arg.as_str() {
|
||||||
"-h" | "--help" => {
|
"-h" | "--help" => {
|
||||||
return Err(format!("Usage: {prog} [--tls-ca CERT_PEM|-t CERT_PEM] [--verify-hostname] [--profile NAME|-P NAME] [--save] [--demo] [--metrics-interval-ms N] [--processes-interval-ms N] [ws://HOST:PORT/ws]\n"));
|
return Err(format!(
|
||||||
|
"Usage: {prog} [--tls-ca CERT_PEM|-t CERT_PEM] [--verify-hostname] [--profile NAME|-P NAME] [--save] [--demo] [--metrics-interval-ms N] [--processes-interval-ms N] [ws://HOST:PORT/ws]\n"
|
||||||
|
));
|
||||||
}
|
}
|
||||||
"--tls-ca" | "-t" => {
|
"--tls-ca" | "-t" => {
|
||||||
tls_ca = it.next();
|
tls_ca = it.next();
|
||||||
@ -70,19 +72,19 @@ pub(crate) fn parse_args<I: IntoIterator<Item = String>>(args: I) -> Result<Pars
|
|||||||
processes_interval_ms = it.next().and_then(|v| v.parse().ok());
|
processes_interval_ms = it.next().and_then(|v| v.parse().ok());
|
||||||
}
|
}
|
||||||
_ if arg.starts_with("--tls-ca=") => {
|
_ if arg.starts_with("--tls-ca=") => {
|
||||||
if let Some((_, v)) = arg.split_once('=') {
|
if let Some((_, v)) = arg.split_once('=')
|
||||||
if !v.is_empty() {
|
&& !v.is_empty()
|
||||||
|
{
|
||||||
tls_ca = Some(v.to_string());
|
tls_ca = Some(v.to_string());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
_ if arg.starts_with("--profile=") => {
|
_ if arg.starts_with("--profile=") => {
|
||||||
if let Some((_, v)) = arg.split_once('=') {
|
if let Some((_, v)) = arg.split_once('=')
|
||||||
if !v.is_empty() {
|
&& !v.is_empty()
|
||||||
|
{
|
||||||
profile = Some(v.to_string());
|
profile = Some(v.to_string());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
_ if arg.starts_with("--metrics-interval-ms=") => {
|
_ if arg.starts_with("--metrics-interval-ms=") => {
|
||||||
if let Some((_, v)) = arg.split_once('=') {
|
if let Some((_, v)) = arg.split_once('=') {
|
||||||
metrics_interval_ms = v.parse().ok();
|
metrics_interval_ms = v.parse().ok();
|
||||||
@ -97,7 +99,9 @@ pub(crate) fn parse_args<I: IntoIterator<Item = String>>(args: I) -> Result<Pars
|
|||||||
if url.is_none() {
|
if url.is_none() {
|
||||||
url = Some(arg);
|
url = Some(arg);
|
||||||
} else {
|
} else {
|
||||||
return Err(format!("Unexpected argument. Usage: {prog} [--tls-ca CERT_PEM|-t CERT_PEM] [--verify-hostname] [--profile NAME|-P NAME] [--save] [--demo] [ws://HOST:PORT/ws]"));
|
return Err(format!(
|
||||||
|
"Unexpected argument. Usage: {prog} [--tls-ca CERT_PEM|-t CERT_PEM] [--verify-hostname] [--profile NAME|-P NAME] [--save] [--demo] [ws://HOST:PORT/ws]"
|
||||||
|
));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -124,19 +128,24 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
|||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
//support version flag (print and exit)
|
||||||
|
if env::args().any(|a| a == "--version" || a == "-V") {
|
||||||
|
println!("socktop {}", env!("CARGO_PKG_VERSION"));
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
if parsed.demo || matches!(parsed.profile.as_deref(), Some("demo")) {
|
if parsed.demo || matches!(parsed.profile.as_deref(), Some("demo")) {
|
||||||
return run_demo_mode(parsed.tls_ca.as_deref()).await;
|
return run_demo_mode(parsed.tls_ca.as_deref()).await;
|
||||||
}
|
}
|
||||||
if parsed.verify_hostname {
|
|
||||||
// Set env var consumed by ws::connect logic
|
|
||||||
std::env::set_var("SOCKTOP_VERIFY_NAME", "1");
|
|
||||||
}
|
|
||||||
let profiles_file = load_profiles();
|
let profiles_file = load_profiles();
|
||||||
let req = ProfileRequest {
|
let req = ProfileRequest {
|
||||||
profile_name: parsed.profile.clone(),
|
profile_name: parsed.profile.clone(),
|
||||||
url: parsed.url.clone(),
|
url: parsed.url.clone(),
|
||||||
tls_ca: parsed.tls_ca.clone(),
|
tls_ca: parsed.tls_ca.clone(),
|
||||||
};
|
};
|
||||||
|
|
||||||
let resolved = req.resolve(&profiles_file);
|
let resolved = req.resolve(&profiles_file);
|
||||||
let mut profiles_mut = profiles_file.clone();
|
let mut profiles_mut = profiles_file.clone();
|
||||||
let (url, tls_ca, metrics_interval_ms, processes_interval_ms): (
|
let (url, tls_ca, metrics_interval_ms, processes_interval_ms): (
|
||||||
@ -229,7 +238,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
|||||||
let mut line = String::new();
|
let mut line = String::new();
|
||||||
if io::stdin().read_line(&mut line).is_ok() {
|
if io::stdin().read_line(&mut line).is_ok() {
|
||||||
if let Ok(idx) = line.trim().parse::<usize>() {
|
if let Ok(idx) = line.trim().parse::<usize>() {
|
||||||
if idx >= 1 && idx <= names.len() {
|
if (1..=names.len()).contains(&idx) {
|
||||||
let name = &names[idx - 1];
|
let name = &names[idx - 1];
|
||||||
if name == "demo" {
|
if name == "demo" {
|
||||||
return run_demo_mode(parsed.tls_ca.as_deref()).await;
|
return run_demo_mode(parsed.tls_ca.as_deref()).await;
|
||||||
@ -287,7 +296,9 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
|||||||
if profiles_mut.profiles.is_empty() && parsed.url.is_none() {
|
if profiles_mut.profiles.is_empty() && parsed.url.is_none() {
|
||||||
eprintln!("Welcome to socktop!");
|
eprintln!("Welcome to socktop!");
|
||||||
eprintln!("It looks like this is your first time running the application.");
|
eprintln!("It looks like this is your first time running the application.");
|
||||||
eprintln!("You can connect to a socktop_agent instance to monitor system metrics and processes.");
|
eprintln!(
|
||||||
|
"You can connect to a socktop_agent instance to monitor system metrics and processes."
|
||||||
|
);
|
||||||
eprintln!("If you don't have an agent running, you can try the demo mode.");
|
eprintln!("If you don't have an agent running, you can try the demo mode.");
|
||||||
if prompt_yes_no("Would you like to start the demo mode now? [Y/n]: ") {
|
if prompt_yes_no("Would you like to start the demo mode now? [Y/n]: ") {
|
||||||
return run_demo_mode(parsed.tls_ca.as_deref()).await;
|
return run_demo_mode(parsed.tls_ca.as_deref()).await;
|
||||||
@ -308,7 +319,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
|||||||
if parsed.dry_run {
|
if parsed.dry_run {
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
app.run(&url, tls_ca.as_deref()).await
|
app.run(&url, tls_ca.as_deref(), parsed.verify_hostname)
|
||||||
|
.await
|
||||||
}
|
}
|
||||||
|
|
||||||
fn prompt_yes_no(prompt: &str) -> bool {
|
fn prompt_yes_no(prompt: &str) -> bool {
|
||||||
@ -372,7 +384,8 @@ async fn run_demo_mode(_tls_ca: Option<&str>) -> Result<(), Box<dyn std::error::
|
|||||||
let url = format!("ws://127.0.0.1:{port}/ws");
|
let url = format!("ws://127.0.0.1:{port}/ws");
|
||||||
let child = spawn_demo_agent(port)?;
|
let child = spawn_demo_agent(port)?;
|
||||||
let mut app = App::new();
|
let mut app = App::new();
|
||||||
tokio::select! { res=app.run(&url,None)=>{ drop(child); res } _=tokio::signal::ctrl_c()=>{ drop(child); Ok(()) } }
|
// Demo mode connects to localhost, so disable hostname verification
|
||||||
|
tokio::select! { res=app.run(&url,None,false)=>{ drop(child); res } _=tokio::signal::ctrl_c()=>{ drop(child); Ok(()) } }
|
||||||
}
|
}
|
||||||
struct DemoGuard {
|
struct DemoGuard {
|
||||||
port: u16,
|
port: u16,
|
||||||
@ -404,8 +417,9 @@ fn spawn_demo_agent(port: u16) -> Result<DemoGuard, Box<dyn std::error::Error>>
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
fn find_agent_executable() -> std::path::PathBuf {
|
fn find_agent_executable() -> std::path::PathBuf {
|
||||||
if let Ok(exe) = std::env::current_exe() {
|
if let Ok(exe) = std::env::current_exe()
|
||||||
if let Some(parent) = exe.parent() {
|
&& let Some(parent) = exe.parent()
|
||||||
|
{
|
||||||
#[cfg(windows)]
|
#[cfg(windows)]
|
||||||
let name = "socktop_agent.exe";
|
let name = "socktop_agent.exe";
|
||||||
#[cfg(not(windows))]
|
#[cfg(not(windows))]
|
||||||
@ -415,6 +429,5 @@ fn find_agent_executable() -> std::path::PathBuf {
|
|||||||
return candidate;
|
return candidate;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
std::path::PathBuf::from("socktop_agent")
|
std::path::PathBuf::from("socktop_agent")
|
||||||
}
|
}
|
||||||
|
|||||||
@ -77,12 +77,13 @@ impl ProfileRequest {
|
|||||||
pub fn resolve(self, pf: &ProfilesFile) -> ResolveProfile {
|
pub fn resolve(self, pf: &ProfilesFile) -> ResolveProfile {
|
||||||
// Case: only profile name given -> try load
|
// Case: only profile name given -> try load
|
||||||
if self.url.is_none() && self.profile_name.is_some() {
|
if self.url.is_none() && self.profile_name.is_some() {
|
||||||
let name = self.profile_name.unwrap();
|
let Some(name) = self.profile_name else {
|
||||||
if let Some(entry) = pf.profiles.get(&name) {
|
unreachable!("Already checked profile_name.is_some()")
|
||||||
return ResolveProfile::Loaded(entry.url.clone(), entry.tls_ca.clone());
|
};
|
||||||
} else {
|
let Some(entry) = pf.profiles.get(&name) else {
|
||||||
return ResolveProfile::PromptCreate(name);
|
return ResolveProfile::PromptCreate(name);
|
||||||
}
|
};
|
||||||
|
return ResolveProfile::Loaded(entry.url.clone(), entry.tls_ca.clone());
|
||||||
}
|
}
|
||||||
// Both provided -> direct (maybe later saved by caller)
|
// Both provided -> direct (maybe later saved by caller)
|
||||||
if let Some(u) = self.url {
|
if let Some(u) = self.url {
|
||||||
|
|||||||
114
socktop/src/retry.rs
Normal file
114
socktop/src/retry.rs
Normal file
@ -0,0 +1,114 @@
|
|||||||
|
//! Pure retry timing logic (decoupled from App state / UI) for testability.
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
|
/// Result of computing retry timing.
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
|
pub struct RetryTiming {
|
||||||
|
pub should_retry_now: bool,
|
||||||
|
/// Seconds until next retry (Some(0) means ready now); None means inactive/no countdown.
|
||||||
|
pub seconds_until_retry: Option<u64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Compute retry timing given connection state inputs.
|
||||||
|
///
|
||||||
|
/// Inputs:
|
||||||
|
/// - `disconnected`: true when connection_state == Disconnected.
|
||||||
|
/// - `modal_active`: requires the connection error modal be visible to show countdown / trigger auto retry.
|
||||||
|
/// - `original_disconnect_time`: time we first noticed disconnect.
|
||||||
|
/// - `last_auto_retry`: time we last performed an automatic retry.
|
||||||
|
/// - `now`: current time (injected for determinism / tests).
|
||||||
|
/// - `interval`: retry interval duration.
|
||||||
|
pub(crate) fn compute_retry_timing(
|
||||||
|
disconnected: bool,
|
||||||
|
modal_active: bool,
|
||||||
|
original_disconnect_time: Option<Instant>,
|
||||||
|
last_auto_retry: Option<Instant>,
|
||||||
|
now: Instant,
|
||||||
|
interval: Duration,
|
||||||
|
) -> RetryTiming {
|
||||||
|
if !disconnected || !modal_active {
|
||||||
|
return RetryTiming {
|
||||||
|
should_retry_now: false,
|
||||||
|
seconds_until_retry: None,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
let baseline = match last_auto_retry.or(original_disconnect_time) {
|
||||||
|
Some(b) => b,
|
||||||
|
None => {
|
||||||
|
return RetryTiming {
|
||||||
|
should_retry_now: false,
|
||||||
|
seconds_until_retry: None,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let elapsed = now.saturating_duration_since(baseline);
|
||||||
|
if elapsed >= interval {
|
||||||
|
RetryTiming {
|
||||||
|
should_retry_now: true,
|
||||||
|
seconds_until_retry: Some(0),
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
let remaining = interval - elapsed;
|
||||||
|
RetryTiming {
|
||||||
|
should_retry_now: false,
|
||||||
|
seconds_until_retry: Some(remaining.as_secs()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn inactive_when_not_disconnected() {
|
||||||
|
let now = Instant::now();
|
||||||
|
let rt = compute_retry_timing(false, true, Some(now), None, now, Duration::from_secs(30));
|
||||||
|
assert!(!rt.should_retry_now);
|
||||||
|
assert_eq!(rt.seconds_until_retry, None);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn countdown_progress_and_ready() {
|
||||||
|
let base = Instant::now();
|
||||||
|
let rt1 = compute_retry_timing(
|
||||||
|
true,
|
||||||
|
true,
|
||||||
|
Some(base),
|
||||||
|
None,
|
||||||
|
base + Duration::from_secs(10),
|
||||||
|
Duration::from_secs(30),
|
||||||
|
);
|
||||||
|
assert!(!rt1.should_retry_now);
|
||||||
|
assert_eq!(rt1.seconds_until_retry, Some(20));
|
||||||
|
let rt2 = compute_retry_timing(
|
||||||
|
true,
|
||||||
|
true,
|
||||||
|
Some(base),
|
||||||
|
None,
|
||||||
|
base + Duration::from_secs(30),
|
||||||
|
Duration::from_secs(30),
|
||||||
|
);
|
||||||
|
assert!(rt2.should_retry_now);
|
||||||
|
assert_eq!(rt2.seconds_until_retry, Some(0));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn uses_last_auto_retry_as_baseline() {
|
||||||
|
let base: Instant = Instant::now();
|
||||||
|
let last = base + Duration::from_secs(30); // one prior retry
|
||||||
|
// 10s after last retry => 20s remaining
|
||||||
|
let rt = compute_retry_timing(
|
||||||
|
true,
|
||||||
|
true,
|
||||||
|
Some(base),
|
||||||
|
Some(last),
|
||||||
|
last + Duration::from_secs(10),
|
||||||
|
Duration::from_secs(30),
|
||||||
|
);
|
||||||
|
assert!(!rt.should_retry_now);
|
||||||
|
assert_eq!(rt.seconds_until_retry, Some(20));
|
||||||
|
}
|
||||||
|
}
|
||||||
@ -1,78 +1,4 @@
|
|||||||
//! Types that mirror the agent's JSON schema.
|
//! Types that mirror the agent's JSON schema.
|
||||||
|
|
||||||
use serde::Deserialize;
|
// Re-export commonly used types from socktop_connector
|
||||||
|
pub use socktop_connector::Metrics;
|
||||||
#[derive(Debug, Clone, Deserialize)]
|
|
||||||
pub struct ProcessInfo {
|
|
||||||
pub pid: u32,
|
|
||||||
pub name: String,
|
|
||||||
pub cpu_usage: f32,
|
|
||||||
pub mem_bytes: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Deserialize)]
|
|
||||||
pub struct DiskInfo {
|
|
||||||
pub name: String,
|
|
||||||
pub total: u64,
|
|
||||||
pub available: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Deserialize)]
|
|
||||||
pub struct NetworkInfo {
|
|
||||||
#[allow(dead_code)]
|
|
||||||
pub name: String,
|
|
||||||
pub received: u64,
|
|
||||||
pub transmitted: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Deserialize)]
|
|
||||||
pub struct GpuInfo {
|
|
||||||
pub name: Option<String>,
|
|
||||||
#[allow(dead_code)]
|
|
||||||
pub vendor: Option<String>,
|
|
||||||
|
|
||||||
// Accept both the new and legacy keys
|
|
||||||
#[serde(
|
|
||||||
default,
|
|
||||||
alias = "utilization_gpu_pct",
|
|
||||||
alias = "gpu_util_pct",
|
|
||||||
alias = "gpu_utilization"
|
|
||||||
)]
|
|
||||||
pub utilization: Option<f32>,
|
|
||||||
|
|
||||||
#[serde(default, alias = "mem_used_bytes", alias = "vram_used_bytes")]
|
|
||||||
pub mem_used: Option<u64>,
|
|
||||||
|
|
||||||
#[serde(default, alias = "mem_total_bytes", alias = "vram_total_bytes")]
|
|
||||||
pub mem_total: Option<u64>,
|
|
||||||
|
|
||||||
#[allow(dead_code)]
|
|
||||||
#[serde(default, alias = "temp_c", alias = "temperature_c")]
|
|
||||||
pub temperature: Option<f32>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, Deserialize)]
|
|
||||||
pub struct Metrics {
|
|
||||||
pub cpu_total: f32,
|
|
||||||
pub cpu_per_core: Vec<f32>,
|
|
||||||
pub mem_total: u64,
|
|
||||||
pub mem_used: u64,
|
|
||||||
pub swap_total: u64,
|
|
||||||
pub swap_used: u64,
|
|
||||||
pub hostname: String,
|
|
||||||
pub cpu_temp_c: Option<f32>,
|
|
||||||
pub disks: Vec<DiskInfo>,
|
|
||||||
pub networks: Vec<NetworkInfo>,
|
|
||||||
pub top_processes: Vec<ProcessInfo>,
|
|
||||||
pub gpus: Option<Vec<GpuInfo>>,
|
|
||||||
// New: keep the last reported total process count
|
|
||||||
#[serde(default)]
|
|
||||||
pub process_count: Option<usize>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[allow(dead_code)]
|
|
||||||
#[derive(Debug, Clone, Deserialize)]
|
|
||||||
pub struct ProcessesPayload {
|
|
||||||
pub process_count: usize,
|
|
||||||
pub top_processes: Vec<ProcessInfo>,
|
|
||||||
}
|
|
||||||
|
|||||||
1849
socktop/src/ui/.modal.rs.backup
Normal file
1849
socktop/src/ui/.modal.rs.backup
Normal file
File diff suppressed because it is too large
Load Diff
@ -42,8 +42,8 @@ pub fn per_core_content_area(area: Rect) -> Rect {
|
|||||||
/// Handles key events for per-core CPU bars.
|
/// Handles key events for per-core CPU bars.
|
||||||
pub fn per_core_handle_key(scroll_offset: &mut usize, key: KeyEvent, page_size: usize) {
|
pub fn per_core_handle_key(scroll_offset: &mut usize, key: KeyEvent, page_size: usize) {
|
||||||
match key.code {
|
match key.code {
|
||||||
KeyCode::Up => *scroll_offset = scroll_offset.saturating_sub(1),
|
KeyCode::Left => *scroll_offset = scroll_offset.saturating_sub(1),
|
||||||
KeyCode::Down => *scroll_offset = scroll_offset.saturating_add(1),
|
KeyCode::Right => *scroll_offset = scroll_offset.saturating_add(1),
|
||||||
KeyCode::PageUp => {
|
KeyCode::PageUp => {
|
||||||
let step = page_size.max(1);
|
let step = page_size.max(1);
|
||||||
*scroll_offset = scroll_offset.saturating_sub(step);
|
*scroll_offset = scroll_offset.saturating_sub(step);
|
||||||
@ -180,8 +180,9 @@ pub fn per_core_handle_scrollbar_mouse(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
MouseEventKind::Drag(MouseButton::Left) => {
|
MouseEventKind::Drag(MouseButton::Left) => {
|
||||||
if let Some(mut d) = drag.take() {
|
if let Some(mut d) = drag.take()
|
||||||
if d.active {
|
&& d.active
|
||||||
|
{
|
||||||
let dy = (mouse.row as i32) - (d.start_y as i32);
|
let dy = (mouse.row as i32) - (d.start_y as i32);
|
||||||
let new_top = (d.start_top as i32 + dy)
|
let new_top = (d.start_top as i32 + dy)
|
||||||
.clamp(0, (track.saturating_sub(thumb_len)) as i32)
|
.clamp(0, (track.saturating_sub(thumb_len)) as i32)
|
||||||
@ -203,7 +204,6 @@ pub fn per_core_handle_scrollbar_mouse(
|
|||||||
*drag = Some(d);
|
*drag = Some(d);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
MouseEventKind::Up(MouseButton::Left) => {
|
MouseEventKind::Up(MouseButton::Left) => {
|
||||||
// End drag
|
// End drag
|
||||||
*drag = None;
|
*drag = None;
|
||||||
@ -240,20 +240,61 @@ pub fn draw_cpu_avg_graph(
|
|||||||
hist: &std::collections::VecDeque<u64>,
|
hist: &std::collections::VecDeque<u64>,
|
||||||
m: Option<&Metrics>,
|
m: Option<&Metrics>,
|
||||||
) {
|
) {
|
||||||
|
// Calculate average CPU over the monitoring period
|
||||||
|
let avg_cpu = if !hist.is_empty() {
|
||||||
|
let sum: u64 = hist.iter().sum();
|
||||||
|
sum as f64 / hist.len() as f64
|
||||||
|
} else {
|
||||||
|
0.0
|
||||||
|
};
|
||||||
|
|
||||||
let title = if let Some(mm) = m {
|
let title = if let Some(mm) = m {
|
||||||
format!("CPU avg (now: {:>5.1}%)", mm.cpu_total)
|
format!("CPU (now: {:>5.1}% | avg: {:>5.1}%)", mm.cpu_total, avg_cpu)
|
||||||
} else {
|
} else {
|
||||||
"CPU avg".into()
|
"CPU avg".into()
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Build the top-right info (CPU temp and polling intervals)
|
||||||
|
let top_right_info = if let Some(mm) = m {
|
||||||
|
mm.cpu_temp_c
|
||||||
|
.map(|t| {
|
||||||
|
let icon = if t < 50.0 {
|
||||||
|
"😎"
|
||||||
|
} else if t < 85.0 {
|
||||||
|
"⚠️"
|
||||||
|
} else {
|
||||||
|
"🔥"
|
||||||
|
};
|
||||||
|
format!("CPU Temp: {t:.1}°C {icon}")
|
||||||
|
})
|
||||||
|
.unwrap_or_else(|| "CPU Temp: N/A".into())
|
||||||
|
} else {
|
||||||
|
String::new()
|
||||||
|
};
|
||||||
|
|
||||||
let max_points = area.width.saturating_sub(2) as usize;
|
let max_points = area.width.saturating_sub(2) as usize;
|
||||||
let start = hist.len().saturating_sub(max_points);
|
let start = hist.len().saturating_sub(max_points);
|
||||||
let data: Vec<u64> = hist.iter().skip(start).cloned().collect();
|
let data: Vec<u64> = hist.iter().skip(start).cloned().collect();
|
||||||
|
|
||||||
|
// Render the sparkline with title on left
|
||||||
let spark = Sparkline::default()
|
let spark = Sparkline::default()
|
||||||
.block(Block::default().borders(Borders::ALL).title(title))
|
.block(Block::default().borders(Borders::ALL).title(title))
|
||||||
.data(&data)
|
.data(&data)
|
||||||
.max(100)
|
.max(100)
|
||||||
.style(Style::default().fg(Color::Cyan));
|
.style(Style::default().fg(Color::Cyan));
|
||||||
f.render_widget(spark, area);
|
f.render_widget(spark, area);
|
||||||
|
|
||||||
|
// Render the top-right info as text overlay in the top-right corner
|
||||||
|
if !top_right_info.is_empty() {
|
||||||
|
let info_area = Rect {
|
||||||
|
x: area.x + area.width.saturating_sub(top_right_info.len() as u16 + 2),
|
||||||
|
y: area.y,
|
||||||
|
width: top_right_info.len() as u16 + 1,
|
||||||
|
height: 1,
|
||||||
|
};
|
||||||
|
let info_line = Line::from(Span::raw(top_right_info));
|
||||||
|
f.render_widget(Paragraph::new(info_line), info_area);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Draws the per-core CPU bars with sparklines and trends.
|
/// Draws the per-core CPU bars with sparklines and trends.
|
||||||
|
|||||||
@ -24,8 +24,16 @@ pub fn draw_disks(f: &mut ratatui::Frame<'_>, area: Rect, m: Option<&Metrics>) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Filter duplicates by keeping first occurrence of each unique name
|
||||||
|
let mut seen_names = std::collections::HashSet::new();
|
||||||
|
let unique_disks: Vec<_> = mm
|
||||||
|
.disks
|
||||||
|
.iter()
|
||||||
|
.filter(|d| seen_names.insert(d.name.clone()))
|
||||||
|
.collect();
|
||||||
|
|
||||||
let per_disk_h = 3u16;
|
let per_disk_h = 3u16;
|
||||||
let max_cards = (inner.height / per_disk_h).min(mm.disks.len() as u16) as usize;
|
let max_cards = (inner.height / per_disk_h).min(unique_disks.len() as u16) as usize;
|
||||||
|
|
||||||
let constraints: Vec<Constraint> = (0..max_cards)
|
let constraints: Vec<Constraint> = (0..max_cards)
|
||||||
.map(|_| Constraint::Length(per_disk_h))
|
.map(|_| Constraint::Length(per_disk_h))
|
||||||
@ -36,7 +44,7 @@ pub fn draw_disks(f: &mut ratatui::Frame<'_>, area: Rect, m: Option<&Metrics>) {
|
|||||||
.split(inner);
|
.split(inner);
|
||||||
|
|
||||||
for (i, slot) in rows.iter().enumerate() {
|
for (i, slot) in rows.iter().enumerate() {
|
||||||
let d = &mm.disks[i];
|
let d = unique_disks[i];
|
||||||
let used = d.total.saturating_sub(d.available);
|
let used = d.total.saturating_sub(d.available);
|
||||||
let ratio = if d.total > 0 {
|
let ratio = if d.total > 0 {
|
||||||
used as f64 / d.total as f64
|
used as f64 / d.total as f64
|
||||||
@ -53,23 +61,43 @@ pub fn draw_disks(f: &mut ratatui::Frame<'_>, area: Rect, m: Option<&Metrics>) {
|
|||||||
ratatui::style::Color::Red
|
ratatui::style::Color::Red
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Add indentation for partitions
|
||||||
|
let indent = if d.is_partition { "└─" } else { "" };
|
||||||
|
|
||||||
|
// Add temperature if available
|
||||||
|
let temp_str = d
|
||||||
|
.temperature
|
||||||
|
.map(|t| format!(" {}°C", t.round() as i32))
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
let title = format!(
|
let title = format!(
|
||||||
"{} {} {} / {} ({}%)",
|
"{}{}{}{} {} / {} ({}%)",
|
||||||
|
indent,
|
||||||
disk_icon(&d.name),
|
disk_icon(&d.name),
|
||||||
truncate_middle(&d.name, (slot.width.saturating_sub(6)) as usize / 2),
|
truncate_middle(&d.name, (slot.width.saturating_sub(6)) as usize / 2),
|
||||||
|
temp_str,
|
||||||
human(used),
|
human(used),
|
||||||
human(d.total),
|
human(d.total),
|
||||||
pct
|
pct
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// Indent the entire card (block) for partitions to align with └─ prefix (4 chars)
|
||||||
|
let card_indent = if d.is_partition { 4 } else { 0 };
|
||||||
|
let card_rect = Rect {
|
||||||
|
x: slot.x + card_indent,
|
||||||
|
y: slot.y,
|
||||||
|
width: slot.width.saturating_sub(card_indent),
|
||||||
|
height: slot.height,
|
||||||
|
};
|
||||||
|
|
||||||
let card = Block::default().borders(Borders::ALL).title(title);
|
let card = Block::default().borders(Borders::ALL).title(title);
|
||||||
f.render_widget(card, *slot);
|
f.render_widget(card, card_rect);
|
||||||
|
|
||||||
let inner_card = Rect {
|
let inner_card = Rect {
|
||||||
x: slot.x + 1,
|
x: card_rect.x + 1,
|
||||||
y: slot.y + 1,
|
y: card_rect.y + 1,
|
||||||
width: slot.width.saturating_sub(2),
|
width: card_rect.width.saturating_sub(2),
|
||||||
height: slot.height.saturating_sub(2),
|
height: card_rect.height.saturating_sub(2),
|
||||||
};
|
};
|
||||||
if inner_card.height == 0 {
|
if inner_card.height == 0 {
|
||||||
continue;
|
continue;
|
||||||
|
|||||||
@ -3,7 +3,8 @@
|
|||||||
use crate::types::Metrics;
|
use crate::types::Metrics;
|
||||||
use ratatui::{
|
use ratatui::{
|
||||||
layout::Rect,
|
layout::Rect,
|
||||||
widgets::{Block, Borders},
|
text::{Line, Span},
|
||||||
|
widgets::{Block, Borders, Paragraph},
|
||||||
};
|
};
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
@ -17,20 +18,7 @@ pub fn draw_header(
|
|||||||
procs_interval: Duration,
|
procs_interval: Duration,
|
||||||
) {
|
) {
|
||||||
let base = if let Some(mm) = m {
|
let base = if let Some(mm) = m {
|
||||||
let temp = mm
|
format!("socktop — host: {}", mm.hostname)
|
||||||
.cpu_temp_c
|
|
||||||
.map(|t| {
|
|
||||||
let icon = if t < 50.0 {
|
|
||||||
"😎"
|
|
||||||
} else if t < 85.0 {
|
|
||||||
"⚠️"
|
|
||||||
} else {
|
|
||||||
"🔥"
|
|
||||||
};
|
|
||||||
format!("CPU Temp: {t:.1}°C {icon}")
|
|
||||||
})
|
|
||||||
.unwrap_or_else(|| "CPU Temp: N/A".into());
|
|
||||||
format!("socktop — host: {} | {}", mm.hostname, temp)
|
|
||||||
} else {
|
} else {
|
||||||
"socktop — connecting...".into()
|
"socktop — connecting...".into()
|
||||||
};
|
};
|
||||||
@ -38,15 +26,30 @@ pub fn draw_header(
|
|||||||
let tls_txt = if is_tls { "🔒 TLS" } else { "🔒✗ TLS" };
|
let tls_txt = if is_tls { "🔒 TLS" } else { "🔒✗ TLS" };
|
||||||
// Token indicator
|
// Token indicator
|
||||||
let tok_txt = if has_token { "🔑 token" } else { "" };
|
let tok_txt = if has_token { "🔑 token" } else { "" };
|
||||||
let mi = metrics_interval.as_millis();
|
|
||||||
let pi = procs_interval.as_millis();
|
|
||||||
let intervals = format!("⏱ {mi}ms metrics | {pi}ms procs");
|
|
||||||
let mut parts = vec![base, tls_txt.into()];
|
let mut parts = vec![base, tls_txt.into()];
|
||||||
if !tok_txt.is_empty() {
|
if !tok_txt.is_empty() {
|
||||||
parts.push(tok_txt.into());
|
parts.push(tok_txt.into());
|
||||||
}
|
}
|
||||||
parts.push(intervals);
|
parts.push("(a: about, h: help, q: quit)".into());
|
||||||
parts.push("(q to quit)".into());
|
|
||||||
let title = parts.join(" | ");
|
let title = parts.join(" | ");
|
||||||
|
|
||||||
|
// Render the block with left-aligned title
|
||||||
f.render_widget(Block::default().title(title).borders(Borders::BOTTOM), area);
|
f.render_widget(Block::default().title(title).borders(Borders::BOTTOM), area);
|
||||||
|
|
||||||
|
// Render polling intervals on the right side
|
||||||
|
let mi = metrics_interval.as_millis();
|
||||||
|
let pi = procs_interval.as_millis();
|
||||||
|
let intervals = format!("⏱ {mi}ms metrics | {pi}ms procs");
|
||||||
|
let intervals_width = intervals.len() as u16;
|
||||||
|
|
||||||
|
if area.width > intervals_width + 2 {
|
||||||
|
let right_area = Rect {
|
||||||
|
x: area.x + area.width.saturating_sub(intervals_width + 1),
|
||||||
|
y: area.y,
|
||||||
|
width: intervals_width,
|
||||||
|
height: 1,
|
||||||
|
};
|
||||||
|
let intervals_line = Line::from(Span::raw(intervals));
|
||||||
|
f.render_widget(Paragraph::new(intervals_line), right_area);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -5,6 +5,11 @@ pub mod disks;
|
|||||||
pub mod gpu;
|
pub mod gpu;
|
||||||
pub mod header;
|
pub mod header;
|
||||||
pub mod mem;
|
pub mod mem;
|
||||||
|
pub mod modal;
|
||||||
|
pub mod modal_connection;
|
||||||
|
pub mod modal_format;
|
||||||
|
pub mod modal_process;
|
||||||
|
pub mod modal_types;
|
||||||
pub mod net;
|
pub mod net;
|
||||||
pub mod processes;
|
pub mod processes;
|
||||||
pub mod swap;
|
pub mod swap;
|
||||||
|
|||||||
634
socktop/src/ui/modal.rs
Normal file
634
socktop/src/ui/modal.rs
Normal file
@ -0,0 +1,634 @@
|
|||||||
|
//! Modal window system for socktop TUI application
|
||||||
|
|
||||||
|
use super::theme::MODAL_DIM_BG;
|
||||||
|
use crossterm::event::KeyCode;
|
||||||
|
use ratatui::{
|
||||||
|
Frame,
|
||||||
|
layout::{Alignment, Constraint, Direction, Layout, Rect},
|
||||||
|
style::{Color, Modifier, Style},
|
||||||
|
text::Line,
|
||||||
|
widgets::{Block, Borders, Clear, Paragraph, Wrap},
|
||||||
|
};
|
||||||
|
|
||||||
|
// Re-export types from modal_types
|
||||||
|
pub use super::modal_types::{
|
||||||
|
ModalAction, ModalButton, ModalType, ProcessHistoryData, ProcessModalData,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct ModalManager {
|
||||||
|
stack: Vec<ModalType>,
|
||||||
|
pub(super) active_button: ModalButton,
|
||||||
|
pub thread_scroll_offset: usize,
|
||||||
|
pub journal_scroll_offset: usize,
|
||||||
|
pub thread_scroll_max: usize,
|
||||||
|
pub journal_scroll_max: usize,
|
||||||
|
pub help_scroll_offset: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ModalManager {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
stack: Vec::new(),
|
||||||
|
active_button: ModalButton::Retry,
|
||||||
|
thread_scroll_offset: 0,
|
||||||
|
journal_scroll_offset: 0,
|
||||||
|
thread_scroll_max: 0,
|
||||||
|
journal_scroll_max: 0,
|
||||||
|
help_scroll_offset: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn is_active(&self) -> bool {
|
||||||
|
!self.stack.is_empty()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn current_modal(&self) -> Option<&ModalType> {
|
||||||
|
self.stack.last()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn push_modal(&mut self, modal: ModalType) {
|
||||||
|
self.stack.push(modal);
|
||||||
|
self.active_button = match self.stack.last() {
|
||||||
|
Some(ModalType::ConnectionError { .. }) => ModalButton::Retry,
|
||||||
|
Some(ModalType::ProcessDetails { .. }) => {
|
||||||
|
// Reset scroll state for new process details
|
||||||
|
self.thread_scroll_offset = 0;
|
||||||
|
self.journal_scroll_offset = 0;
|
||||||
|
self.thread_scroll_max = 0;
|
||||||
|
self.journal_scroll_max = 0;
|
||||||
|
ModalButton::Ok
|
||||||
|
}
|
||||||
|
Some(ModalType::About) => ModalButton::Ok,
|
||||||
|
Some(ModalType::Help) => {
|
||||||
|
// Reset scroll state for help modal
|
||||||
|
self.help_scroll_offset = 0;
|
||||||
|
ModalButton::Ok
|
||||||
|
}
|
||||||
|
Some(ModalType::Confirmation { .. }) => ModalButton::Confirm,
|
||||||
|
Some(ModalType::Info { .. }) => ModalButton::Ok,
|
||||||
|
None => ModalButton::Ok,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
pub fn pop_modal(&mut self) -> Option<ModalType> {
|
||||||
|
let m = self.stack.pop();
|
||||||
|
if let Some(next) = self.stack.last() {
|
||||||
|
self.active_button = match next {
|
||||||
|
ModalType::ConnectionError { .. } => ModalButton::Retry,
|
||||||
|
ModalType::ProcessDetails { .. } => ModalButton::Ok,
|
||||||
|
ModalType::About => ModalButton::Ok,
|
||||||
|
ModalType::Help => ModalButton::Ok,
|
||||||
|
ModalType::Confirmation { .. } => ModalButton::Confirm,
|
||||||
|
ModalType::Info { .. } => ModalButton::Ok,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
m
|
||||||
|
}
|
||||||
|
pub fn update_connection_error_countdown(&mut self, new_countdown: Option<u64>) {
|
||||||
|
if let Some(ModalType::ConnectionError {
|
||||||
|
auto_retry_countdown,
|
||||||
|
..
|
||||||
|
}) = self.stack.last_mut()
|
||||||
|
{
|
||||||
|
*auto_retry_countdown = new_countdown;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn handle_key(&mut self, key: KeyCode) -> ModalAction {
|
||||||
|
if !self.is_active() {
|
||||||
|
return ModalAction::None;
|
||||||
|
}
|
||||||
|
match key {
|
||||||
|
KeyCode::Esc => {
|
||||||
|
self.pop_modal();
|
||||||
|
ModalAction::Cancel
|
||||||
|
}
|
||||||
|
KeyCode::Enter => self.handle_enter(),
|
||||||
|
KeyCode::Tab | KeyCode::Right => {
|
||||||
|
self.next_button();
|
||||||
|
ModalAction::None
|
||||||
|
}
|
||||||
|
KeyCode::BackTab | KeyCode::Left => {
|
||||||
|
self.prev_button();
|
||||||
|
ModalAction::None
|
||||||
|
}
|
||||||
|
KeyCode::Char('r') | KeyCode::Char('R') => {
|
||||||
|
if matches!(self.stack.last(), Some(ModalType::ConnectionError { .. })) {
|
||||||
|
ModalAction::RetryConnection
|
||||||
|
} else {
|
||||||
|
ModalAction::None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
KeyCode::Char('q') | KeyCode::Char('Q') => {
|
||||||
|
if matches!(self.stack.last(), Some(ModalType::ConnectionError { .. })) {
|
||||||
|
ModalAction::ExitApp
|
||||||
|
} else {
|
||||||
|
ModalAction::None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
KeyCode::Char('x') | KeyCode::Char('X') => {
|
||||||
|
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||||
|
// Close all ProcessDetails modals at once (handles parent navigation chain)
|
||||||
|
while matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||||
|
self.pop_modal();
|
||||||
|
}
|
||||||
|
ModalAction::Dismiss
|
||||||
|
} else {
|
||||||
|
ModalAction::None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
KeyCode::Char('j') | KeyCode::Char('J') => {
|
||||||
|
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||||
|
self.thread_scroll_offset = self
|
||||||
|
.thread_scroll_offset
|
||||||
|
.saturating_add(1)
|
||||||
|
.min(self.thread_scroll_max);
|
||||||
|
ModalAction::Handled
|
||||||
|
} else {
|
||||||
|
ModalAction::None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
KeyCode::Char('k') | KeyCode::Char('K') => {
|
||||||
|
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||||
|
self.thread_scroll_offset = self.thread_scroll_offset.saturating_sub(1);
|
||||||
|
ModalAction::Handled
|
||||||
|
} else {
|
||||||
|
ModalAction::None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
KeyCode::Char('d') | KeyCode::Char('D') => {
|
||||||
|
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||||
|
self.thread_scroll_offset = self
|
||||||
|
.thread_scroll_offset
|
||||||
|
.saturating_add(10)
|
||||||
|
.min(self.thread_scroll_max);
|
||||||
|
ModalAction::Handled
|
||||||
|
} else {
|
||||||
|
ModalAction::None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
KeyCode::Char('u') | KeyCode::Char('U') => {
|
||||||
|
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||||
|
self.thread_scroll_offset = self.thread_scroll_offset.saturating_sub(10);
|
||||||
|
ModalAction::Handled
|
||||||
|
} else {
|
||||||
|
ModalAction::None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
KeyCode::Char('[') => {
|
||||||
|
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||||
|
self.journal_scroll_offset = self.journal_scroll_offset.saturating_sub(1);
|
||||||
|
ModalAction::Handled
|
||||||
|
} else {
|
||||||
|
ModalAction::None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
KeyCode::Char(']') => {
|
||||||
|
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||||
|
self.journal_scroll_offset = self
|
||||||
|
.journal_scroll_offset
|
||||||
|
.saturating_add(1)
|
||||||
|
.min(self.journal_scroll_max);
|
||||||
|
ModalAction::Handled
|
||||||
|
} else {
|
||||||
|
ModalAction::None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
KeyCode::Char('p') | KeyCode::Char('P') => {
|
||||||
|
// Switch to parent process if it exists
|
||||||
|
if let Some(ModalType::ProcessDetails { pid }) = self.stack.last() {
|
||||||
|
// We need to get the parent PID from the process details
|
||||||
|
// For now, return a special action that the app can handle
|
||||||
|
// The app has access to the process details and can extract parent_pid
|
||||||
|
ModalAction::SwitchToParentProcess(*pid)
|
||||||
|
} else {
|
||||||
|
ModalAction::None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
KeyCode::Up => {
|
||||||
|
if matches!(self.stack.last(), Some(ModalType::Help)) {
|
||||||
|
self.help_scroll_offset = self.help_scroll_offset.saturating_sub(1);
|
||||||
|
ModalAction::Handled
|
||||||
|
} else {
|
||||||
|
ModalAction::None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
KeyCode::Down => {
|
||||||
|
if matches!(self.stack.last(), Some(ModalType::Help)) {
|
||||||
|
self.help_scroll_offset = self.help_scroll_offset.saturating_add(1);
|
||||||
|
ModalAction::Handled
|
||||||
|
} else {
|
||||||
|
ModalAction::None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => ModalAction::None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fn handle_enter(&mut self) -> ModalAction {
|
||||||
|
match (&self.stack.last(), &self.active_button) {
|
||||||
|
(Some(ModalType::ConnectionError { .. }), ModalButton::Retry) => {
|
||||||
|
ModalAction::RetryConnection
|
||||||
|
}
|
||||||
|
(Some(ModalType::ConnectionError { .. }), ModalButton::Exit) => ModalAction::ExitApp,
|
||||||
|
(Some(ModalType::ProcessDetails { .. }), ModalButton::Ok) => {
|
||||||
|
self.pop_modal();
|
||||||
|
ModalAction::Dismiss
|
||||||
|
}
|
||||||
|
(Some(ModalType::About), ModalButton::Ok) => {
|
||||||
|
self.pop_modal();
|
||||||
|
ModalAction::Dismiss
|
||||||
|
}
|
||||||
|
(Some(ModalType::Help), ModalButton::Ok) => {
|
||||||
|
self.pop_modal();
|
||||||
|
ModalAction::Dismiss
|
||||||
|
}
|
||||||
|
(Some(ModalType::Confirmation { .. }), ModalButton::Confirm) => ModalAction::Confirm,
|
||||||
|
(Some(ModalType::Confirmation { .. }), ModalButton::Cancel) => ModalAction::Cancel,
|
||||||
|
(Some(ModalType::Info { .. }), ModalButton::Ok) => {
|
||||||
|
self.pop_modal();
|
||||||
|
ModalAction::Dismiss
|
||||||
|
}
|
||||||
|
_ => ModalAction::None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fn next_button(&mut self) {
|
||||||
|
self.active_button = match (&self.stack.last(), &self.active_button) {
|
||||||
|
(Some(ModalType::ConnectionError { .. }), ModalButton::Retry) => ModalButton::Exit,
|
||||||
|
(Some(ModalType::ConnectionError { .. }), ModalButton::Exit) => ModalButton::Retry,
|
||||||
|
(Some(ModalType::Confirmation { .. }), ModalButton::Confirm) => ModalButton::Cancel,
|
||||||
|
(Some(ModalType::Confirmation { .. }), ModalButton::Cancel) => ModalButton::Confirm,
|
||||||
|
_ => self.active_button.clone(),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
/// Move focus to the previous button of the top modal.
///
/// Every modal has at most two buttons, so cycling backwards is identical to
/// cycling forwards; we simply delegate to `next_button`. If a three-button
/// modal is ever added, this needs a real reverse implementation.
fn prev_button(&mut self) {
    self.next_button();
}
|
||||||
|
|
||||||
|
/// Render the top-most modal, if the stack is non-empty: first dim the
/// whole screen, then draw the modal's content on top of it.
pub fn render(&mut self, f: &mut Frame, data: ProcessModalData) {
    // Clone the top entry so `self` is free for the mutable render calls.
    let Some(top) = self.stack.last().cloned() else {
        return;
    };
    self.render_background_dim(f);
    self.render_modal_content(f, &top, data);
}
|
||||||
|
|
||||||
|
fn render_background_dim(&self, f: &mut Frame) {
|
||||||
|
let area = f.area();
|
||||||
|
f.render_widget(Clear, area);
|
||||||
|
f.render_widget(
|
||||||
|
Block::default()
|
||||||
|
.style(Style::default().bg(MODAL_DIM_BG).fg(MODAL_DIM_BG))
|
||||||
|
.borders(Borders::NONE),
|
||||||
|
area,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Pick a centred rectangle sized for `modal`'s variant, clear it, and
/// dispatch to that variant's renderer.
fn render_modal_content(&mut self, f: &mut Frame, modal: &ModalType, data: ProcessModalData) {
    let area = f.area();
    // Different sizes for different modal types
    let modal_area = match modal {
        ModalType::ProcessDetails { .. } => {
            // Process details modal uses almost full screen (95% width, 90% height)
            self.centered_rect(95, 90, area)
        }
        ModalType::About => {
            // About modal uses medium size
            self.centered_rect(90, 90, area)
        }
        ModalType::Help => {
            // Help modal uses medium size
            self.centered_rect(70, 80, area)
        }
        _ => {
            // Other modals use smaller size
            self.centered_rect(70, 50, area)
        }
    };
    // Erase whatever was drawn under the modal before rendering into it.
    f.render_widget(Clear, modal_area);
    match modal {
        ModalType::ConnectionError {
            message,
            disconnected_at,
            retry_count,
            auto_retry_countdown,
        } => self.render_connection_error(
            f,
            modal_area,
            message,
            *disconnected_at,
            *retry_count,
            *auto_retry_countdown,
        ),
        ModalType::ProcessDetails { pid } => {
            self.render_process_details(f, modal_area, *pid, data)
        }
        ModalType::About => self.render_about(f, modal_area),
        ModalType::Help => self.render_help(f, modal_area),
        ModalType::Confirmation {
            title,
            message,
            confirm_text,
            cancel_text,
        } => self.render_confirmation(f, modal_area, title, message, confirm_text, cancel_text),
        ModalType::Info { title, message } => self.render_info(f, modal_area, title, message),
    }
}
|
||||||
|
|
||||||
|
/// Render a confirmation dialog: a bordered, titled box with the message
/// centred above a Confirm/Cancel button row. The button matching
/// `self.active_button` is drawn highlighted (inverted + bold).
fn render_confirmation(
    &self,
    f: &mut Frame,
    area: Rect,
    title: &str,
    message: &str,
    confirm_text: &str,
    cancel_text: &str,
) {
    // Message area on top, fixed 3-row button strip at the bottom.
    let chunks = Layout::default()
        .direction(Direction::Vertical)
        .constraints([Constraint::Min(1), Constraint::Length(3)])
        .split(area);
    let block = Block::default()
        .title(format!(" {title} "))
        .borders(Borders::ALL)
        .style(Style::default().bg(Color::Black));
    f.render_widget(block, area);
    f.render_widget(
        Paragraph::new(message)
            .style(Style::default().fg(Color::White))
            .alignment(Alignment::Center)
            .wrap(Wrap { trim: true }),
        chunks[0],
    );
    // Split the button strip into two equal halves.
    let buttons = Layout::default()
        .direction(Direction::Horizontal)
        .constraints([Constraint::Percentage(50), Constraint::Percentage(50)])
        .split(chunks[1]);
    // Active button is rendered inverted and bold; inactive just coloured.
    let confirm_style = if self.active_button == ModalButton::Confirm {
        Style::default()
            .bg(Color::Green)
            .fg(Color::Black)
            .add_modifier(Modifier::BOLD)
    } else {
        Style::default().fg(Color::Green)
    };
    let cancel_style = if self.active_button == ModalButton::Cancel {
        Style::default()
            .bg(Color::Red)
            .fg(Color::Black)
            .add_modifier(Modifier::BOLD)
    } else {
        Style::default().fg(Color::Red)
    };
    f.render_widget(
        Paragraph::new(confirm_text)
            .style(confirm_style)
            .alignment(Alignment::Center),
        buttons[0],
    );
    f.render_widget(
        Paragraph::new(cancel_text)
            .style(cancel_style)
            .alignment(Alignment::Center),
        buttons[1],
    );
}
|
||||||
|
|
||||||
|
/// Render an informational dialog: a bordered, titled box with the message
/// centred above a single OK hint line, highlighted when focused.
fn render_info(&self, f: &mut Frame, area: Rect, title: &str, message: &str) {
    // Message area on top, fixed 3-row footer for the OK hint.
    let chunks = Layout::default()
        .direction(Direction::Vertical)
        .constraints([Constraint::Min(1), Constraint::Length(3)])
        .split(area);
    let block = Block::default()
        .title(format!(" {title} "))
        .borders(Borders::ALL)
        .style(Style::default().bg(Color::Black));
    f.render_widget(block, area);
    f.render_widget(
        Paragraph::new(message)
            .style(Style::default().fg(Color::White))
            .alignment(Alignment::Center)
            .wrap(Wrap { trim: true }),
        chunks[0],
    );
    // OK hint is inverted + bold when it is the active button.
    let ok_style = if self.active_button == ModalButton::Ok {
        Style::default()
            .bg(Color::Blue)
            .fg(Color::White)
            .add_modifier(Modifier::BOLD)
    } else {
        Style::default().fg(Color::Blue)
    };
    f.render_widget(
        Paragraph::new("[ Enter ] OK")
            .style(ok_style)
            .alignment(Alignment::Center),
        chunks[1],
    );
}
|
||||||
|
|
||||||
|
/// Render the About dialog: ASCII-art logo, version (from Cargo metadata),
/// project links and license, with a Close hint pinned to the bottom row.
fn render_about(&self, f: &mut Frame, area: Rect) {
    //get ASCII art from a constant stored in theme.rs
    use super::theme::ASCII_ART;

    // Version string is baked in at compile time from Cargo.toml.
    let version = env!("CARGO_PKG_VERSION");

    let about_text = format!(
        "{}\n\
        Version {}\n\
        \n\
        A terminal first remote monitoring tool\n\
        \n\
        Website: https://socktop.io\n\
        GitHub: https://github.com/jasonwitty/socktop\n\
        \n\
        License: MIT License\n\
        \n\
        Created by Jason Witty\n\
        jasonpwitty+socktop@proton.me",
        ASCII_ART, version
    );

    // Render the border block
    let block = Block::default()
        .title(" About socktop ")
        .borders(Borders::ALL)
        .style(Style::default().bg(Color::Black).fg(Color::DarkGray));
    f.render_widget(block, area);

    // Calculate inner area manually to avoid any parent styling
    let inner_area = Rect {
        x: area.x + 1,
        y: area.y + 1,
        width: area.width.saturating_sub(2),
        // Leave room for button at bottom
        // NOTE(review): only the 2 border rows are subtracted here, so the
        // button row below overlaps the last content row — confirm intended.
        height: area.height.saturating_sub(2),
    };

    // Render content area with explicit black background
    f.render_widget(
        Paragraph::new(about_text)
            .style(Style::default().fg(Color::Cyan).bg(Color::Black))
            .alignment(Alignment::Center)
            .wrap(Wrap { trim: false }),
        inner_area,
    );

    // Button area: single row just above the bottom border.
    let button_area = Rect {
        x: area.x + 1,
        y: area.y + area.height.saturating_sub(2),
        width: area.width.saturating_sub(2),
        height: 1,
    };

    // Close hint is inverted + bold when it is the active button.
    let ok_style = if self.active_button == ModalButton::Ok {
        Style::default()
            .bg(Color::Blue)
            .fg(Color::White)
            .add_modifier(Modifier::BOLD)
    } else {
        Style::default().fg(Color::Blue).bg(Color::Black)
    };

    f.render_widget(
        Paragraph::new("[ Enter ] Close")
            .style(ok_style)
            .alignment(Alignment::Center),
        button_area,
    );
}
|
||||||
|
|
||||||
|
/// Render the scrollable hotkey help dialog. Content scrolls by line via
/// `self.help_scroll_offset` (clamped here), with a scrollbar shown only
/// when the text overflows the visible area, and a Close hint at the bottom.
fn render_help(&self, f: &mut Frame, area: Rect) {
    let help_lines = vec![
        "GLOBAL",
        " q/Q/Esc ........ Quit │ a/A ....... About │ h/H ....... Help",
        "",
        "PROCESS LIST",
        " / .............. Start/edit fuzzy search",
        " c/C ............ Clear search filter",
        " ↑/↓ ............ Select/navigate processes",
        " Enter .......... Open Process Details",
        " x/X ............ Clear selection",
        " Click header ... Sort by column (CPU/Mem)",
        " Click row ...... Select process",
        "",
        "SEARCH MODE (after pressing /)",
        " Type ........... Enter search query (fuzzy match)",
        " ↑/↓ ............ Navigate results while typing",
        " Esc ............ Cancel search and clear filter",
        " Enter .......... Apply filter and select first result",
        "",
        "CPU PER-CORE",
        " ←/→ ............ Scroll cores │ PgUp/PgDn ... Page up/down",
        " Home/End ....... Jump to first/last core",
        "",
        "PROCESS DETAILS MODAL",
        " x/X ............ Close modal (all parent modals)",
        " p/P ............ Navigate to parent process",
        " j/k ............ Scroll threads ↓/↑ (1 line)",
        " d/u ............ Scroll threads ↓/↑ (10 lines)",
        " [ / ] .......... Scroll journal ↑/↓",
        " Esc/Enter ...... Close modal",
        "",
        "MODAL NAVIGATION",
        " Tab/→ .......... Next button │ Shift+Tab/← ... Previous button",
        " Enter .......... Confirm/OK │ Esc ............ Cancel/Close",
    ];

    // Render the border block
    let block = Block::default()
        .title(" Hotkey Help (use ↑/↓ to scroll) ")
        .borders(Borders::ALL)
        .style(Style::default().bg(Color::Black).fg(Color::DarkGray));
    f.render_widget(block, area);

    // Split into content area and button area
    let chunks = Layout::default()
        .direction(Direction::Vertical)
        .constraints([Constraint::Min(1), Constraint::Length(1)])
        .split(Rect {
            x: area.x + 1,
            y: area.y + 1,
            width: area.width.saturating_sub(2),
            height: area.height.saturating_sub(2),
        });

    let content_area = chunks[0];
    let button_area = chunks[1];

    // Calculate visible window; clamp the stored offset so resizing the
    // terminal can never scroll past the end of the text.
    let visible_height = content_area.height as usize;
    let total_lines = help_lines.len();
    let max_scroll = total_lines.saturating_sub(visible_height);
    let scroll_offset = self.help_scroll_offset.min(max_scroll);

    // Get visible lines
    let visible_lines: Vec<Line> = help_lines
        .iter()
        .skip(scroll_offset)
        .take(visible_height)
        .map(|s| Line::from(*s))
        .collect();

    // Render scrollable content
    f.render_widget(
        Paragraph::new(visible_lines)
            .style(Style::default().fg(Color::Cyan).bg(Color::Black))
            .alignment(Alignment::Left),
        content_area,
    );

    // Render scrollbar if needed
    if total_lines > visible_height {
        use ratatui::widgets::{Scrollbar, ScrollbarOrientation, ScrollbarState};

        // Scrollbar sits on the inside of the right border.
        let scrollbar_area = Rect {
            x: area.x + area.width.saturating_sub(2),
            y: area.y + 1,
            width: 1,
            height: area.height.saturating_sub(2),
        };

        let mut scrollbar_state = ScrollbarState::new(max_scroll).position(scroll_offset);

        let scrollbar = Scrollbar::new(ScrollbarOrientation::VerticalRight)
            .begin_symbol(Some("↑"))
            .end_symbol(Some("↓"))
            .style(Style::default().fg(Color::DarkGray));

        f.render_stateful_widget(scrollbar, scrollbar_area, &mut scrollbar_state);
    }

    // Button area: Close hint is inverted + bold when it is the active button.
    let ok_style = if self.active_button == ModalButton::Ok {
        Style::default()
            .bg(Color::Blue)
            .fg(Color::White)
            .add_modifier(Modifier::BOLD)
    } else {
        Style::default().fg(Color::Blue).bg(Color::Black)
    };

    f.render_widget(
        Paragraph::new("[ Enter ] Close")
            .style(ok_style)
            .alignment(Alignment::Center),
        button_area,
    );
}
|
||||||
|
|
||||||
|
fn centered_rect(&self, percent_x: u16, percent_y: u16, r: Rect) -> Rect {
|
||||||
|
let vert = Layout::default()
|
||||||
|
.direction(Direction::Vertical)
|
||||||
|
.constraints([
|
||||||
|
Constraint::Percentage((100 - percent_y) / 2),
|
||||||
|
Constraint::Percentage(percent_y),
|
||||||
|
Constraint::Percentage((100 - percent_y) / 2),
|
||||||
|
])
|
||||||
|
.split(r);
|
||||||
|
Layout::default()
|
||||||
|
.direction(Direction::Horizontal)
|
||||||
|
.constraints([
|
||||||
|
Constraint::Percentage((100 - percent_x) / 2),
|
||||||
|
Constraint::Percentage(percent_x),
|
||||||
|
Constraint::Percentage((100 - percent_x) / 2),
|
||||||
|
])
|
||||||
|
.split(vert[1])[1]
|
||||||
|
}
|
||||||
|
}
|
||||||
297
socktop/src/ui/modal_connection.rs
Normal file
297
socktop/src/ui/modal_connection.rs
Normal file
@ -0,0 +1,297 @@
|
|||||||
|
//! Connection error modal rendering
|
||||||
|
|
||||||
|
use std::time::Instant;
|
||||||
|
|
||||||
|
use super::modal_format::format_duration;
|
||||||
|
use super::theme::{
|
||||||
|
BTN_EXIT_BG_ACTIVE, BTN_EXIT_FG_ACTIVE, BTN_EXIT_FG_INACTIVE, BTN_EXIT_TEXT,
|
||||||
|
BTN_RETRY_BG_ACTIVE, BTN_RETRY_FG_ACTIVE, BTN_RETRY_FG_INACTIVE, BTN_RETRY_TEXT, ICON_CLUSTER,
|
||||||
|
ICON_COUNTDOWN_LABEL, ICON_MESSAGE, ICON_OFFLINE_LABEL, ICON_RETRY_LABEL, ICON_WARNING_TITLE,
|
||||||
|
LARGE_ERROR_ICON, MODAL_AGENT_FG, MODAL_BG, MODAL_BORDER_FG, MODAL_COUNTDOWN_LABEL_FG,
|
||||||
|
MODAL_FG, MODAL_HINT_FG, MODAL_ICON_PINK, MODAL_OFFLINE_LABEL_FG, MODAL_RETRY_LABEL_FG,
|
||||||
|
MODAL_TITLE_FG,
|
||||||
|
};
|
||||||
|
use ratatui::{
|
||||||
|
Frame,
|
||||||
|
layout::{Alignment, Constraint, Direction, Layout, Rect},
|
||||||
|
style::{Color, Modifier, Style},
|
||||||
|
text::{Line, Span, Text},
|
||||||
|
widgets::{Block, Borders, Paragraph, Wrap},
|
||||||
|
};
|
||||||
|
|
||||||
|
use super::modal::{ModalButton, ModalManager};
|
||||||
|
|
||||||
|
impl ModalManager {
|
||||||
|
/// Render the connection-error modal: title bar, a body with either a large
/// ASCII warning icon (when the modal is big enough) or a compact cluster
/// icon, the sanitized error message, offline duration, retry count and an
/// optional auto-retry countdown, and the Retry/Exit button row.
pub(super) fn render_connection_error(
    &self,
    f: &mut Frame,
    area: Rect,
    message: &str,
    disconnected_at: Instant,
    retry_count: u32,
    auto_retry_countdown: Option<u64>,
) {
    let duration_text = format_duration(disconnected_at.elapsed());
    // Title row / body / button strip.
    let chunks = Layout::default()
        .direction(Direction::Vertical)
        .constraints([
            Constraint::Length(3),
            Constraint::Min(4),
            Constraint::Length(4),
        ])
        .split(area);
    let block = Block::default()
        .title(ICON_WARNING_TITLE)
        .title_style(
            Style::default()
                .fg(MODAL_TITLE_FG)
                .add_modifier(Modifier::BOLD),
        )
        .borders(Borders::ALL)
        .border_style(Style::default().fg(MODAL_BORDER_FG))
        .style(Style::default().bg(MODAL_BG).fg(MODAL_FG));
    f.render_widget(block, area);

    let content_area = chunks[1];
    // Budget for one display line, minus label/icon overhead.
    let max_w = content_area.width.saturating_sub(15) as usize;
    // Sanitize the raw error: collapse known noisy messages and strip the
    // "Failed to fetch metrics:" / "Retry failed:" prefixes, truncating the
    // remainder to fit one line.
    // NOTE(review): the `&s[..max_w - 3]` slices below index by byte, which
    // panics if the cut lands inside a multi-byte UTF-8 char — confirm
    // messages are ASCII or switch to a char-boundary-safe truncation.
    let clean_message = if message.to_lowercase().contains("hostname verification")
        || message.contains("socktop_connector")
    {
        "Connection failed - hostname verification disabled".to_string()
    } else if message.contains("Failed to fetch metrics:") {
        if let Some(p) = message.find(':') {
            let ess = message[p + 1..].trim();
            if ess.len() > max_w {
                format!("{}...", &ess[..max_w.saturating_sub(3)])
            } else {
                ess.to_string()
            }
        } else {
            "Connection error".to_string()
        }
    } else if message.starts_with("Retry failed:") {
        if let Some(p) = message.find(':') {
            let ess = message[p + 1..].trim();
            if ess.len() > max_w {
                format!("{}...", &ess[..max_w.saturating_sub(3)])
            } else {
                ess.to_string()
            }
        } else {
            "Retry failed".to_string()
        }
    } else if message.len() > max_w {
        format!("{}...", &message[..max_w.saturating_sub(3)])
    } else {
        message.to_string()
    };
    // Generic one-line truncation used for all the display strings below.
    let truncate = |s: &str| {
        if s.len() > max_w {
            format!("{}...", &s[..max_w.saturating_sub(3)])
        } else {
            s.to_string()
        }
    };
    let agent_text = truncate("📡 Cannot connect to socktop agent");
    let message_text = truncate(&clean_message);
    let duration_display = truncate(&duration_text);
    let retry_display = truncate(&retry_count.to_string());
    let countdown_text = auto_retry_countdown.map(|c| {
        if c == 0 {
            "Auto retry now...".to_string()
        } else {
            format!("{c}s")
        }
    });

    // Determine if we have enough space (height + width) to show large centered icon
    let icon_max_width = LARGE_ERROR_ICON
        .iter()
        .map(|l| l.trim().chars().count())
        .max()
        .unwrap_or(0) as u16;
    let large_allowed = content_area.height >= (LARGE_ERROR_ICON.len() as u16 + 8)
        && content_area.width >= icon_max_width + 6; // small margin for borders/padding
    let mut icon_lines: Vec<Line> = Vec::new();
    if large_allowed {
        // Colourize the ASCII icon per character: '!' white+bold, outline
        // characters pink+bold, everything else plain pink.
        for &raw in LARGE_ERROR_ICON.iter() {
            let trimmed = raw.trim();
            icon_lines.push(Line::from(
                trimmed
                    .chars()
                    .map(|ch| {
                        if ch == '!' {
                            Span::styled(
                                ch.to_string(),
                                Style::default()
                                    .fg(Color::White)
                                    .add_modifier(Modifier::BOLD),
                            )
                        } else if ch == '/' || ch == '\\' || ch == '_' {
                            // keep outline in pink
                            Span::styled(
                                ch.to_string(),
                                Style::default()
                                    .fg(MODAL_ICON_PINK)
                                    .add_modifier(Modifier::BOLD),
                            )
                        } else if ch == ' ' {
                            Span::raw(" ")
                        } else {
                            Span::styled(ch.to_string(), Style::default().fg(MODAL_ICON_PINK))
                        }
                    })
                    .collect::<Vec<_>>(),
            ));
        }
        icon_lines.push(Line::from("")); // blank spacer line below icon
    }

    // Assemble the info lines: compact icon (small layout only), agent line,
    // message, offline duration, retry count, optional countdown.
    let mut info_lines: Vec<Line> = Vec::new();
    if !large_allowed {
        info_lines.push(Line::from(vec![Span::styled(
            ICON_CLUSTER,
            Style::default().fg(MODAL_ICON_PINK),
        )]));
        info_lines.push(Line::from(""));
    }
    info_lines.push(Line::from(vec![Span::styled(
        &agent_text,
        Style::default().fg(MODAL_AGENT_FG),
    )]));
    info_lines.push(Line::from(""));
    info_lines.push(Line::from(vec![
        Span::styled(ICON_MESSAGE, Style::default().fg(MODAL_HINT_FG)),
        Span::styled(&message_text, Style::default().fg(MODAL_AGENT_FG)),
    ]));
    info_lines.push(Line::from(""));
    info_lines.push(Line::from(vec![
        Span::styled(
            ICON_OFFLINE_LABEL,
            Style::default().fg(MODAL_OFFLINE_LABEL_FG),
        ),
        Span::styled(
            &duration_display,
            Style::default()
                .fg(Color::White)
                .add_modifier(Modifier::BOLD),
        ),
    ]));
    info_lines.push(Line::from(vec![
        Span::styled(ICON_RETRY_LABEL, Style::default().fg(MODAL_RETRY_LABEL_FG)),
        Span::styled(
            &retry_display,
            Style::default()
                .fg(Color::White)
                .add_modifier(Modifier::BOLD),
        ),
    ]));
    if let Some(cd) = &countdown_text {
        info_lines.push(Line::from(vec![
            Span::styled(
                ICON_COUNTDOWN_LABEL,
                Style::default().fg(MODAL_COUNTDOWN_LABEL_FG),
            ),
            Span::styled(
                cd,
                Style::default()
                    .fg(Color::White)
                    .add_modifier(Modifier::BOLD),
            ),
        ]));
    }

    // Inset the body by 2 columns on each side.
    let constrained = Rect {
        x: content_area.x + 2,
        y: content_area.y,
        width: content_area.width.saturating_sub(4),
        height: content_area.height,
    };
    if large_allowed {
        // Icon gets exactly its own height; info fills the remainder.
        let split = Layout::default()
            .direction(Direction::Vertical)
            .constraints([
                Constraint::Length(icon_lines.len() as u16),
                Constraint::Min(0),
            ])
            .split(constrained);
        // Center the icon block; each line already trimmed so per-line centering keeps shape
        f.render_widget(
            Paragraph::new(Text::from(icon_lines))
                .alignment(Alignment::Center)
                .wrap(Wrap { trim: false }),
            split[0],
        );
        f.render_widget(
            Paragraph::new(Text::from(info_lines))
                .alignment(Alignment::Center)
                .wrap(Wrap { trim: true }),
            split[1],
        );
    } else {
        f.render_widget(
            Paragraph::new(Text::from(info_lines))
                .alignment(Alignment::Center)
                .wrap(Wrap { trim: true }),
            constrained,
        );
    }

    // Buttons occupy the footer strip, minus its bottom row.
    let button_area = Rect {
        x: chunks[2].x,
        y: chunks[2].y,
        width: chunks[2].width,
        height: chunks[2].height.saturating_sub(1),
    };
    self.render_connection_error_buttons(f, button_area);
}
|
||||||
|
|
||||||
|
/// Render the Retry and Exit buttons for the connection-error modal, centred
/// in the footer. The button matching `self.active_button` is drawn
/// inverted + bold; the inactive one is dimmed.
fn render_connection_error_buttons(&self, f: &mut Frame, area: Rect) {
    // 30/15/10/15/30 split: outer margins, two buttons, a gap between them.
    let button_chunks = Layout::default()
        .direction(Direction::Horizontal)
        .constraints([
            Constraint::Percentage(30),
            Constraint::Percentage(15),
            Constraint::Percentage(10),
            Constraint::Percentage(15),
            Constraint::Percentage(30),
        ])
        .split(area);
    let retry_style = if self.active_button == ModalButton::Retry {
        Style::default()
            .bg(BTN_RETRY_BG_ACTIVE)
            .fg(BTN_RETRY_FG_ACTIVE)
            .add_modifier(Modifier::BOLD)
    } else {
        Style::default()
            .fg(BTN_RETRY_FG_INACTIVE)
            .add_modifier(Modifier::DIM)
    };
    let exit_style = if self.active_button == ModalButton::Exit {
        Style::default()
            .bg(BTN_EXIT_BG_ACTIVE)
            .fg(BTN_EXIT_FG_ACTIVE)
            .add_modifier(Modifier::BOLD)
    } else {
        Style::default()
            .fg(BTN_EXIT_FG_INACTIVE)
            .add_modifier(Modifier::DIM)
    };
    f.render_widget(
        Paragraph::new(Text::from(Line::from(vec![Span::styled(
            BTN_RETRY_TEXT,
            retry_style,
        )])))
        .alignment(Alignment::Center),
        button_chunks[1],
    );
    f.render_widget(
        Paragraph::new(Text::from(Line::from(vec![Span::styled(
            BTN_EXIT_TEXT,
            exit_style,
        )])))
        .alignment(Alignment::Center),
        button_chunks[3],
    );
}
|
||||||
|
}
|
||||||
112
socktop/src/ui/modal_format.rs
Normal file
112
socktop/src/ui/modal_format.rs
Normal file
@ -0,0 +1,112 @@
|
|||||||
|
//! Formatting utilities for process details modal
|
||||||
|
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
/// Format an uptime given in whole seconds as a short human-readable string.
///
/// At most three units are shown, and once uptime reaches a day the seconds
/// component is dropped entirely (e.g. `"1d 1h 1m"`).
pub fn format_uptime(secs: u64) -> String {
    let (days, day_rem) = (secs / 86400, secs % 86400);
    let (hours, hour_rem) = (day_rem / 3600, day_rem % 3600);
    let (minutes, seconds) = (hour_rem / 60, hour_rem % 60);

    if days > 0 {
        format!("{days}d {hours}h {minutes}m")
    } else if hours > 0 {
        format!("{hours}h {minutes}m {seconds}s")
    } else if minutes > 0 {
        format!("{minutes}m {seconds}s")
    } else {
        format!("{seconds}s")
    }
}
|
||||||
|
|
||||||
|
/// Format a `Duration` as a short human-readable string (`"Hh Mm Ss"`),
/// omitting leading units that are zero. Sub-second precision is discarded.
pub fn format_duration(duration: Duration) -> String {
    let secs = duration.as_secs();
    let (hours, minutes, seconds) = (secs / 3600, (secs % 3600) / 60, secs % 60);
    match (hours, minutes) {
        (h, m) if h > 0 => format!("{h}h {m}m {seconds}s"),
        (_, m) if m > 0 => format!("{m}m {seconds}s"),
        _ => format!("{seconds}s"),
    }
}
|
||||||
|
|
||||||
|
/// Normalize a per-process CPU reading — which can exceed 100% on
/// multi-core machines — to a 0..=100 scale by dividing by the thread count.
///
/// A reported thread count of zero is treated as one to avoid dividing by
/// zero, and the result is capped at 100.
pub fn normalize_cpu_usage(cpu_usage: f32, thread_count: u32) -> f32 {
    let divisor = f32::max(thread_count as f32, 1.0);
    f32::min(cpu_usage / divisor, 100.0)
}
|
||||||
|
|
||||||
|
/// Compute a chart's Y-axis maximum: `max_value` rounded up to the next
/// multiple of 10, clamped to the range 10.0..=100.0.
pub fn calculate_dynamic_y_max(max_value: f64) -> f64 {
    let next_ten = (max_value / 10.0).ceil() * 10.0;
    next_ten.clamp(10.0, 100.0)
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_format_uptime_seconds() {
        assert_eq!(format_uptime(45), "45s");
    }

    #[test]
    fn test_format_uptime_minutes() {
        assert_eq!(format_uptime(125), "2m 5s");
    }

    #[test]
    fn test_format_uptime_hours() {
        assert_eq!(format_uptime(3665), "1h 1m 5s");
    }

    #[test]
    fn test_format_uptime_days() {
        // Seconds are dropped once uptime reaches a day.
        assert_eq!(format_uptime(90061), "1d 1h 1m");
    }

    // format_duration was previously untested; cover all three branches.
    #[test]
    fn test_format_duration_seconds() {
        assert_eq!(format_duration(Duration::from_secs(5)), "5s");
    }

    #[test]
    fn test_format_duration_minutes() {
        assert_eq!(format_duration(Duration::from_secs(65)), "1m 5s");
    }

    #[test]
    fn test_format_duration_hours() {
        assert_eq!(format_duration(Duration::from_secs(3725)), "1h 2m 5s");
    }

    #[test]
    fn test_normalize_cpu_single_thread() {
        assert_eq!(normalize_cpu_usage(50.0, 1), 50.0);
    }

    #[test]
    fn test_normalize_cpu_multi_thread() {
        assert_eq!(normalize_cpu_usage(400.0, 4), 100.0);
    }

    #[test]
    fn test_normalize_cpu_zero_threads() {
        // Should default to 1 thread to avoid division by zero
        assert_eq!(normalize_cpu_usage(100.0, 0), 100.0);
    }

    #[test]
    fn test_normalize_cpu_caps_at_100() {
        assert_eq!(normalize_cpu_usage(150.0, 1), 100.0);
    }

    #[test]
    fn test_dynamic_y_max_rounds_up() {
        assert_eq!(calculate_dynamic_y_max(15.0), 20.0);
        assert_eq!(calculate_dynamic_y_max(25.0), 30.0);
        assert_eq!(calculate_dynamic_y_max(5.0), 10.0);
    }

    #[test]
    fn test_dynamic_y_max_minimum() {
        assert_eq!(calculate_dynamic_y_max(0.0), 10.0);
        assert_eq!(calculate_dynamic_y_max(3.0), 10.0);
    }

    #[test]
    fn test_dynamic_y_max_caps_at_100() {
        assert_eq!(calculate_dynamic_y_max(95.0), 100.0);
        assert_eq!(calculate_dynamic_y_max(100.0), 100.0);
    }
}
|
||||||
1156
socktop/src/ui/modal_process.rs
Normal file
1156
socktop/src/ui/modal_process.rs
Normal file
File diff suppressed because it is too large
Load Diff
77
socktop/src/ui/modal_types.rs
Normal file
77
socktop/src/ui/modal_types.rs
Normal file
@ -0,0 +1,77 @@
|
|||||||
|
//! Type definitions for modal system
|
||||||
|
|
||||||
|
use std::time::Instant;
|
||||||
|
|
||||||
|
/// History data for process metrics rendering
///
/// Borrowed ring buffers of recent samples, used to draw the history charts
/// in the process-details modal.
pub struct ProcessHistoryData<'a> {
    /// Recent CPU usage samples.
    pub cpu: &'a std::collections::VecDeque<f32>,
    /// Recent memory usage samples — presumably bytes, given
    /// `ProcessModalData::max_mem_bytes`; confirm at the call site.
    pub mem: &'a std::collections::VecDeque<u64>,
    /// Recent I/O read samples.
    pub io_read: &'a std::collections::VecDeque<u64>,
    /// Recent I/O write samples.
    pub io_write: &'a std::collections::VecDeque<u64>,
}
|
||||||
|
|
||||||
|
/// Process data for modal rendering
///
/// Everything the process-details modal needs for one frame, borrowed from
/// the application state.
pub struct ProcessModalData<'a> {
    /// Latest detailed metrics for the selected process, if fetched.
    pub details: Option<&'a socktop_connector::ProcessMetricsResponse>,
    /// Journal entries for the process, if available.
    pub journal: Option<&'a socktop_connector::JournalResponse>,
    /// Sample history backing the charts.
    pub history: ProcessHistoryData<'a>,
    /// Upper bound used when scaling the memory chart.
    pub max_mem_bytes: u64,
    /// Set when per-process details are unavailable — confirm exact
    /// semantics (agent capability vs. per-process failure) at the caller.
    pub unsupported: bool,
}
|
||||||
|
|
||||||
|
/// Parameters for rendering scatter plot
|
||||||
|
pub(super) struct ScatterPlotParams<'a> {
|
||||||
|
pub process: &'a socktop_connector::DetailedProcessInfo,
|
||||||
|
pub main_user_ms: f64,
|
||||||
|
pub main_system_ms: f64,
|
||||||
|
pub max_user: f64,
|
||||||
|
pub max_system: f64,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub enum ModalType {
|
||||||
|
ConnectionError {
|
||||||
|
message: String,
|
||||||
|
disconnected_at: Instant,
|
||||||
|
retry_count: u32,
|
||||||
|
auto_retry_countdown: Option<u64>,
|
||||||
|
},
|
||||||
|
ProcessDetails {
|
||||||
|
pid: u32,
|
||||||
|
},
|
||||||
|
About,
|
||||||
|
Help,
|
||||||
|
#[allow(dead_code)]
|
||||||
|
Confirmation {
|
||||||
|
title: String,
|
||||||
|
message: String,
|
||||||
|
confirm_text: String,
|
||||||
|
cancel_text: String,
|
||||||
|
},
|
||||||
|
#[allow(dead_code)]
|
||||||
|
Info {
|
||||||
|
title: String,
|
||||||
|
message: String,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, PartialEq)]
|
||||||
|
pub enum ModalAction {
|
||||||
|
None, // Modal didn't handle the key, pass to main window
|
||||||
|
Handled, // Modal handled the key, don't pass to main window
|
||||||
|
RetryConnection,
|
||||||
|
ExitApp,
|
||||||
|
Confirm,
|
||||||
|
Cancel,
|
||||||
|
Dismiss,
|
||||||
|
SwitchToParentProcess(u32), // Switch to viewing parent process details
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, PartialEq)]
|
||||||
|
pub enum ModalButton {
|
||||||
|
Retry,
|
||||||
|
Exit,
|
||||||
|
Confirm,
|
||||||
|
Cancel,
|
||||||
|
Ok,
|
||||||
|
}
|
||||||
@ -12,9 +12,72 @@ use std::cmp::Ordering;
|
|||||||
|
|
||||||
use crate::types::Metrics;
|
use crate::types::Metrics;
|
||||||
use crate::ui::cpu::{per_core_clamp, per_core_handle_scrollbar_mouse};
|
use crate::ui::cpu::{per_core_clamp, per_core_handle_scrollbar_mouse};
|
||||||
use crate::ui::theme::{SB_ARROW, SB_THUMB, SB_TRACK};
|
use crate::ui::theme::{
|
||||||
|
PROCESS_SELECTION_BG, PROCESS_SELECTION_FG, PROCESS_TOOLTIP_BG, PROCESS_TOOLTIP_FG, SB_ARROW,
|
||||||
|
SB_THUMB, SB_TRACK,
|
||||||
|
};
|
||||||
use crate::ui::util::human;
|
use crate::ui::util::human;
|
||||||
|
|
||||||
|
/// Simple fuzzy matching: returns true if all characters in needle appear in haystack in order (case-insensitive)
|
||||||
|
fn fuzzy_match(haystack: &str, needle: &str) -> bool {
|
||||||
|
if needle.is_empty() {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
let haystack_lower = haystack.to_lowercase();
|
||||||
|
let needle_lower = needle.to_lowercase();
|
||||||
|
let mut haystack_chars = haystack_lower.chars();
|
||||||
|
|
||||||
|
for needle_char in needle_lower.chars() {
|
||||||
|
if !haystack_chars.any(|c| c == needle_char) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get filtered and sorted process indices based on search query and sort order
|
||||||
|
pub fn get_filtered_sorted_indices(
|
||||||
|
metrics: &Metrics,
|
||||||
|
search_query: &str,
|
||||||
|
sort_by: ProcSortBy,
|
||||||
|
) -> Vec<usize> {
|
||||||
|
// Filter processes by search query (fuzzy match)
|
||||||
|
let mut filtered_idxs: Vec<usize> = if search_query.is_empty() {
|
||||||
|
(0..metrics.top_processes.len()).collect()
|
||||||
|
} else {
|
||||||
|
(0..metrics.top_processes.len())
|
||||||
|
.filter(|&i| fuzzy_match(&metrics.top_processes[i].name, search_query))
|
||||||
|
.collect()
|
||||||
|
};
|
||||||
|
|
||||||
|
// Sort filtered rows
|
||||||
|
match sort_by {
|
||||||
|
ProcSortBy::CpuDesc => filtered_idxs.sort_by(|&a, &b| {
|
||||||
|
let aa = metrics.top_processes[a].cpu_usage;
|
||||||
|
let bb = metrics.top_processes[b].cpu_usage;
|
||||||
|
bb.partial_cmp(&aa).unwrap_or(Ordering::Equal)
|
||||||
|
}),
|
||||||
|
ProcSortBy::MemDesc => filtered_idxs.sort_by(|&a, &b| {
|
||||||
|
let aa = metrics.top_processes[a].mem_bytes;
|
||||||
|
let bb = metrics.top_processes[b].mem_bytes;
|
||||||
|
bb.cmp(&aa)
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
|
||||||
|
filtered_idxs
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Parameters for drawing the top processes table
|
||||||
|
pub struct ProcessDisplayParams<'a> {
|
||||||
|
pub metrics: Option<&'a Metrics>,
|
||||||
|
pub scroll_offset: usize,
|
||||||
|
pub sort_by: ProcSortBy,
|
||||||
|
pub selected_process_pid: Option<u32>,
|
||||||
|
pub selected_process_index: Option<usize>,
|
||||||
|
pub search_query: &'a str,
|
||||||
|
pub search_active: bool,
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||||
pub enum ProcSortBy {
|
pub enum ProcSortBy {
|
||||||
#[default]
|
#[default]
|
||||||
@ -31,28 +94,61 @@ const COLS: [Constraint; 5] = [
|
|||||||
Constraint::Length(8), // Mem %
|
Constraint::Length(8), // Mem %
|
||||||
];
|
];
|
||||||
|
|
||||||
pub fn draw_top_processes(
|
pub fn draw_top_processes(f: &mut ratatui::Frame<'_>, area: Rect, params: ProcessDisplayParams) {
|
||||||
f: &mut ratatui::Frame<'_>,
|
|
||||||
area: Rect,
|
|
||||||
m: Option<&Metrics>,
|
|
||||||
scroll_offset: usize,
|
|
||||||
sort_by: ProcSortBy,
|
|
||||||
) {
|
|
||||||
// Draw outer block and title
|
// Draw outer block and title
|
||||||
let Some(mm) = m else { return };
|
let Some(mm) = params.metrics else { return };
|
||||||
let total = mm.process_count.unwrap_or(mm.top_processes.len());
|
let total = mm.process_count.unwrap_or(mm.top_processes.len());
|
||||||
let block = Block::default()
|
let block = Block::default()
|
||||||
.borders(Borders::ALL)
|
.borders(Borders::ALL)
|
||||||
.title(format!("Top Processes ({total} total)"));
|
.title(format!("Top Processes ({total} total)"));
|
||||||
f.render_widget(block, area);
|
f.render_widget(block, area);
|
||||||
|
|
||||||
// Inner area and content area (reserve 2 columns for scrollbar)
|
// Inner area (reserve space for search box if active)
|
||||||
let inner = Rect {
|
let inner = Rect {
|
||||||
x: area.x + 1,
|
x: area.x + 1,
|
||||||
y: area.y + 1,
|
y: area.y + 1,
|
||||||
width: area.width.saturating_sub(2),
|
width: area.width.saturating_sub(2),
|
||||||
height: area.height.saturating_sub(2),
|
height: area.height.saturating_sub(2),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Draw search box if active
|
||||||
|
let content_start_y = if params.search_active || !params.search_query.is_empty() {
|
||||||
|
let search_area = Rect {
|
||||||
|
x: inner.x,
|
||||||
|
y: inner.y,
|
||||||
|
width: inner.width,
|
||||||
|
height: 3, // Height for border + content
|
||||||
|
};
|
||||||
|
|
||||||
|
let search_text = if params.search_active {
|
||||||
|
format!("Search: {}_", params.search_query)
|
||||||
|
} else {
|
||||||
|
format!(
|
||||||
|
"Filter: {} (press / to edit, c to clear)",
|
||||||
|
params.search_query
|
||||||
|
)
|
||||||
|
};
|
||||||
|
|
||||||
|
let search_block = Block::default()
|
||||||
|
.borders(Borders::ALL)
|
||||||
|
.border_style(Style::default().fg(Color::Yellow));
|
||||||
|
let search_paragraph = Paragraph::new(search_text)
|
||||||
|
.block(search_block)
|
||||||
|
.style(Style::default().fg(Color::Yellow));
|
||||||
|
f.render_widget(search_paragraph, search_area);
|
||||||
|
|
||||||
|
inner.y + 3
|
||||||
|
} else {
|
||||||
|
inner.y
|
||||||
|
};
|
||||||
|
|
||||||
|
// Content area (reserve 2 columns for scrollbar)
|
||||||
|
let inner = Rect {
|
||||||
|
x: inner.x,
|
||||||
|
y: content_start_y,
|
||||||
|
width: inner.width,
|
||||||
|
height: inner.height.saturating_sub(content_start_y - (area.y + 1)),
|
||||||
|
};
|
||||||
if inner.height < 1 || inner.width < 3 {
|
if inner.height < 1 || inner.width < 3 {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -63,27 +159,15 @@ pub fn draw_top_processes(
|
|||||||
height: inner.height,
|
height: inner.height,
|
||||||
};
|
};
|
||||||
|
|
||||||
// Sort rows (by CPU% or Mem bytes), descending.
|
// Get filtered and sorted indices
|
||||||
let mut idxs: Vec<usize> = (0..mm.top_processes.len()).collect();
|
let idxs = get_filtered_sorted_indices(mm, params.search_query, params.sort_by);
|
||||||
match sort_by {
|
|
||||||
ProcSortBy::CpuDesc => idxs.sort_by(|&a, &b| {
|
|
||||||
let aa = mm.top_processes[a].cpu_usage;
|
|
||||||
let bb = mm.top_processes[b].cpu_usage;
|
|
||||||
bb.partial_cmp(&aa).unwrap_or(Ordering::Equal)
|
|
||||||
}),
|
|
||||||
ProcSortBy::MemDesc => idxs.sort_by(|&a, &b| {
|
|
||||||
let aa = mm.top_processes[a].mem_bytes;
|
|
||||||
let bb = mm.top_processes[b].mem_bytes;
|
|
||||||
bb.cmp(&aa)
|
|
||||||
}),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scrolling
|
// Scrolling
|
||||||
let total_rows = idxs.len();
|
let total_rows = idxs.len();
|
||||||
let header_rows = 1usize;
|
let header_rows = 1usize;
|
||||||
let viewport_rows = content.height.saturating_sub(header_rows as u16) as usize;
|
let viewport_rows = content.height.saturating_sub(header_rows as u16) as usize;
|
||||||
let max_off = total_rows.saturating_sub(viewport_rows);
|
let max_off = total_rows.saturating_sub(viewport_rows);
|
||||||
let offset = scroll_offset.min(max_off);
|
let offset = params.scroll_offset.min(max_off);
|
||||||
let show_n = total_rows.saturating_sub(offset).min(viewport_rows);
|
let show_n = total_rows.saturating_sub(offset).min(viewport_rows);
|
||||||
|
|
||||||
// Build visible rows
|
// Build visible rows
|
||||||
@ -110,12 +194,29 @@ pub fn draw_top_processes(
|
|||||||
_ => Color::Red,
|
_ => Color::Red,
|
||||||
};
|
};
|
||||||
|
|
||||||
let emphasis = if (cpu_val - peak_cpu).abs() < f32::EPSILON {
|
let mut emphasis = if (cpu_val - peak_cpu).abs() < f32::EPSILON {
|
||||||
Style::default().add_modifier(Modifier::BOLD)
|
Style::default().add_modifier(Modifier::BOLD)
|
||||||
} else {
|
} else {
|
||||||
Style::default()
|
Style::default()
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Check if this process is selected - prioritize PID matching
|
||||||
|
let is_selected = if let Some(selected_pid) = params.selected_process_pid {
|
||||||
|
selected_pid == p.pid
|
||||||
|
} else if let Some(selected_idx) = params.selected_process_index {
|
||||||
|
selected_idx == ix // ix is the absolute index in the sorted list
|
||||||
|
} else {
|
||||||
|
false
|
||||||
|
};
|
||||||
|
|
||||||
|
// Apply selection highlighting
|
||||||
|
if is_selected {
|
||||||
|
emphasis = emphasis
|
||||||
|
.bg(PROCESS_SELECTION_BG)
|
||||||
|
.fg(PROCESS_SELECTION_FG)
|
||||||
|
.add_modifier(Modifier::BOLD);
|
||||||
|
}
|
||||||
|
|
||||||
let cpu_str = fmt_cpu_pct(cpu_val);
|
let cpu_str = fmt_cpu_pct(cpu_val);
|
||||||
|
|
||||||
ratatui::widgets::Row::new(vec![
|
ratatui::widgets::Row::new(vec![
|
||||||
@ -131,11 +232,11 @@ pub fn draw_top_processes(
|
|||||||
});
|
});
|
||||||
|
|
||||||
// Header with sort indicator
|
// Header with sort indicator
|
||||||
let cpu_hdr = match sort_by {
|
let cpu_hdr = match params.sort_by {
|
||||||
ProcSortBy::CpuDesc => "CPU % •",
|
ProcSortBy::CpuDesc => "CPU % •",
|
||||||
_ => "CPU %",
|
_ => "CPU %",
|
||||||
};
|
};
|
||||||
let mem_hdr = match sort_by {
|
let mem_hdr = match params.sort_by {
|
||||||
ProcSortBy::MemDesc => "Mem •",
|
ProcSortBy::MemDesc => "Mem •",
|
||||||
_ => "Mem",
|
_ => "Mem",
|
||||||
};
|
};
|
||||||
@ -151,6 +252,47 @@ pub fn draw_top_processes(
|
|||||||
.column_spacing(1);
|
.column_spacing(1);
|
||||||
f.render_widget(table, content);
|
f.render_widget(table, content);
|
||||||
|
|
||||||
|
// Draw tooltip if a process is selected
|
||||||
|
if let Some(selected_pid) = params.selected_process_pid {
|
||||||
|
// Find the selected process to get its name
|
||||||
|
let process_info = if let Some(metrics) = params.metrics {
|
||||||
|
metrics
|
||||||
|
.top_processes
|
||||||
|
.iter()
|
||||||
|
.find(|p| p.pid == selected_pid)
|
||||||
|
.map(|p| format!("PID {} • {}", p.pid, p.name))
|
||||||
|
.unwrap_or_else(|| format!("PID {selected_pid}"))
|
||||||
|
} else {
|
||||||
|
format!("PID {selected_pid}")
|
||||||
|
};
|
||||||
|
|
||||||
|
let tooltip_text = format!("{process_info} | Enter for details • X to unselect");
|
||||||
|
let tooltip_width = tooltip_text.len() as u16 + 2; // Add padding
|
||||||
|
let tooltip_height = 3;
|
||||||
|
|
||||||
|
// Position tooltip at bottom-right of the processes area
|
||||||
|
if area.width > tooltip_width + 2 && area.height > tooltip_height + 1 {
|
||||||
|
let tooltip_area = Rect {
|
||||||
|
x: area.x + area.width.saturating_sub(tooltip_width + 1),
|
||||||
|
y: area.y + area.height.saturating_sub(tooltip_height + 1),
|
||||||
|
width: tooltip_width,
|
||||||
|
height: tooltip_height,
|
||||||
|
};
|
||||||
|
|
||||||
|
let tooltip_block = Block::default().borders(Borders::ALL).style(
|
||||||
|
Style::default()
|
||||||
|
.bg(PROCESS_TOOLTIP_BG)
|
||||||
|
.fg(PROCESS_TOOLTIP_FG),
|
||||||
|
);
|
||||||
|
|
||||||
|
let tooltip_paragraph = Paragraph::new(tooltip_text)
|
||||||
|
.block(tooltip_block)
|
||||||
|
.wrap(ratatui::widgets::Wrap { trim: true });
|
||||||
|
|
||||||
|
f.render_widget(tooltip_paragraph, tooltip_area);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Draw scrollbar like CPU pane
|
// Draw scrollbar like CPU pane
|
||||||
let scroll_area = Rect {
|
let scroll_area = Rect {
|
||||||
x: inner.x + inner.width.saturating_sub(1),
|
x: inner.x + inner.width.saturating_sub(1),
|
||||||
@ -191,6 +333,18 @@ fn fmt_cpu_pct(v: f32) -> String {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Handle keyboard scrolling (Up/Down/PageUp/PageDown/Home/End)
|
/// Handle keyboard scrolling (Up/Down/PageUp/PageDown/Home/End)
|
||||||
|
/// Parameters for process key event handling
|
||||||
|
pub struct ProcessKeyParams<'a> {
|
||||||
|
pub selected_process_pid: &'a mut Option<u32>,
|
||||||
|
pub selected_process_index: &'a mut Option<usize>,
|
||||||
|
pub key: crossterm::event::KeyEvent,
|
||||||
|
pub metrics: Option<&'a Metrics>,
|
||||||
|
pub sort_by: ProcSortBy,
|
||||||
|
pub search_query: &'a str,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// LEGACY: Use processes_handle_key_with_selection for enhanced functionality
|
||||||
|
#[allow(dead_code)]
|
||||||
pub fn processes_handle_key(
|
pub fn processes_handle_key(
|
||||||
scroll_offset: &mut usize,
|
scroll_offset: &mut usize,
|
||||||
key: crossterm::event::KeyEvent,
|
key: crossterm::event::KeyEvent,
|
||||||
@ -199,8 +353,105 @@ pub fn processes_handle_key(
|
|||||||
crate::ui::cpu::per_core_handle_key(scroll_offset, key, page_size);
|
crate::ui::cpu::per_core_handle_key(scroll_offset, key, page_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn processes_handle_key_with_selection(params: ProcessKeyParams) -> bool {
|
||||||
|
use crossterm::event::KeyCode;
|
||||||
|
|
||||||
|
match params.key.code {
|
||||||
|
KeyCode::Up => {
|
||||||
|
// Navigate through filtered and sorted results
|
||||||
|
if let Some(m) = params.metrics {
|
||||||
|
let idxs = get_filtered_sorted_indices(m, params.search_query, params.sort_by);
|
||||||
|
|
||||||
|
if idxs.is_empty() {
|
||||||
|
// No filtered results, clear selection
|
||||||
|
*params.selected_process_index = None;
|
||||||
|
*params.selected_process_pid = None;
|
||||||
|
} else if params.selected_process_index.is_none()
|
||||||
|
|| params.selected_process_pid.is_none()
|
||||||
|
{
|
||||||
|
// No selection - select the first process in filtered/sorted order
|
||||||
|
let first_idx = idxs[0];
|
||||||
|
*params.selected_process_index = Some(first_idx);
|
||||||
|
*params.selected_process_pid = Some(m.top_processes[first_idx].pid);
|
||||||
|
} else if let Some(current_idx) = *params.selected_process_index {
|
||||||
|
// Find current position in filtered/sorted list
|
||||||
|
if let Some(pos) = idxs.iter().position(|&idx| idx == current_idx) {
|
||||||
|
if pos > 0 {
|
||||||
|
// Move up in filtered/sorted list
|
||||||
|
let new_idx = idxs[pos - 1];
|
||||||
|
*params.selected_process_index = Some(new_idx);
|
||||||
|
*params.selected_process_pid = Some(m.top_processes[new_idx].pid);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Current selection not in filtered list, select first result
|
||||||
|
let first_idx = idxs[0];
|
||||||
|
*params.selected_process_index = Some(first_idx);
|
||||||
|
*params.selected_process_pid = Some(m.top_processes[first_idx].pid);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
true // Handled
|
||||||
|
}
|
||||||
|
KeyCode::Down => {
|
||||||
|
// Navigate through filtered and sorted results
|
||||||
|
if let Some(m) = params.metrics {
|
||||||
|
let idxs = get_filtered_sorted_indices(m, params.search_query, params.sort_by);
|
||||||
|
|
||||||
|
if idxs.is_empty() {
|
||||||
|
// No filtered results, clear selection
|
||||||
|
*params.selected_process_index = None;
|
||||||
|
*params.selected_process_pid = None;
|
||||||
|
} else if params.selected_process_index.is_none()
|
||||||
|
|| params.selected_process_pid.is_none()
|
||||||
|
{
|
||||||
|
// No selection - select the first process in filtered/sorted order
|
||||||
|
let first_idx = idxs[0];
|
||||||
|
*params.selected_process_index = Some(first_idx);
|
||||||
|
*params.selected_process_pid = Some(m.top_processes[first_idx].pid);
|
||||||
|
} else if let Some(current_idx) = *params.selected_process_index {
|
||||||
|
// Find current position in filtered/sorted list
|
||||||
|
if let Some(pos) = idxs.iter().position(|&idx| idx == current_idx) {
|
||||||
|
if pos + 1 < idxs.len() {
|
||||||
|
// Move down in filtered/sorted list
|
||||||
|
let new_idx = idxs[pos + 1];
|
||||||
|
*params.selected_process_index = Some(new_idx);
|
||||||
|
*params.selected_process_pid = Some(m.top_processes[new_idx].pid);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Current selection not in filtered list, select first result
|
||||||
|
let first_idx = idxs[0];
|
||||||
|
*params.selected_process_index = Some(first_idx);
|
||||||
|
*params.selected_process_pid = Some(m.top_processes[first_idx].pid);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
true // Handled
|
||||||
|
}
|
||||||
|
KeyCode::Char('x') | KeyCode::Char('X') => {
|
||||||
|
// Unselect any selected process
|
||||||
|
if params.selected_process_pid.is_some() || params.selected_process_index.is_some() {
|
||||||
|
*params.selected_process_pid = None;
|
||||||
|
*params.selected_process_index = None;
|
||||||
|
true // Handled
|
||||||
|
} else {
|
||||||
|
false // No selection to clear
|
||||||
|
}
|
||||||
|
}
|
||||||
|
KeyCode::Enter => {
|
||||||
|
// Signal that Enter was pressed with a selection
|
||||||
|
params.selected_process_pid.is_some() // Return true if we have a selection to handle
|
||||||
|
}
|
||||||
|
_ => {
|
||||||
|
// No other keys handled - let scrollbar handle all navigation
|
||||||
|
false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Handle mouse for content scrolling and scrollbar dragging.
|
/// Handle mouse for content scrolling and scrollbar dragging.
|
||||||
/// Returns Some(new_sort) if the header "CPU %" or "Mem" was clicked.
|
/// Returns Some(new_sort) if the header "CPU %" or "Mem" was clicked.
|
||||||
|
/// LEGACY: Use processes_handle_mouse_with_selection for enhanced functionality
|
||||||
|
#[allow(dead_code)]
|
||||||
pub fn processes_handle_mouse(
|
pub fn processes_handle_mouse(
|
||||||
scroll_offset: &mut usize,
|
scroll_offset: &mut usize,
|
||||||
drag: &mut Option<crate::ui::cpu::PerCoreScrollDrag>,
|
drag: &mut Option<crate::ui::cpu::PerCoreScrollDrag>,
|
||||||
@ -264,3 +515,124 @@ pub fn processes_handle_mouse(
|
|||||||
);
|
);
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Parameters for process mouse event handling
|
||||||
|
pub struct ProcessMouseParams<'a> {
|
||||||
|
pub scroll_offset: &'a mut usize,
|
||||||
|
pub selected_process_pid: &'a mut Option<u32>,
|
||||||
|
pub selected_process_index: &'a mut Option<usize>,
|
||||||
|
pub drag: &'a mut Option<crate::ui::cpu::PerCoreScrollDrag>,
|
||||||
|
pub mouse: MouseEvent,
|
||||||
|
pub area: Rect,
|
||||||
|
pub total_rows: usize,
|
||||||
|
pub metrics: Option<&'a Metrics>,
|
||||||
|
pub sort_by: ProcSortBy,
|
||||||
|
pub search_query: &'a str,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Enhanced mouse handler that also manages process selection
|
||||||
|
/// Returns Some(new_sort) if the header was clicked, or handles row selection
|
||||||
|
pub fn processes_handle_mouse_with_selection(params: ProcessMouseParams) -> Option<ProcSortBy> {
|
||||||
|
// Inner and content areas (match draw_top_processes)
|
||||||
|
let inner = Rect {
|
||||||
|
x: params.area.x + 1,
|
||||||
|
y: params.area.y + 1,
|
||||||
|
width: params.area.width.saturating_sub(2),
|
||||||
|
height: params.area.height.saturating_sub(2),
|
||||||
|
};
|
||||||
|
if inner.height == 0 || inner.width <= 2 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate content area - must match draw_top_processes exactly!
|
||||||
|
// If search is active or query exists, content starts after search box (3 lines)
|
||||||
|
let search_active = !params.search_query.is_empty();
|
||||||
|
let content_start_y = if search_active { inner.y + 3 } else { inner.y };
|
||||||
|
|
||||||
|
let content = Rect {
|
||||||
|
x: inner.x,
|
||||||
|
y: content_start_y,
|
||||||
|
width: inner.width.saturating_sub(2),
|
||||||
|
height: inner
|
||||||
|
.height
|
||||||
|
.saturating_sub(if search_active { 3 } else { 0 }),
|
||||||
|
};
|
||||||
|
|
||||||
|
// Scrollbar interactions (click arrows/page/drag)
|
||||||
|
per_core_handle_scrollbar_mouse(
|
||||||
|
params.scroll_offset,
|
||||||
|
params.drag,
|
||||||
|
params.mouse,
|
||||||
|
params.area,
|
||||||
|
params.total_rows,
|
||||||
|
);
|
||||||
|
|
||||||
|
// Wheel scrolling when inside the content
|
||||||
|
crate::ui::cpu::per_core_handle_mouse(
|
||||||
|
params.scroll_offset,
|
||||||
|
params.mouse,
|
||||||
|
content,
|
||||||
|
content.height as usize,
|
||||||
|
);
|
||||||
|
|
||||||
|
// Header click to change sort
|
||||||
|
let header_area = Rect {
|
||||||
|
x: content.x,
|
||||||
|
y: content.y,
|
||||||
|
width: content.width,
|
||||||
|
height: 1,
|
||||||
|
};
|
||||||
|
let inside_header = params.mouse.row == header_area.y
|
||||||
|
&& params.mouse.column >= header_area.x
|
||||||
|
&& params.mouse.column < header_area.x + header_area.width;
|
||||||
|
|
||||||
|
if inside_header && matches!(params.mouse.kind, MouseEventKind::Down(MouseButton::Left)) {
|
||||||
|
// Split header into the same columns
|
||||||
|
let cols = Layout::default()
|
||||||
|
.direction(Direction::Horizontal)
|
||||||
|
.constraints(COLS.to_vec())
|
||||||
|
.split(header_area);
|
||||||
|
if params.mouse.column >= cols[2].x && params.mouse.column < cols[2].x + cols[2].width {
|
||||||
|
return Some(ProcSortBy::CpuDesc);
|
||||||
|
}
|
||||||
|
if params.mouse.column >= cols[3].x && params.mouse.column < cols[3].x + cols[3].width {
|
||||||
|
return Some(ProcSortBy::MemDesc);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Row click for process selection
|
||||||
|
let data_start_row = content.y + 1; // Skip header
|
||||||
|
let data_area_height = content.height.saturating_sub(1); // Exclude header
|
||||||
|
|
||||||
|
if matches!(params.mouse.kind, MouseEventKind::Down(MouseButton::Left))
|
||||||
|
&& params.mouse.row >= data_start_row
|
||||||
|
&& params.mouse.row < data_start_row + data_area_height
|
||||||
|
&& params.mouse.column >= content.x
|
||||||
|
&& params.mouse.column < content.x + content.width
|
||||||
|
{
|
||||||
|
let clicked_row = (params.mouse.row - data_start_row) as usize;
|
||||||
|
|
||||||
|
// Find the actual process using the same filtering/sorting logic as the drawing code
|
||||||
|
if let Some(m) = params.metrics {
|
||||||
|
// Use the same filtered and sorted indices as display
|
||||||
|
let idxs = get_filtered_sorted_indices(m, params.search_query, params.sort_by);
|
||||||
|
|
||||||
|
// Calculate which process was actually clicked based on filtered/sorted order
|
||||||
|
let visible_process_position = *params.scroll_offset + clicked_row;
|
||||||
|
if visible_process_position < idxs.len() {
|
||||||
|
let actual_process_index = idxs[visible_process_position];
|
||||||
|
let clicked_process = &m.top_processes[actual_process_index];
|
||||||
|
*params.selected_process_pid = Some(clicked_process.pid);
|
||||||
|
*params.selected_process_index = Some(actual_process_index);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clamp to valid range
|
||||||
|
per_core_clamp(
|
||||||
|
params.scroll_offset,
|
||||||
|
params.total_rows,
|
||||||
|
(content.height.saturating_sub(1)) as usize,
|
||||||
|
);
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|||||||
@ -6,3 +6,83 @@ use ratatui::style::Color;
|
|||||||
pub const SB_ARROW: Color = Color::Rgb(170, 170, 180);
|
pub const SB_ARROW: Color = Color::Rgb(170, 170, 180);
|
||||||
pub const SB_TRACK: Color = Color::Rgb(170, 170, 180);
|
pub const SB_TRACK: Color = Color::Rgb(170, 170, 180);
|
||||||
pub const SB_THUMB: Color = Color::Rgb(170, 170, 180);
|
pub const SB_THUMB: Color = Color::Rgb(170, 170, 180);
|
||||||
|
|
||||||
|
// Modal palette
|
||||||
|
pub const MODAL_DIM_BG: Color = Color::Rgb(15, 15, 25);
|
||||||
|
pub const MODAL_BG: Color = Color::Rgb(26, 26, 46);
|
||||||
|
pub const MODAL_FG: Color = Color::Rgb(230, 230, 230);
|
||||||
|
pub const MODAL_TITLE_FG: Color = Color::Rgb(255, 102, 102); // soft red for title text
|
||||||
|
pub const MODAL_BORDER_FG: Color = Color::Rgb(204, 51, 51); // darker red border
|
||||||
|
|
||||||
|
pub const MODAL_ICON_PINK: Color = Color::Rgb(255, 182, 193); // light pink icons line
|
||||||
|
pub const MODAL_AGENT_FG: Color = Color::Rgb(220, 220, 255); // pale periwinkle
|
||||||
|
pub const MODAL_HINT_FG: Color = Color::Rgb(255, 215, 0); // gold for message icon
|
||||||
|
pub const MODAL_OFFLINE_LABEL_FG: Color = Color::Rgb(135, 206, 235); // sky blue label
|
||||||
|
pub const MODAL_RETRY_LABEL_FG: Color = Color::Rgb(255, 165, 0); // orange label
|
||||||
|
pub const MODAL_COUNTDOWN_LABEL_FG: Color = Color::Rgb(255, 192, 203); // pink label for countdown
|
||||||
|
|
||||||
|
// Buttons
|
||||||
|
pub const BTN_RETRY_BG_ACTIVE: Color = Color::Rgb(46, 204, 113); // modern green
|
||||||
|
pub const BTN_RETRY_FG_ACTIVE: Color = Color::Rgb(26, 26, 46);
|
||||||
|
pub const BTN_RETRY_FG_INACTIVE: Color = Color::Rgb(46, 204, 113);
|
||||||
|
|
||||||
|
pub const BTN_EXIT_BG_ACTIVE: Color = Color::Rgb(255, 255, 255); // modern red
|
||||||
|
pub const BTN_EXIT_FG_ACTIVE: Color = Color::Rgb(26, 26, 46);
|
||||||
|
pub const BTN_EXIT_FG_INACTIVE: Color = Color::Rgb(255, 255, 255);
|
||||||
|
|
||||||
|
// Process selection colors
|
||||||
|
pub const PROCESS_SELECTION_BG: Color = Color::Rgb(147, 112, 219); // Medium slate blue (purple)
|
||||||
|
pub const PROCESS_SELECTION_FG: Color = Color::Rgb(255, 255, 255); // White text for contrast
|
||||||
|
pub const PROCESS_TOOLTIP_BG: Color = Color::Rgb(147, 112, 219); // Same purple as selection
|
||||||
|
pub const PROCESS_TOOLTIP_FG: Color = Color::Rgb(255, 255, 255); // White text for contrast
|
||||||
|
|
||||||
|
// Process details modal colors (matches main UI aesthetic - no custom colors, terminal defaults)
|
||||||
|
pub const PROCESS_DETAILS_ACCENT: Color = Color::Rgb(147, 112, 219); // Purple accent for highlights
|
||||||
|
|
||||||
|
// Emoji / icon strings (centralized so they can be themed/swapped later)
|
||||||
|
pub const ICON_WARNING_TITLE: &str = " 🔌 CONNECTION ERROR ";
|
||||||
|
pub const ICON_CLUSTER: &str = "⚠️";
|
||||||
|
pub const ICON_MESSAGE: &str = "💭 ";
|
||||||
|
pub const ICON_OFFLINE_LABEL: &str = "⏱️ Offline for: ";
|
||||||
|
pub const ICON_RETRY_LABEL: &str = "🔄 Retry attempts: ";
|
||||||
|
pub const ICON_COUNTDOWN_LABEL: &str = "⏰ Next auto retry: ";
|
||||||
|
pub const BTN_RETRY_TEXT: &str = " 🔄 Retry ";
|
||||||
|
pub const BTN_EXIT_TEXT: &str = " ❌ Exit ";
|
||||||
|
|
||||||
|
// warning icon
|
||||||
|
pub const LARGE_ERROR_ICON: &[&str] = &[
|
||||||
|
" /\\ ",
|
||||||
|
" / \\ ",
|
||||||
|
" / !! \\ ",
|
||||||
|
" / !!!! \\ ",
|
||||||
|
" / !! \\ ",
|
||||||
|
" / !!!! \\ ",
|
||||||
|
" / !! \\ ",
|
||||||
|
" /______________\\ ",
|
||||||
|
];
|
||||||
|
|
||||||
|
//about logo
|
||||||
|
pub const ASCII_ART: &str = r#"
|
||||||
|
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣀⣠⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
|
||||||
|
⠀⠀⠀⠀⠀⠀⠀⠀⣀⣤⣶⣾⠿⠿⠛⠃⠀⠀⠀⠀⠀⣀⣀⣠⡄⠀⠀⠀⠀⠀
|
||||||
|
⠀⠀⠀⠀⠀⠀⠀⠀⠘⠛⢉⣠⣴⣾⣿⠿⠆⢰⣾⡿⠿⠛⠛⠋⠁⠀⠀⠀⠀⠀
|
||||||
|
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⣿⠟⠋⣁⣤⣤⣶⠀⣠⣤⣶⣾⣿⣿⡿⠀⠀⠀⠀⠀
|
||||||
|
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣶⣿⣿⣿⣿⣿⡆⠘⠛⢉⣁⣤⣤⣤⡀⠀⠀⠀⠀
|
||||||
|
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣿⣿⣿⡀⢾⣿⣿⣿⣿⣿⡇⠀⠀⠀⠀
|
||||||
|
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⣿⣿⣿⣿⣧⠈⢿⣿⣿⣿⣿⣷⠀⠀⠀⠀
|
||||||
|
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣿⣿⣿⣧⠈⢿⣿⣿⣿⣿⡄⠀⠀⠀
|
||||||
|
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⣿⣿⣿⣿⣿⠿⠋⣁⠀⢿⣿⣿⣿⣷⡀⠀⠀
|
||||||
|
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣠⣴⣿⣿⣿⣿⡟⢁⣴⣿⣿⡇⢸⣿⣿⡿⠟⠃⠀⠀
|
||||||
|
⠀⠀⠀⠀⠀⠀⢀⣠⣴⣿⣿⣿⣿⣿⣿⡟⢀⣿⣿⣿⡟⢀⣾⠟⢁⣤⣶⣿⠀⠀
|
||||||
|
⠀⠀⠀⠀⣠⣶⣿⣿⣿⣿⣿⣿⣿⣿⣿⡇⠸⡿⠟⢋⣠⣾⠃⣰⣿⣿⣿⡟⠀⠀
|
||||||
|
⠀⠀⣴⣄⠙⣿⣿⣿⣿⣿⡿⠿⠛⠋⣉⣁⣤⣴⣶⣿⣿⣿⠀⣿⡿⠟⠋⠀⠀⠀
|
||||||
|
⠀⠀⣿⣿⡆⠹⠟⠋⣁⣤⡄⢰⣿⠿⠟⠛⠋⠉⠉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
|
||||||
|
⠀⠀⠈⠉⠁⠀⠀⠀⠙⠛⠃⠈⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
|
||||||
|
|
||||||
|
███████╗ ██████╗ ██████╗████████╗ ██████╗ ██████╗
|
||||||
|
██╔════╝██╔═══██╗██╔════╝╚══██╔══╝██╔═══██╗██╔══██╗
|
||||||
|
███████╗██║ ██║██║ ██║ ██║ ██║██████╔╝
|
||||||
|
╚════██║██║ ██║██║ ██║ ██║ ██║██╔═══╝
|
||||||
|
███████║╚██████╔╝╚██████╗ ██║ ╚██████╔╝██║
|
||||||
|
╚══════╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═════╝ ╚═╝
|
||||||
|
"#;
|
||||||
|
|||||||
@ -1,210 +0,0 @@
|
|||||||
//! Minimal WebSocket client helpers for requesting metrics from the agent.
|
|
||||||
|
|
||||||
use flate2::bufread::GzDecoder;
|
|
||||||
use futures_util::{SinkExt, StreamExt};
|
|
||||||
use prost::Message as _;
|
|
||||||
use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier};
|
|
||||||
use rustls::pki_types::{CertificateDer, ServerName, UnixTime};
|
|
||||||
use rustls::{ClientConfig, RootCertStore};
|
|
||||||
use rustls::{DigitallySignedStruct, SignatureScheme};
|
|
||||||
use rustls_pemfile::Item;
|
|
||||||
use std::io::Read;
|
|
||||||
use std::{fs::File, io::BufReader, sync::Arc};
|
|
||||||
use tokio::net::TcpStream;
|
|
||||||
use tokio_tungstenite::{
|
|
||||||
connect_async, connect_async_tls_with_config, tungstenite::client::IntoClientRequest,
|
|
||||||
tungstenite::Message, Connector, MaybeTlsStream, WebSocketStream,
|
|
||||||
};
|
|
||||||
use url::Url;
|
|
||||||
|
|
||||||
use crate::types::{DiskInfo, Metrics, ProcessInfo, ProcessesPayload};
|
|
||||||
|
|
||||||
mod pb {
|
|
||||||
// generated by build.rs
|
|
||||||
include!(concat!(env!("OUT_DIR"), "/socktop.rs"));
|
|
||||||
}
|
|
||||||
|
|
||||||
pub type WsStream = WebSocketStream<MaybeTlsStream<TcpStream>>;
|
|
||||||
|
|
||||||
// Connect to the agent and return the WS stream
|
|
||||||
pub async fn connect(
|
|
||||||
url: &str,
|
|
||||||
tls_ca: Option<&str>,
|
|
||||||
) -> Result<WsStream, Box<dyn std::error::Error>> {
|
|
||||||
let mut u = Url::parse(url)?;
|
|
||||||
if let Some(ca_path) = tls_ca {
|
|
||||||
if u.scheme() == "ws" {
|
|
||||||
let _ = u.set_scheme("wss");
|
|
||||||
}
|
|
||||||
return connect_with_ca(u.as_str(), ca_path).await;
|
|
||||||
}
|
|
||||||
let (ws, _) = connect_async(u.as_str()).await?;
|
|
||||||
Ok(ws)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn connect_with_ca(url: &str, ca_path: &str) -> Result<WsStream, Box<dyn std::error::Error>> {
|
|
||||||
let mut root = RootCertStore::empty();
|
|
||||||
let mut reader = BufReader::new(File::open(ca_path)?);
|
|
||||||
let mut der_certs = Vec::new();
|
|
||||||
while let Ok(Some(item)) = rustls_pemfile::read_one(&mut reader) {
|
|
||||||
if let Item::X509Certificate(der) = item {
|
|
||||||
der_certs.push(der);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
root.add_parsable_certificates(der_certs);
|
|
||||||
|
|
||||||
let mut cfg = ClientConfig::builder()
|
|
||||||
.with_root_certificates(root)
|
|
||||||
.with_no_client_auth();
|
|
||||||
|
|
||||||
let req = url.into_client_request()?;
|
|
||||||
let verify_domain = std::env::var("SOCKTOP_VERIFY_NAME").ok().as_deref() == Some("1");
|
|
||||||
if !verify_domain {
|
|
||||||
#[derive(Debug)]
|
|
||||||
struct NoVerify;
|
|
||||||
impl ServerCertVerifier for NoVerify {
|
|
||||||
fn verify_server_cert(
|
|
||||||
&self,
|
|
||||||
_end_entity: &CertificateDer<'_>,
|
|
||||||
_intermediates: &[CertificateDer<'_>],
|
|
||||||
_server_name: &ServerName,
|
|
||||||
_ocsp_response: &[u8],
|
|
||||||
_now: UnixTime,
|
|
||||||
) -> Result<ServerCertVerified, rustls::Error> {
|
|
||||||
Ok(ServerCertVerified::assertion())
|
|
||||||
}
|
|
||||||
fn verify_tls12_signature(
|
|
||||||
&self,
|
|
||||||
_message: &[u8],
|
|
||||||
_cert: &CertificateDer<'_>,
|
|
||||||
_dss: &DigitallySignedStruct,
|
|
||||||
) -> Result<HandshakeSignatureValid, rustls::Error> {
|
|
||||||
Ok(HandshakeSignatureValid::assertion())
|
|
||||||
}
|
|
||||||
fn verify_tls13_signature(
|
|
||||||
&self,
|
|
||||||
_message: &[u8],
|
|
||||||
_cert: &CertificateDer<'_>,
|
|
||||||
_dss: &DigitallySignedStruct,
|
|
||||||
) -> Result<HandshakeSignatureValid, rustls::Error> {
|
|
||||||
Ok(HandshakeSignatureValid::assertion())
|
|
||||||
}
|
|
||||||
fn supported_verify_schemes(&self) -> Vec<SignatureScheme> {
|
|
||||||
// Provide common schemes; not strictly needed for skipping but keeps API happy
|
|
||||||
vec![
|
|
||||||
SignatureScheme::ECDSA_NISTP256_SHA256,
|
|
||||||
SignatureScheme::ED25519,
|
|
||||||
SignatureScheme::RSA_PSS_SHA256,
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cfg.dangerous().set_certificate_verifier(Arc::new(NoVerify));
|
|
||||||
eprintln!("socktop: hostname verification disabled (default). Use --verify-hostname to enable strict SAN checking.");
|
|
||||||
}
|
|
||||||
let cfg = Arc::new(cfg);
|
|
||||||
let (ws, _) =
|
|
||||||
connect_async_tls_with_config(req, None, verify_domain, Some(Connector::Rustls(cfg)))
|
|
||||||
.await?;
|
|
||||||
Ok(ws)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send a "get_metrics" request and await a single JSON reply
|
|
||||||
pub async fn request_metrics(ws: &mut WsStream) -> Option<Metrics> {
|
|
||||||
if ws.send(Message::Text("get_metrics".into())).await.is_err() {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
match ws.next().await {
|
|
||||||
Some(Ok(Message::Binary(b))) => {
|
|
||||||
gunzip_to_string(&b).and_then(|s| serde_json::from_str::<Metrics>(&s).ok())
|
|
||||||
}
|
|
||||||
Some(Ok(Message::Text(json))) => serde_json::from_str::<Metrics>(&json).ok(),
|
|
||||||
_ => None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decompress a gzip-compressed binary frame into a String.
|
|
||||||
fn gunzip_to_string(bytes: &[u8]) -> Option<String> {
|
|
||||||
let mut dec = GzDecoder::new(bytes);
|
|
||||||
let mut out = String::new();
|
|
||||||
dec.read_to_string(&mut out).ok()?;
|
|
||||||
Some(out)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn gunzip_to_vec(bytes: &[u8]) -> Option<Vec<u8>> {
|
|
||||||
let mut dec = GzDecoder::new(bytes);
|
|
||||||
let mut out = Vec::new();
|
|
||||||
dec.read_to_end(&mut out).ok()?;
|
|
||||||
Some(out)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// True when `bytes` begins with the gzip magic number (0x1f 0x8b).
fn is_gzip(bytes: &[u8]) -> bool {
    matches!(bytes, [0x1f, 0x8b, ..])
}
|
|
||||||
// Suppress dead_code until these are wired into the app
|
|
||||||
#[allow(dead_code)]
|
|
||||||
pub enum Payload {
|
|
||||||
Metrics(Metrics),
|
|
||||||
Disks(Vec<DiskInfo>),
|
|
||||||
Processes(ProcessesPayload),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send a "get_disks" request and await a JSON Vec<DiskInfo>
|
|
||||||
pub async fn request_disks(ws: &mut WsStream) -> Option<Vec<DiskInfo>> {
|
|
||||||
if ws.send(Message::Text("get_disks".into())).await.is_err() {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
match ws.next().await {
|
|
||||||
Some(Ok(Message::Binary(b))) => {
|
|
||||||
gunzip_to_string(&b).and_then(|s| serde_json::from_str::<Vec<DiskInfo>>(&s).ok())
|
|
||||||
}
|
|
||||||
Some(Ok(Message::Text(json))) => serde_json::from_str::<Vec<DiskInfo>>(&json).ok(),
|
|
||||||
_ => None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send a "get_processes" request and await a ProcessesPayload decoded from protobuf (binary, may be gzipped)
|
|
||||||
pub async fn request_processes(ws: &mut WsStream) -> Option<ProcessesPayload> {
|
|
||||||
if ws
|
|
||||||
.send(Message::Text("get_processes".into()))
|
|
||||||
.await
|
|
||||||
.is_err()
|
|
||||||
{
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
match ws.next().await {
|
|
||||||
Some(Ok(Message::Binary(b))) => {
|
|
||||||
let gz = is_gzip(&b);
|
|
||||||
let data = if gz { gunzip_to_vec(&b)? } else { b };
|
|
||||||
match pb::Processes::decode(data.as_slice()) {
|
|
||||||
Ok(pb) => {
|
|
||||||
let rows: Vec<ProcessInfo> = pb
|
|
||||||
.rows
|
|
||||||
.into_iter()
|
|
||||||
.map(|p: pb::Process| ProcessInfo {
|
|
||||||
pid: p.pid,
|
|
||||||
name: p.name,
|
|
||||||
cpu_usage: p.cpu_usage,
|
|
||||||
mem_bytes: p.mem_bytes,
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
Some(ProcessesPayload {
|
|
||||||
process_count: pb.process_count as usize,
|
|
||||||
top_processes: rows,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
if std::env::var("SOCKTOP_DEBUG").ok().as_deref() == Some("1") {
|
|
||||||
eprintln!("protobuf decode failed: {e}");
|
|
||||||
}
|
|
||||||
// Fallback: maybe it's JSON (bytes already decompressed if gz)
|
|
||||||
match String::from_utf8(data) {
|
|
||||||
Ok(s) => serde_json::from_str::<ProcessesPayload>(&s).ok(),
|
|
||||||
Err(_) => None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Some(Ok(Message::Text(json))) => serde_json::from_str::<ProcessesPayload>(&json).ok(),
|
|
||||||
_ => None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
46
socktop/tests/modal_tests.rs
Normal file
46
socktop/tests/modal_tests.rs
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
//! Tests for modal formatting and duration helper.
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
// Reference copy of the (private) format_duration helper so its contract can
// be asserted here; if that helper is ever moved to a shared util module,
// test it directly instead of duplicating the logic.
fn format_duration_ref(duration: Duration) -> String {
    let secs = duration.as_secs();
    let hours = secs / 3600;
    let rem = secs % 3600;
    let (minutes, seconds) = (rem / 60, rem % 60);
    if hours > 0 {
        format!("{hours}h {minutes}m {seconds}s")
    } else if minutes > 0 {
        format!("{minutes}m {seconds}s")
    } else {
        format!("{seconds}s")
    }
}
|
||||||
|
|
||||||
|
#[test]
fn test_format_duration_boundaries() {
    // (input seconds, expected rendering) pairs at unit boundaries.
    let cases = [
        (0, "0s"),
        (59, "59s"),
        (60, "1m 0s"),
        (61, "1m 1s"),
        (3600, "1h 0m 0s"),
        (3661, "1h 1m 1s"),
    ];
    for (secs, expected) in cases {
        assert_eq!(format_duration_ref(Duration::from_secs(secs)), expected);
    }
}
|
||||||
|
|
||||||
|
// Basic test to ensure auto-retry countdown semantics are consistent for initial state.
#[test]
fn test_auto_retry_initial_none() {
    // App can't be constructed here without pulling in the whole UI, so this
    // mirrors the countdown rule: a countdown exists only while the modal is
    // active AND the connection is down. When integrated, consider extracting
    // a pure helper returning Option<u64> and testing it directly.
    let modal_active = false; // requirement: must be active for countdown
    let disconnected_state = true; // assume disconnected state
    let countdown = (disconnected_state && modal_active).then_some(0);
    assert!(countdown.is_none());
}
|
||||||
@ -60,7 +60,9 @@ fn test_profile_created_on_first_use() {
|
|||||||
let _guard = ENV_LOCK.lock().unwrap();
|
let _guard = ENV_LOCK.lock().unwrap();
|
||||||
// Isolate config in a temp dir
|
// Isolate config in a temp dir
|
||||||
let td = tempfile::tempdir().unwrap();
|
let td = tempfile::tempdir().unwrap();
|
||||||
|
unsafe {
|
||||||
std::env::set_var("XDG_CONFIG_HOME", td.path());
|
std::env::set_var("XDG_CONFIG_HOME", td.path());
|
||||||
|
}
|
||||||
// Ensure directory exists fresh
|
// Ensure directory exists fresh
|
||||||
std::fs::create_dir_all(td.path().join("socktop")).unwrap();
|
std::fs::create_dir_all(td.path().join("socktop")).unwrap();
|
||||||
let _ = fs::remove_file(profiles_path());
|
let _ = fs::remove_file(profiles_path());
|
||||||
@ -78,7 +80,9 @@ fn test_profile_created_on_first_use() {
|
|||||||
fn test_profile_overwrite_only_when_changed() {
|
fn test_profile_overwrite_only_when_changed() {
|
||||||
let _guard = ENV_LOCK.lock().unwrap();
|
let _guard = ENV_LOCK.lock().unwrap();
|
||||||
let td = tempfile::tempdir().unwrap();
|
let td = tempfile::tempdir().unwrap();
|
||||||
|
unsafe {
|
||||||
std::env::set_var("XDG_CONFIG_HOME", td.path());
|
std::env::set_var("XDG_CONFIG_HOME", td.path());
|
||||||
|
}
|
||||||
std::fs::create_dir_all(td.path().join("socktop")).unwrap();
|
std::fs::create_dir_all(td.path().join("socktop")).unwrap();
|
||||||
let _ = fs::remove_file(profiles_path());
|
let _ = fs::remove_file(profiles_path());
|
||||||
// Initial create
|
// Initial create
|
||||||
@ -101,7 +105,9 @@ fn test_profile_overwrite_only_when_changed() {
|
|||||||
fn test_profile_tls_ca_persisted() {
|
fn test_profile_tls_ca_persisted() {
|
||||||
let _guard = ENV_LOCK.lock().unwrap();
|
let _guard = ENV_LOCK.lock().unwrap();
|
||||||
let td = tempfile::tempdir().unwrap();
|
let td = tempfile::tempdir().unwrap();
|
||||||
|
unsafe {
|
||||||
std::env::set_var("XDG_CONFIG_HOME", td.path());
|
std::env::set_var("XDG_CONFIG_HOME", td.path());
|
||||||
|
}
|
||||||
std::fs::create_dir_all(td.path().join("socktop")).unwrap();
|
std::fs::create_dir_all(td.path().join("socktop")).unwrap();
|
||||||
let _ = fs::remove_file(profiles_path());
|
let _ = fs::remove_file(profiles_path());
|
||||||
let (_ok, _out) = run_socktop(&[
|
let (_ok, _out) = run_socktop(&[
|
||||||
|
|||||||
@ -1,29 +0,0 @@
|
|||||||
use socktop::ws::{connect, request_metrics, request_processes};
|
|
||||||
|
|
||||||
// Integration probe: only runs when SOCKTOP_WS is set to an agent WebSocket URL.
|
|
||||||
// Example: SOCKTOP_WS=ws://127.0.0.1:3000/ws cargo test -p socktop --test ws_probe -- --nocapture
|
|
||||||
#[tokio::test]
|
|
||||||
async fn probe_ws_endpoints() {
|
|
||||||
// Gate the test to avoid CI failures when no agent is running.
|
|
||||||
let url = match std::env::var("SOCKTOP_WS") {
|
|
||||||
Ok(v) if !v.is_empty() => v,
|
|
||||||
_ => {
|
|
||||||
eprintln!(
|
|
||||||
"skipping ws_probe: set SOCKTOP_WS=ws://host:port/ws to run this integration test"
|
|
||||||
);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Optional pinned CA for WSS/self-signed setups
|
|
||||||
let tls_ca = std::env::var("SOCKTOP_TLS_CA").ok();
|
|
||||||
let mut ws = connect(&url, tls_ca.as_deref()).await.expect("connect ws");
|
|
||||||
|
|
||||||
// Should get fast metrics quickly
|
|
||||||
let m = request_metrics(&mut ws).await;
|
|
||||||
assert!(m.is_some(), "expected Metrics payload within timeout");
|
|
||||||
|
|
||||||
// Processes may be gzipped and a bit slower, but should arrive
|
|
||||||
let p = request_processes(&mut ws).await;
|
|
||||||
assert!(p.is_some(), "expected Processes payload within timeout");
|
|
||||||
}
|
|
||||||
@ -1,37 +1,47 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "socktop_agent"
|
name = "socktop_agent"
|
||||||
version = "0.1.3"
|
version = "1.50.2"
|
||||||
authors = ["Jason Witty <jasonpwitty+socktop@proton.me>"]
|
authors = ["Jason Witty <jasonpwitty+socktop@proton.me>"]
|
||||||
description = "Remote system monitor over WebSocket, TUI like top"
|
description = "Socktop agent daemon. Serves host metrics over WebSocket."
|
||||||
edition = "2021"
|
edition = "2024"
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
|
readme = "README.md"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
tokio = { version = "1", features = ["full"] }
|
# Tokio: Use minimal features instead of "full" to reduce binary size
|
||||||
|
# Only include: rt-multi-thread (async runtime), net (WebSocket), sync (Mutex/RwLock), macros (#[tokio::test])
|
||||||
|
# Excluded: io, fs, process, signal, time (not needed for this workload)
|
||||||
|
# Savings: ~200-300KB binary size, faster compile times
|
||||||
|
tokio = { version = "1", features = ["rt-multi-thread", "net", "sync", "macros"] }
|
||||||
axum = { version = "0.7", features = ["ws", "macros"] }
|
axum = { version = "0.7", features = ["ws", "macros"] }
|
||||||
sysinfo = { version = "0.37", features = ["network", "disk", "component"] }
|
sysinfo = { version = "0.37", features = ["network", "disk", "component"] }
|
||||||
serde = { version = "1", features = ["derive"] }
|
serde = { version = "1", features = ["derive"] }
|
||||||
serde_json = "1"
|
serde_json = "1"
|
||||||
flate2 = { version = "1", default-features = false, features = ["rust_backend"] }
|
flate2 = { version = "1", default-features = false, features = ["rust_backend"] }
|
||||||
futures-util = "0.3.31"
|
futures-util = "0.3.31"
|
||||||
tracing = "0.1"
|
tracing = { version = "0.1", optional = true }
|
||||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
tracing-subscriber = { version = "0.3", features = ["env-filter"], optional = true }
|
||||||
# nvml-wrapper removed (unused; GPU metrics via gfxinfo only now)
|
|
||||||
gfxinfo = "0.1.2"
|
gfxinfo = "0.1.2"
|
||||||
once_cell = "1.19"
|
once_cell = "1.19"
|
||||||
axum-server = { version = "0.6", features = ["tls-rustls"] }
|
axum-server = { version = "0.7", features = ["tls-rustls"] }
|
||||||
rustls = "0.23"
|
rustls = { version = "0.23", features = ["aws-lc-rs"] }
|
||||||
rustls-pemfile = "2.1"
|
rustls-pemfile = "2.1"
|
||||||
rcgen = "0.13" # pure-Rust self-signed cert generation (replaces openssl vendored build)
|
rcgen = "0.13"
|
||||||
anyhow = "1"
|
anyhow = "1"
|
||||||
hostname = "0.3"
|
hostname = "0.3"
|
||||||
prost = { workspace = true }
|
prost = { workspace = true }
|
||||||
time = { version = "0.3", default-features = false, features = ["formatting", "macros", "parsing" ] }
|
time = { version = "0.3", default-features = false, features = ["formatting", "macros", "parsing" ] }
|
||||||
|
|
||||||
|
[features]
|
||||||
|
default = []
|
||||||
|
logging = ["tracing", "tracing-subscriber"]
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
prost-build = "0.13"
|
prost-build = "0.13"
|
||||||
tonic-build = { version = "0.12", default-features = false, optional = true }
|
tonic-build = { version = "0.12", default-features = false, optional = true }
|
||||||
protoc-bin-vendored = "3"
|
protoc-bin-vendored = "3"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
assert_cmd = "2.0"
|
assert_cmd = "2.0"
|
||||||
tempfile = "3.10"
|
tempfile = "3.10"
|
||||||
|
tokio-tungstenite = "0.21"
|
||||||
|
|||||||
396
socktop_agent/README.md
Normal file
396
socktop_agent/README.md
Normal file
@ -0,0 +1,396 @@
|
|||||||
|
# socktop_agent (server)
|
||||||
|
|
||||||
|
Lightweight on‑demand metrics WebSocket server for the socktop TUI.
|
||||||
|
|
||||||
|
Highlights:
|
||||||
|
- Collects system metrics only when requested (keeps idle CPU <1%)
|
||||||
|
- Optional TLS (self‑signed cert auto‑generated & pinned by client)
|
||||||
|
- JSON for fast metrics / disks; protobuf (optionally gzipped) for processes
|
||||||
|
- Accurate per‑process CPU% on Linux via /proc jiffies delta
|
||||||
|
- Optional GPU & temperature metrics (disable via env vars)
|
||||||
|
- Simple token auth (?token=...) support
|
||||||
|
|
||||||
|
Run (no TLS):
|
||||||
|
```
|
||||||
|
cargo install socktop_agent
|
||||||
|
socktop_agent --port 3000
|
||||||
|
```
|
||||||
|
Enable TLS:
|
||||||
|
```
|
||||||
|
SOCKTOP_ENABLE_SSL=1 socktop_agent --port 8443
|
||||||
|
# cert/key stored under $XDG_DATA_HOME/socktop_agent/tls
|
||||||
|
```
|
||||||
|
Environment toggles:
|
||||||
|
- SOCKTOP_AGENT_GPU=0 (disable GPU collection)
|
||||||
|
- SOCKTOP_AGENT_TEMP=0 (disable temperature)
|
||||||
|
- SOCKTOP_TOKEN=secret (require token param from client)
|
||||||
|
- SOCKTOP_AGENT_METRICS_TTL_MS=250 (cache fast metrics window)
|
||||||
|
- SOCKTOP_AGENT_PROCESSES_TTL_MS=1000
|
||||||
|
- SOCKTOP_AGENT_DISKS_TTL_MS=1000
|
||||||
|
|
||||||
|
*NOTE ON ENV vars*
|
||||||
|
|
||||||
|
Generally these have been added for debugging purposes. You do not need to configure them: default values are tuned, and GPU collection will disable itself after the first poll if no GPU is available.
|
||||||
|
|
||||||
|
Systemd unit example & full docs:
|
||||||
|
https://github.com/jasonwitty/socktop
|
||||||
|
|
||||||
|
## WebSocket API Integration Guide
|
||||||
|
|
||||||
|
The socktop_agent exposes a WebSocket API that can be directly integrated with your own applications. This allows you to build custom monitoring dashboards or analysis tools using the agent's metrics.
|
||||||
|
|
||||||
|
### WebSocket Endpoint
|
||||||
|
|
||||||
|
```
|
||||||
|
ws://HOST:PORT/ws # Without TLS
|
||||||
|
wss://HOST:PORT/ws # With TLS
|
||||||
|
```
|
||||||
|
|
||||||
|
With authentication token (if configured):
|
||||||
|
```
|
||||||
|
ws://HOST:PORT/ws?token=YOUR_TOKEN
|
||||||
|
wss://HOST:PORT/ws?token=YOUR_TOKEN
|
||||||
|
```
|
||||||
|
|
||||||
|
### Communication Protocol
|
||||||
|
|
||||||
|
All communication uses JSON format for requests and responses, except for the process list which uses Protocol Buffers (protobuf) format with optional gzip compression.
|
||||||
|
|
||||||
|
#### Request Types
|
||||||
|
|
||||||
|
Send a JSON message with a `type` field to request specific metrics:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{"type": "metrics"} // Request fast-changing metrics (CPU, memory, network)
|
||||||
|
{"type": "disks"} // Request disk information
|
||||||
|
{"type": "processes"} // Request process list (returns protobuf)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Response Formats
|
||||||
|
|
||||||
|
1. **Fast Metrics** (JSON):
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"cpu_total": 12.4,
|
||||||
|
"cpu_per_core": [11.2, 15.7],
|
||||||
|
"mem_total": 33554432,
|
||||||
|
"mem_used": 18321408,
|
||||||
|
"swap_total": 0,
|
||||||
|
"swap_used": 0,
|
||||||
|
"hostname": "myserver",
|
||||||
|
"cpu_temp_c": 42.5,
|
||||||
|
"networks": [{"name":"eth0","received":12345678,"transmitted":87654321}],
|
||||||
|
"gpus": [{"name":"nvidia-0","usage":56.7,"memory_total":8589934592,"memory_used":1073741824,"temp_c":65.0}]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Disks** (JSON):
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{"name":"nvme0n1p2","total":512000000000,"available":320000000000},
|
||||||
|
{"name":"sda1","total":1000000000000,"available":750000000000}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Processes** (Protocol Buffers):
|
||||||
|
|
||||||
|
Processes are returned in Protocol Buffers format, optionally gzip-compressed for large process lists. The protobuf schema is:
|
||||||
|
|
||||||
|
```protobuf
|
||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
message Process {
|
||||||
|
uint32 pid = 1;
|
||||||
|
string name = 2;
|
||||||
|
float cpu_usage = 3;
|
||||||
|
uint64 mem_bytes = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ProcessList {
|
||||||
|
uint32 process_count = 1;
|
||||||
|
repeated Process processes = 2;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example Integration (JavaScript/Node.js)
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
const WebSocket = require('ws');
|
||||||
|
|
||||||
|
// Connect to the agent
|
||||||
|
const ws = new WebSocket('ws://localhost:3000/ws');
|
||||||
|
|
||||||
|
ws.on('open', function open() {
|
||||||
|
console.log('Connected to socktop_agent');
|
||||||
|
|
||||||
|
// Request metrics immediately on connection
|
||||||
|
ws.send(JSON.stringify({type: 'metrics'}));
|
||||||
|
|
||||||
|
// Set up regular polling
|
||||||
|
setInterval(() => {
|
||||||
|
ws.send(JSON.stringify({type: 'metrics'}));
|
||||||
|
}, 1000);
|
||||||
|
|
||||||
|
// Request processes every 3 seconds
|
||||||
|
setInterval(() => {
|
||||||
|
ws.send(JSON.stringify({type: 'processes'}));
|
||||||
|
}, 3000);
|
||||||
|
});
|
||||||
|
|
||||||
|
ws.on('message', function incoming(data) {
|
||||||
|
// Check if the response is JSON or binary (protobuf)
|
||||||
|
try {
|
||||||
|
const jsonData = JSON.parse(data);
|
||||||
|
console.log('Received JSON data:', jsonData);
|
||||||
|
} catch (e) {
|
||||||
|
console.log('Received binary data (protobuf), length:', data.length);
|
||||||
|
// Process binary protobuf data with a library like protobufjs
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
ws.on('close', function close() {
|
||||||
|
console.log('Disconnected from socktop_agent');
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example Integration (Python)
|
||||||
|
|
||||||
|
```python
|
||||||
|
import json
|
||||||
|
import asyncio
|
||||||
|
import websockets
|
||||||
|
|
||||||
|
async def monitor_system():
|
||||||
|
uri = "ws://localhost:3000/ws"
|
||||||
|
async with websockets.connect(uri) as websocket:
|
||||||
|
print("Connected to socktop_agent")
|
||||||
|
|
||||||
|
# Request initial metrics
|
||||||
|
await websocket.send(json.dumps({"type": "metrics"}))
|
||||||
|
|
||||||
|
# Set up regular polling
|
||||||
|
while True:
|
||||||
|
# Request metrics
|
||||||
|
await websocket.send(json.dumps({"type": "metrics"}))
|
||||||
|
|
||||||
|
# Receive and process response
|
||||||
|
response = await websocket.recv()
|
||||||
|
|
||||||
|
# Check if response is JSON or binary (protobuf)
|
||||||
|
try:
|
||||||
|
data = json.loads(response)
|
||||||
|
print(f"CPU: {data['cpu_total']}%, Memory: {data['mem_used']/data['mem_total']*100:.1f}%")
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
print(f"Received binary data, length: {len(response)}")
|
||||||
|
# Process binary protobuf data with a library like protobuf
|
||||||
|
|
||||||
|
# Wait before next poll
|
||||||
|
await asyncio.sleep(1)
|
||||||
|
|
||||||
|
asyncio.run(monitor_system())
|
||||||
|
```
|
||||||
|
|
||||||
|
### Notes for Integration
|
||||||
|
|
||||||
|
1. **Error Handling**: The WebSocket connection may close unexpectedly; implement reconnection logic in your client.
|
||||||
|
|
||||||
|
2. **Rate Limiting**: Avoid excessive polling that could impact the system being monitored. Recommended intervals:
|
||||||
|
- Metrics: 500ms or slower
|
||||||
|
- Processes: 2000ms or slower
|
||||||
|
- Disks: 5000ms or slower
|
||||||
|
|
||||||
|
3. **Authentication**: If the agent is configured with a token, always include it in the WebSocket URL.
|
||||||
|
|
||||||
|
4. **Protocol Buffers Handling**: For processing the binary process list data, use a Protocol Buffers library for your language and the schema provided in the `proto/processes.proto` file.
|
||||||
|
|
||||||
|
5. **Compression**: Process lists may be gzip-compressed. Check if the response starts with the gzip magic bytes (`0x1f, 0x8b`) and decompress if necessary.
|
||||||
|
|
||||||
|
## LLM Integration Guide
|
||||||
|
|
||||||
|
If you're using an LLM to generate code for integrating with socktop_agent, this section provides structured information to help the model understand the API better.
|
||||||
|
|
||||||
|
### API Schema
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# WebSocket API Schema for socktop_agent
|
||||||
|
endpoint: ws://HOST:PORT/ws or wss://HOST:PORT/ws (with TLS)
|
||||||
|
authentication:
|
||||||
|
type: query parameter
|
||||||
|
parameter: token
|
||||||
|
example: ws://HOST:PORT/ws?token=YOUR_TOKEN
|
||||||
|
|
||||||
|
requests:
|
||||||
|
- type: metrics
|
||||||
|
format: JSON
|
||||||
|
example: {"type": "metrics"}
|
||||||
|
description: Fast-changing metrics (CPU, memory, network)
|
||||||
|
|
||||||
|
- type: disks
|
||||||
|
format: JSON
|
||||||
|
example: {"type": "disks"}
|
||||||
|
description: Disk information
|
||||||
|
|
||||||
|
- type: processes
|
||||||
|
format: JSON
|
||||||
|
example: {"type": "processes"}
|
||||||
|
description: Process list (returns protobuf)
|
||||||
|
|
||||||
|
responses:
|
||||||
|
- request_type: metrics
|
||||||
|
format: JSON
|
||||||
|
schema:
|
||||||
|
cpu_total: float # percentage of total CPU usage
|
||||||
|
cpu_per_core: [float] # array of per-core CPU usage percentages
|
||||||
|
mem_total: uint64 # total memory in bytes
|
||||||
|
mem_used: uint64 # used memory in bytes
|
||||||
|
swap_total: uint64 # total swap in bytes
|
||||||
|
swap_used: uint64 # used swap in bytes
|
||||||
|
hostname: string # system hostname
|
||||||
|
cpu_temp_c: float? # CPU temperature in Celsius (optional)
|
||||||
|
networks: [
|
||||||
|
{
|
||||||
|
name: string # network interface name
|
||||||
|
received: uint64 # total bytes received
|
||||||
|
transmitted: uint64 # total bytes transmitted
|
||||||
|
}
|
||||||
|
]
|
||||||
|
gpus: [
|
||||||
|
{
|
||||||
|
name: string # GPU device name
|
||||||
|
usage: float # GPU usage percentage
|
||||||
|
memory_total: uint64 # total GPU memory in bytes
|
||||||
|
memory_used: uint64 # used GPU memory in bytes
|
||||||
|
temp_c: float # GPU temperature in Celsius
|
||||||
|
}
|
||||||
|
]?
|
||||||
|
|
||||||
|
- request_type: disks
|
||||||
|
format: JSON
|
||||||
|
schema:
|
||||||
|
[
|
||||||
|
{
|
||||||
|
name: string # disk name
|
||||||
|
total: uint64 # total space in bytes
|
||||||
|
available: uint64 # available space in bytes
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
- request_type: processes
|
||||||
|
format: Protocol Buffers (optionally gzip-compressed)
|
||||||
|
schema: See protobuf definition below
|
||||||
|
```
|
||||||
|
|
||||||
|
### Protobuf Schema (processes.proto)
|
||||||
|
|
||||||
|
```protobuf
|
||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
message Process {
|
||||||
|
uint32 pid = 1;
|
||||||
|
string name = 2;
|
||||||
|
float cpu_usage = 3;
|
||||||
|
uint64 mem_bytes = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ProcessList {
|
||||||
|
uint32 process_count = 1;
|
||||||
|
repeated Process processes = 2;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Step-by-Step Integration Pseudocode
|
||||||
|
|
||||||
|
```
|
||||||
|
1. Establish WebSocket connection to ws://HOST:PORT/ws
|
||||||
|
- Add token if required: ws://HOST:PORT/ws?token=YOUR_TOKEN
|
||||||
|
|
||||||
|
2. For regular metrics updates:
|
||||||
|
- Send: {"type": "metrics"}
|
||||||
|
- Parse JSON response
|
||||||
|
- Extract CPU, memory, network info
|
||||||
|
|
||||||
|
3. For disk information:
|
||||||
|
- Send: {"type": "disks"}
|
||||||
|
- Parse JSON response
|
||||||
|
- Extract disk usage data
|
||||||
|
|
||||||
|
4. For process list:
|
||||||
|
- Send: {"type": "processes"}
|
||||||
|
- Check if response is binary
|
||||||
|
- If starts with 0x1f, 0x8b bytes:
|
||||||
|
- Decompress using gzip
|
||||||
|
- Parse binary data using protobuf schema
|
||||||
|
- Extract process information
|
||||||
|
|
||||||
|
5. Implement reconnection logic:
|
||||||
|
- On connection close/error
|
||||||
|
- Use exponential backoff
|
||||||
|
|
||||||
|
6. Respect rate limits:
|
||||||
|
- metrics: ≥ 500ms interval
|
||||||
|
- disks: ≥ 5000ms interval
|
||||||
|
- processes: ≥ 2000ms interval
|
||||||
|
```
|
||||||
|
|
||||||
|
### Common Implementation Patterns
|
||||||
|
|
||||||
|
**Pattern 1: Periodic Polling**
|
||||||
|
```javascript
|
||||||
|
// Set up separate timers for different metric types
|
||||||
|
const metricsInterval = setInterval(() => ws.send(JSON.stringify({type: 'metrics'})), 500);
|
||||||
|
const disksInterval = setInterval(() => ws.send(JSON.stringify({type: 'disks'})), 5000);
|
||||||
|
const processesInterval = setInterval(() => ws.send(JSON.stringify({type: 'processes'})), 2000);
|
||||||
|
|
||||||
|
// Clean up on disconnect
|
||||||
|
ws.on('close', () => {
|
||||||
|
clearInterval(metricsInterval);
|
||||||
|
clearInterval(disksInterval);
|
||||||
|
clearInterval(processesInterval);
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
**Pattern 2: Processing Binary Protobuf Data**
|
||||||
|
```javascript
|
||||||
|
// Using protobufjs
|
||||||
|
const root = protobuf.loadSync('processes.proto');
|
||||||
|
const ProcessList = root.lookupType('ProcessList');
|
||||||
|
|
||||||
|
ws.on('message', function(data) {
|
||||||
|
if (typeof data !== 'string') {
|
||||||
|
// Check for gzip compression
|
||||||
|
if (data[0] === 0x1f && data[1] === 0x8b) {
|
||||||
|
data = gunzipSync(data); // Use appropriate decompression library
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode protobuf
|
||||||
|
const processes = ProcessList.decode(new Uint8Array(data));
|
||||||
|
console.log(`Total processes: ${processes.process_count}`);
|
||||||
|
processes.processes.forEach(p => {
|
||||||
|
console.log(`PID: ${p.pid}, Name: ${p.name}, CPU: ${p.cpu_usage}%`);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
});
|
||||||
|
```
|
||||||
|
|
||||||
|
**Pattern 3: Reconnection Logic**
|
||||||
|
```javascript
|
||||||
|
function connect() {
|
||||||
|
const ws = new WebSocket('ws://localhost:3000/ws');
|
||||||
|
|
||||||
|
ws.on('open', () => {
|
||||||
|
console.log('Connected');
|
||||||
|
// Start polling
|
||||||
|
});
|
||||||
|
|
||||||
|
ws.on('close', () => {
|
||||||
|
console.log('Connection lost, reconnecting...');
|
||||||
|
setTimeout(connect, 1000); // Reconnect after 1 second
|
||||||
|
});
|
||||||
|
|
||||||
|
// Handle other events...
|
||||||
|
}
|
||||||
|
|
||||||
|
connect();
|
||||||
|
```
|
||||||
@ -1,13 +1,13 @@
|
|||||||
fn main() {
|
fn main() {
|
||||||
// Vendored protoc for reproducible builds
|
// Vendored protoc for reproducible builds
|
||||||
let protoc = protoc_bin_vendored::protoc_bin_path().expect("protoc");
|
let protoc = protoc_bin_vendored::protoc_bin_path().expect("protoc");
|
||||||
std::env::set_var("PROTOC", &protoc);
|
|
||||||
|
|
||||||
println!("cargo:rerun-if-changed=proto/processes.proto");
|
println!("cargo:rerun-if-changed=proto/processes.proto");
|
||||||
|
|
||||||
// Compile protobuf definitions for processes
|
// Compile protobuf definitions for processes
|
||||||
let mut cfg = prost_build::Config::new();
|
let mut cfg = prost_build::Config::new();
|
||||||
cfg.out_dir(std::env::var("OUT_DIR").unwrap());
|
cfg.out_dir(std::env::var("OUT_DIR").unwrap());
|
||||||
|
cfg.protoc_executable(protoc); // Use the vendored protoc directly
|
||||||
// Use local path (ensures file is inside published crate tarball)
|
// Use local path (ensures file is inside published crate tarball)
|
||||||
cfg.compile_protos(&["proto/processes.proto"], &["proto"]) // relative to CARGO_MANIFEST_DIR
|
cfg.compile_protos(&["proto/processes.proto"], &["proto"]) // relative to CARGO_MANIFEST_DIR
|
||||||
.expect("compile protos");
|
.expect("compile protos");
|
||||||
|
|||||||
95
socktop_agent/src/cache.rs
Normal file
95
socktop_agent/src/cache.rs
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
//! Caching for process metrics and journal entries
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
use tokio::sync::RwLock;
|
||||||
|
|
||||||
|
use crate::types::{ProcessMetricsResponse, JournalResponse};
|
||||||
|
|
||||||
|
/// A cached value plus the bookkeeping needed to decide when it goes stale.
#[derive(Debug, Clone)]
struct CacheEntry<T> {
    // The cached payload.
    data: T,
    // When the value was stored.
    cached_at: Instant,
    // How long the value stays fresh after `cached_at`.
    ttl: Duration,
}

impl<T> CacheEntry<T> {
    /// True once the entry has outlived its time-to-live.
    fn is_expired(&self) -> bool {
        self.cached_at.elapsed() > self.ttl
    }
}
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct ProcessCache {
|
||||||
|
process_metrics: RwLock<HashMap<u32, CacheEntry<ProcessMetricsResponse>>>,
|
||||||
|
journal_entries: RwLock<HashMap<u32, CacheEntry<JournalResponse>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ProcessCache {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
process_metrics: RwLock::new(HashMap::new()),
|
||||||
|
journal_entries: RwLock::new(HashMap::new()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get cached process metrics if available and not expired (250ms TTL)
|
||||||
|
pub async fn get_process_metrics(&self, pid: u32) -> Option<ProcessMetricsResponse> {
|
||||||
|
let cache = self.process_metrics.read().await;
|
||||||
|
if let Some(entry) = cache.get(&pid) {
|
||||||
|
if !entry.is_expired() {
|
||||||
|
return Some(entry.data.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Cache process metrics with 250ms TTL
|
||||||
|
pub async fn set_process_metrics(&self, pid: u32, data: ProcessMetricsResponse) {
|
||||||
|
let mut cache = self.process_metrics.write().await;
|
||||||
|
cache.insert(pid, CacheEntry {
|
||||||
|
data,
|
||||||
|
cached_at: Instant::now(),
|
||||||
|
ttl: Duration::from_millis(250),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get cached journal entries if available and not expired (1s TTL)
|
||||||
|
pub async fn get_journal_entries(&self, pid: u32) -> Option<JournalResponse> {
|
||||||
|
let cache = self.journal_entries.read().await;
|
||||||
|
if let Some(entry) = cache.get(&pid) {
|
||||||
|
if !entry.is_expired() {
|
||||||
|
return Some(entry.data.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Cache journal entries with 1s TTL
|
||||||
|
pub async fn set_journal_entries(&self, pid: u32, data: JournalResponse) {
|
||||||
|
let mut cache = self.journal_entries.write().await;
|
||||||
|
cache.insert(pid, CacheEntry {
|
||||||
|
data,
|
||||||
|
cached_at: Instant::now(),
|
||||||
|
ttl: Duration::from_secs(1),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Clean up expired entries periodically
|
||||||
|
pub async fn cleanup_expired(&self) {
|
||||||
|
{
|
||||||
|
let mut cache = self.process_metrics.write().await;
|
||||||
|
cache.retain(|_, entry| !entry.is_expired());
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let mut cache = self.journal_entries.write().await;
|
||||||
|
cache.retain(|_, entry| !entry.is_expired());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for ProcessCache {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::new()
|
||||||
|
}
|
||||||
|
}
|
||||||
17
socktop_agent/src/lib.rs
Normal file
17
socktop_agent/src/lib.rs
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
//! Library interface for socktop_agent functionality
|
||||||
|
//! This allows testing of agent functions.
|
||||||
|
|
||||||
|
pub mod gpu;
|
||||||
|
pub mod metrics;
|
||||||
|
pub mod proto;
|
||||||
|
pub mod state;
|
||||||
|
pub mod tls;
|
||||||
|
pub mod types;
|
||||||
|
pub mod ws;
|
||||||
|
|
||||||
|
// Re-export commonly used types and functions for testing
|
||||||
|
pub use metrics::{collect_journal_entries, collect_process_metrics};
|
||||||
|
pub use state::{AppState, CacheEntry};
|
||||||
|
pub use types::{
|
||||||
|
DetailedProcessInfo, JournalEntry, JournalResponse, LogLevel, ProcessMetricsResponse,
|
||||||
|
};
|
||||||
@ -8,7 +8,7 @@ mod state;
|
|||||||
mod types;
|
mod types;
|
||||||
mod ws;
|
mod ws;
|
||||||
|
|
||||||
use axum::{http::StatusCode, routing::get, Router};
|
use axum::{Router, http::StatusCode, routing::get};
|
||||||
use std::net::SocketAddr;
|
use std::net::SocketAddr;
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
|
|
||||||
@ -29,12 +29,53 @@ fn arg_value(name: &str) -> Option<String> {
|
|||||||
None
|
None
|
||||||
}
|
}
|
||||||
|
|
||||||
// (tests moved to end of file to satisfy clippy::items_after_test_module)
|
fn main() -> anyhow::Result<()> {
|
||||||
|
// Install rustls crypto provider before any TLS operations
|
||||||
|
// This is required when using axum-server's tls-rustls feature
|
||||||
|
rustls::crypto::aws_lc_rs::default_provider()
|
||||||
|
.install_default()
|
||||||
|
.ok(); // Ignore error if already installed
|
||||||
|
|
||||||
#[tokio::main]
|
#[cfg(feature = "logging")]
|
||||||
async fn main() -> anyhow::Result<()> {
|
|
||||||
tracing_subscriber::fmt::init();
|
tracing_subscriber::fmt::init();
|
||||||
|
|
||||||
|
// Configure Tokio runtime with optimized thread pool for reduced overhead.
|
||||||
|
//
|
||||||
|
// The agent is primarily I/O-bound (WebSocket, /proc file reads, sysinfo)
|
||||||
|
// with no CPU-intensive or blocking operations, so a smaller thread pool
|
||||||
|
// is beneficial:
|
||||||
|
//
|
||||||
|
// Benefits:
|
||||||
|
// - Lower memory footprint (~1-2MB per thread saved)
|
||||||
|
// - Reduced context switching overhead
|
||||||
|
// - Fewer idle threads consuming resources
|
||||||
|
// - Better for resource-constrained systems
|
||||||
|
//
|
||||||
|
// Trade-offs:
|
||||||
|
// - Slightly reduced throughput under very high concurrent connections
|
||||||
|
// - Could introduce latency if blocking operations are added (don't do this!)
|
||||||
|
//
|
||||||
|
// Default: 2 threads (sufficient for typical workloads with 1-10 clients)
|
||||||
|
// Override: Set SOCKTOP_WORKER_THREADS=4 to use more threads if needed
|
||||||
|
//
|
||||||
|
// Note: Default Tokio uses num_cpus threads which is excessive for this workload.
|
||||||
|
|
||||||
|
let worker_threads = std::env::var("SOCKTOP_WORKER_THREADS")
|
||||||
|
.ok()
|
||||||
|
.and_then(|s| s.parse::<usize>().ok())
|
||||||
|
.unwrap_or(2)
|
||||||
|
.clamp(1, 16); // Ensure 1-16 threads
|
||||||
|
|
||||||
|
let runtime = tokio::runtime::Builder::new_multi_thread()
|
||||||
|
.worker_threads(worker_threads)
|
||||||
|
.thread_name("socktop-agent")
|
||||||
|
.enable_all()
|
||||||
|
.build()?;
|
||||||
|
|
||||||
|
runtime.block_on(async_main())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn async_main() -> anyhow::Result<()> {
|
||||||
// Version flag (print and exit). Keep before heavy initialization.
|
// Version flag (print and exit). Keep before heavy initialization.
|
||||||
if arg_flag("--version") || arg_flag("-V") {
|
if arg_flag("--version") || arg_flag("-V") {
|
||||||
println!("socktop_agent {}", env!("CARGO_PKG_VERSION"));
|
println!("socktop_agent {}", env!("CARGO_PKG_VERSION"));
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@ -1,9 +1,8 @@
|
|||||||
//! Shared agent state: sysinfo handles and hot JSON cache.
|
//! Shared agent state: sysinfo handles and hot JSON cache.
|
||||||
|
|
||||||
#[cfg(target_os = "linux")]
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::sync::atomic::{AtomicBool, AtomicUsize};
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use std::sync::atomic::{AtomicBool, AtomicUsize};
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
use sysinfo::{Components, Disks, Networks, System};
|
use sysinfo::{Components, Disks, Networks, System};
|
||||||
use tokio::sync::Mutex;
|
use tokio::sync::Mutex;
|
||||||
@ -20,6 +19,22 @@ pub struct ProcCpuTracker {
|
|||||||
pub last_per_pid: HashMap<u32, u64>,
|
pub last_per_pid: HashMap<u32, u64>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(not(target_os = "linux"))]
|
||||||
|
pub struct ProcessCache {
|
||||||
|
pub names: HashMap<u32, String>,
|
||||||
|
pub reusable_vec: Vec<crate::types::ProcessInfo>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(not(target_os = "linux"))]
|
||||||
|
impl Default for ProcessCache {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
names: HashMap::with_capacity(1000), // Pre-allocate for typical modern system process count
|
||||||
|
reusable_vec: Vec::with_capacity(1000),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct AppState {
|
pub struct AppState {
|
||||||
pub sys: SharedSystem,
|
pub sys: SharedSystem,
|
||||||
@ -32,6 +47,10 @@ pub struct AppState {
|
|||||||
#[cfg(target_os = "linux")]
|
#[cfg(target_os = "linux")]
|
||||||
pub proc_cpu: Arc<Mutex<ProcCpuTracker>>,
|
pub proc_cpu: Arc<Mutex<ProcCpuTracker>>,
|
||||||
|
|
||||||
|
// Process name caching and vector reuse for non-Linux to reduce allocations
|
||||||
|
#[cfg(not(target_os = "linux"))]
|
||||||
|
pub proc_cache: Arc<Mutex<ProcessCache>>,
|
||||||
|
|
||||||
// Connection tracking (to allow future idle sleeps if desired)
|
// Connection tracking (to allow future idle sleeps if desired)
|
||||||
pub client_count: Arc<AtomicUsize>,
|
pub client_count: Arc<AtomicUsize>,
|
||||||
|
|
||||||
@ -44,6 +63,11 @@ pub struct AppState {
|
|||||||
pub cache_metrics: Arc<Mutex<CacheEntry<crate::types::Metrics>>>,
|
pub cache_metrics: Arc<Mutex<CacheEntry<crate::types::Metrics>>>,
|
||||||
pub cache_disks: Arc<Mutex<CacheEntry<Vec<crate::types::DiskInfo>>>>,
|
pub cache_disks: Arc<Mutex<CacheEntry<Vec<crate::types::DiskInfo>>>>,
|
||||||
pub cache_processes: Arc<Mutex<CacheEntry<crate::types::ProcessesPayload>>>,
|
pub cache_processes: Arc<Mutex<CacheEntry<crate::types::ProcessesPayload>>>,
|
||||||
|
|
||||||
|
// Process detail caches (per-PID)
|
||||||
|
pub cache_process_metrics:
|
||||||
|
Arc<Mutex<HashMap<u32, CacheEntry<crate::types::ProcessMetricsResponse>>>>,
|
||||||
|
pub cache_journal_entries: Arc<Mutex<HashMap<u32, CacheEntry<crate::types::JournalResponse>>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
@ -52,6 +76,12 @@ pub struct CacheEntry<T> {
|
|||||||
pub value: Option<T>,
|
pub value: Option<T>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<T> Default for CacheEntry<T> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::new()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl<T> CacheEntry<T> {
|
impl<T> CacheEntry<T> {
|
||||||
pub fn new() -> Self {
|
pub fn new() -> Self {
|
||||||
Self {
|
Self {
|
||||||
@ -66,11 +96,14 @@ impl<T> CacheEntry<T> {
|
|||||||
self.value = Some(v);
|
self.value = Some(v);
|
||||||
self.at = Some(Instant::now());
|
self.at = Some(Instant::now());
|
||||||
}
|
}
|
||||||
pub fn take_clone(&self) -> Option<T>
|
pub fn get(&self) -> Option<&T> {
|
||||||
where
|
self.value.as_ref()
|
||||||
T: Clone,
|
}
|
||||||
{
|
}
|
||||||
self.value.clone()
|
|
||||||
|
impl Default for AppState {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::new()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -89,6 +122,8 @@ impl AppState {
|
|||||||
hostname: System::host_name().unwrap_or_else(|| "unknown".into()),
|
hostname: System::host_name().unwrap_or_else(|| "unknown".into()),
|
||||||
#[cfg(target_os = "linux")]
|
#[cfg(target_os = "linux")]
|
||||||
proc_cpu: Arc::new(Mutex::new(ProcCpuTracker::default())),
|
proc_cpu: Arc::new(Mutex::new(ProcCpuTracker::default())),
|
||||||
|
#[cfg(not(target_os = "linux"))]
|
||||||
|
proc_cache: Arc::new(Mutex::new(ProcessCache::default())),
|
||||||
client_count: Arc::new(AtomicUsize::new(0)),
|
client_count: Arc::new(AtomicUsize::new(0)),
|
||||||
auth_token: std::env::var("SOCKTOP_TOKEN")
|
auth_token: std::env::var("SOCKTOP_TOKEN")
|
||||||
.ok()
|
.ok()
|
||||||
@ -98,6 +133,8 @@ impl AppState {
|
|||||||
cache_metrics: Arc::new(Mutex::new(CacheEntry::new())),
|
cache_metrics: Arc::new(Mutex::new(CacheEntry::new())),
|
||||||
cache_disks: Arc::new(Mutex::new(CacheEntry::new())),
|
cache_disks: Arc::new(Mutex::new(CacheEntry::new())),
|
||||||
cache_processes: Arc::new(Mutex::new(CacheEntry::new())),
|
cache_processes: Arc::new(Mutex::new(CacheEntry::new())),
|
||||||
|
cache_process_metrics: Arc::new(Mutex::new(HashMap::new())),
|
||||||
|
cache_journal_entries: Arc::new(Mutex::new(HashMap::new())),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -9,6 +9,8 @@ pub struct DiskInfo {
|
|||||||
pub name: String,
|
pub name: String,
|
||||||
pub total: u64,
|
pub total: u64,
|
||||||
pub available: u64,
|
pub available: u64,
|
||||||
|
pub temperature: Option<f32>,
|
||||||
|
pub is_partition: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize)]
|
#[derive(Debug, Clone, Serialize)]
|
||||||
@ -47,3 +49,76 @@ pub struct ProcessesPayload {
|
|||||||
pub process_count: usize,
|
pub process_count: usize,
|
||||||
pub top_processes: Vec<ProcessInfo>,
|
pub top_processes: Vec<ProcessInfo>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize)]
|
||||||
|
pub struct ThreadInfo {
|
||||||
|
pub tid: u32, // Thread ID
|
||||||
|
pub name: String, // Thread name (from /proc/{pid}/task/{tid}/comm)
|
||||||
|
pub cpu_time_user: u64, // User CPU time in microseconds
|
||||||
|
pub cpu_time_system: u64, // System CPU time in microseconds
|
||||||
|
pub status: String, // Thread status (Running, Sleeping, etc.)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize)]
|
||||||
|
pub struct DetailedProcessInfo {
|
||||||
|
pub pid: u32,
|
||||||
|
pub name: String,
|
||||||
|
pub command: String,
|
||||||
|
pub cpu_usage: f32,
|
||||||
|
pub mem_bytes: u64,
|
||||||
|
pub virtual_mem_bytes: u64,
|
||||||
|
pub shared_mem_bytes: Option<u64>,
|
||||||
|
pub thread_count: u32,
|
||||||
|
pub fd_count: Option<u32>,
|
||||||
|
pub status: String,
|
||||||
|
pub parent_pid: Option<u32>,
|
||||||
|
pub user_id: u32,
|
||||||
|
pub group_id: u32,
|
||||||
|
pub start_time: u64, // Unix timestamp
|
||||||
|
pub cpu_time_user: u64, // Microseconds
|
||||||
|
pub cpu_time_system: u64, // Microseconds
|
||||||
|
pub read_bytes: Option<u64>,
|
||||||
|
pub write_bytes: Option<u64>,
|
||||||
|
pub working_directory: Option<String>,
|
||||||
|
pub executable_path: Option<String>,
|
||||||
|
pub child_processes: Vec<DetailedProcessInfo>,
|
||||||
|
pub threads: Vec<ThreadInfo>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize)]
|
||||||
|
pub struct ProcessMetricsResponse {
|
||||||
|
pub process: DetailedProcessInfo,
|
||||||
|
pub cached_at: u64, // Unix timestamp when this data was cached
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize)]
|
||||||
|
pub struct JournalEntry {
|
||||||
|
pub timestamp: String, // ISO 8601 formatted timestamp
|
||||||
|
pub priority: LogLevel,
|
||||||
|
pub message: String,
|
||||||
|
pub unit: Option<String>, // systemd unit name
|
||||||
|
pub pid: Option<u32>,
|
||||||
|
pub comm: Option<String>, // process command name
|
||||||
|
pub uid: Option<u32>,
|
||||||
|
pub gid: Option<u32>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize)]
|
||||||
|
pub enum LogLevel {
|
||||||
|
Emergency = 0,
|
||||||
|
Alert = 1,
|
||||||
|
Critical = 2,
|
||||||
|
Error = 3,
|
||||||
|
Warning = 4,
|
||||||
|
Notice = 5,
|
||||||
|
Info = 6,
|
||||||
|
Debug = 7,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize)]
|
||||||
|
pub struct JournalResponse {
|
||||||
|
pub entries: Vec<JournalEntry>,
|
||||||
|
pub total_count: u32,
|
||||||
|
pub truncated: bool,
|
||||||
|
pub cached_at: u64, // Unix timestamp when this data was cached
|
||||||
|
}
|
||||||
|
|||||||
@ -5,28 +5,50 @@ use axum::{
|
|||||||
extract::{Query, State, WebSocketUpgrade},
|
extract::{Query, State, WebSocketUpgrade},
|
||||||
response::Response,
|
response::Response,
|
||||||
};
|
};
|
||||||
use flate2::{write::GzEncoder, Compression};
|
use flate2::{Compression, write::GzEncoder};
|
||||||
use futures_util::StreamExt;
|
use futures_util::StreamExt;
|
||||||
|
use once_cell::sync::OnceCell;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
|
use tokio::sync::Mutex;
|
||||||
|
|
||||||
use crate::metrics::{collect_disks, collect_fast_metrics, collect_processes_all};
|
use crate::metrics::{collect_disks, collect_fast_metrics, collect_processes_all};
|
||||||
use crate::proto::pb;
|
use crate::proto::pb;
|
||||||
use crate::state::AppState;
|
use crate::state::AppState;
|
||||||
|
|
||||||
|
// Compression threshold based on typical payload size
|
||||||
|
// Temporarily increased for testing - revert to 768 for production
|
||||||
|
//const COMPRESSION_THRESHOLD: usize = 50_000;
|
||||||
|
const COMPRESSION_THRESHOLD: usize = 768;
|
||||||
|
|
||||||
|
// Reusable buffer for compression to avoid allocations
|
||||||
|
struct CompressionCache {
|
||||||
|
processes_vec: Vec<pb::Process>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl CompressionCache {
|
||||||
|
fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
processes_vec: Vec::with_capacity(512), // Typical process count
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static COMPRESSION_CACHE: OnceCell<Mutex<CompressionCache>> = OnceCell::new();
|
||||||
|
|
||||||
pub async fn ws_handler(
|
pub async fn ws_handler(
|
||||||
ws: WebSocketUpgrade,
|
ws: WebSocketUpgrade,
|
||||||
State(state): State<AppState>,
|
State(state): State<AppState>,
|
||||||
Query(q): Query<HashMap<String, String>>,
|
Query(q): Query<HashMap<String, String>>,
|
||||||
) -> Response {
|
) -> Response {
|
||||||
// optional auth
|
// optional auth
|
||||||
if let Some(expected) = state.auth_token.as_ref() {
|
if let Some(expected) = state.auth_token.as_ref()
|
||||||
if q.get("token") != Some(expected) {
|
&& q.get("token") != Some(expected)
|
||||||
|
{
|
||||||
return ws.on_upgrade(|socket| async move {
|
return ws.on_upgrade(|socket| async move {
|
||||||
let _ = socket.close().await;
|
let _ = socket.close().await;
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
}
|
|
||||||
ws.on_upgrade(move |socket| handle_socket(socket, state))
|
ws.on_upgrade(move |socket| handle_socket(socket, state))
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -46,39 +68,135 @@ async fn handle_socket(mut socket: WebSocket, state: AppState) {
|
|||||||
}
|
}
|
||||||
Message::Text(ref text) if text == "get_processes" => {
|
Message::Text(ref text) if text == "get_processes" => {
|
||||||
let payload = collect_processes_all(&state).await;
|
let payload = collect_processes_all(&state).await;
|
||||||
|
|
||||||
// Map to protobuf message
|
// Map to protobuf message
|
||||||
let rows: Vec<pb::Process> = payload
|
// Get cached buffers
|
||||||
.top_processes
|
let cache = COMPRESSION_CACHE.get_or_init(|| Mutex::new(CompressionCache::new()));
|
||||||
.into_iter()
|
let mut cache = cache.lock().await;
|
||||||
.map(|p| pb::Process {
|
|
||||||
|
// Reuse process vector to build the list
|
||||||
|
cache.processes_vec.clear();
|
||||||
|
cache
|
||||||
|
.processes_vec
|
||||||
|
.extend(payload.top_processes.into_iter().map(|p| pb::Process {
|
||||||
pid: p.pid,
|
pid: p.pid,
|
||||||
name: p.name,
|
name: p.name,
|
||||||
cpu_usage: p.cpu_usage,
|
cpu_usage: p.cpu_usage,
|
||||||
mem_bytes: p.mem_bytes,
|
mem_bytes: p.mem_bytes,
|
||||||
})
|
}));
|
||||||
.collect();
|
|
||||||
let pb = pb::Processes {
|
let pb = pb::Processes {
|
||||||
process_count: payload.process_count as u64,
|
process_count: payload.process_count as u64,
|
||||||
rows,
|
rows: std::mem::take(&mut cache.processes_vec),
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut buf = Vec::with_capacity(8 * 1024);
|
let mut buf = Vec::with_capacity(8 * 1024);
|
||||||
if prost::Message::encode(&pb, &mut buf).is_err() {
|
if prost::Message::encode(&pb, &mut buf).is_err() {
|
||||||
let _ = socket.send(Message::Close(None)).await;
|
let _ = socket.send(Message::Close(None)).await;
|
||||||
} else {
|
} else {
|
||||||
// compress if large
|
// compress if large
|
||||||
if buf.len() <= 768 {
|
if buf.len() <= COMPRESSION_THRESHOLD {
|
||||||
let _ = socket.send(Message::Binary(buf)).await;
|
let _ = socket.send(Message::Binary(buf)).await;
|
||||||
} else {
|
} else {
|
||||||
let mut enc = GzEncoder::new(Vec::new(), Compression::fast());
|
// Create a new encoder for each message to ensure proper gzip headers
|
||||||
if enc.write_all(&buf).is_ok() {
|
let mut encoder =
|
||||||
let bin = enc.finish().unwrap_or(buf);
|
GzEncoder::new(Vec::with_capacity(buf.len()), Compression::fast());
|
||||||
let _ = socket.send(Message::Binary(bin)).await;
|
match encoder.write_all(&buf).and_then(|_| encoder.finish()) {
|
||||||
} else {
|
Ok(compressed) => {
|
||||||
|
let _ = socket.send(Message::Binary(compressed)).await;
|
||||||
|
}
|
||||||
|
Err(_) => {
|
||||||
let _ = socket.send(Message::Binary(buf)).await;
|
let _ = socket.send(Message::Binary(buf)).await;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
drop(cache); // Explicit drop to release mutex early
|
||||||
|
}
|
||||||
|
Message::Text(ref text) if text.starts_with("get_process_metrics:") => {
|
||||||
|
if let Some(pid_str) = text.strip_prefix("get_process_metrics:")
|
||||||
|
&& let Ok(pid) = pid_str.parse::<u32>()
|
||||||
|
{
|
||||||
|
let ttl = std::time::Duration::from_millis(250); // 250ms TTL
|
||||||
|
|
||||||
|
// Check cache first
|
||||||
|
{
|
||||||
|
let cache = state.cache_process_metrics.lock().await;
|
||||||
|
if let Some(entry) = cache.get(&pid)
|
||||||
|
&& entry.is_fresh(ttl)
|
||||||
|
&& let Some(cached_response) = entry.get()
|
||||||
|
{
|
||||||
|
let _ = send_json(&mut socket, cached_response).await;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect fresh data
|
||||||
|
match crate::metrics::collect_process_metrics(pid, &state).await {
|
||||||
|
Ok(response) => {
|
||||||
|
// Cache the response
|
||||||
|
{
|
||||||
|
let mut cache = state.cache_process_metrics.lock().await;
|
||||||
|
cache
|
||||||
|
.entry(pid)
|
||||||
|
.or_insert_with(crate::state::CacheEntry::new)
|
||||||
|
.set(response.clone());
|
||||||
|
}
|
||||||
|
let _ = send_json(&mut socket, &response).await;
|
||||||
|
}
|
||||||
|
Err(err) => {
|
||||||
|
let error_response = serde_json::json!({
|
||||||
|
"error": err,
|
||||||
|
"request": "get_process_metrics",
|
||||||
|
"pid": pid
|
||||||
|
});
|
||||||
|
let _ = send_json(&mut socket, &error_response).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Message::Text(ref text) if text.starts_with("get_journal_entries:") => {
|
||||||
|
if let Some(pid_str) = text.strip_prefix("get_journal_entries:")
|
||||||
|
&& let Ok(pid) = pid_str.parse::<u32>()
|
||||||
|
{
|
||||||
|
let ttl = std::time::Duration::from_secs(1); // 1s TTL
|
||||||
|
|
||||||
|
// Check cache first
|
||||||
|
{
|
||||||
|
let cache = state.cache_journal_entries.lock().await;
|
||||||
|
if let Some(entry) = cache.get(&pid)
|
||||||
|
&& entry.is_fresh(ttl)
|
||||||
|
&& let Some(cached_response) = entry.get()
|
||||||
|
{
|
||||||
|
let _ = send_json(&mut socket, cached_response).await;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Collect fresh data
|
||||||
|
match crate::metrics::collect_journal_entries(pid) {
|
||||||
|
Ok(response) => {
|
||||||
|
// Cache the response
|
||||||
|
{
|
||||||
|
let mut cache = state.cache_journal_entries.lock().await;
|
||||||
|
cache
|
||||||
|
.entry(pid)
|
||||||
|
.or_insert_with(crate::state::CacheEntry::new)
|
||||||
|
.set(response.clone());
|
||||||
|
}
|
||||||
|
let _ = send_json(&mut socket, &response).await;
|
||||||
|
}
|
||||||
|
Err(err) => {
|
||||||
|
let error_response = serde_json::json!({
|
||||||
|
"error": err,
|
||||||
|
"request": "get_journal_entries",
|
||||||
|
"pid": pid
|
||||||
|
});
|
||||||
|
let _ = send_json(&mut socket, &error_response).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
Message::Close(_) => break,
|
Message::Close(_) => break,
|
||||||
_ => {}
|
_ => {}
|
||||||
}
|
}
|
||||||
@ -91,7 +209,7 @@ async fn handle_socket(mut socket: WebSocket, state: AppState) {
|
|||||||
// Small, cheap gzip for larger payloads; send text for small.
|
// Small, cheap gzip for larger payloads; send text for small.
|
||||||
async fn send_json<T: serde::Serialize>(ws: &mut WebSocket, value: &T) -> Result<(), axum::Error> {
|
async fn send_json<T: serde::Serialize>(ws: &mut WebSocket, value: &T) -> Result<(), axum::Error> {
|
||||||
let json = serde_json::to_string(value).expect("serialize");
|
let json = serde_json::to_string(value).expect("serialize");
|
||||||
if json.len() <= 768 {
|
if json.len() <= COMPRESSION_THRESHOLD {
|
||||||
return ws.send(Message::Text(json)).await;
|
return ws.send(Message::Text(json)).await;
|
||||||
}
|
}
|
||||||
let mut enc = GzEncoder::new(Vec::new(), Compression::fast());
|
let mut enc = GzEncoder::new(Vec::new(), Compression::fast());
|
||||||
@ -99,3 +217,83 @@ async fn send_json<T: serde::Serialize>(ws: &mut WebSocket, value: &T) -> Result
|
|||||||
let bin = enc.finish().unwrap_or_else(|_| json.into_bytes());
|
let bin = enc.finish().unwrap_or_else(|_| json.into_bytes());
|
||||||
ws.send(Message::Binary(bin)).await
|
ws.send(Message::Binary(bin)).await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
use prost::Message as ProstMessage;
|
||||||
|
use sysinfo::System;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_process_list_not_empty() {
|
||||||
|
// Initialize system data first to ensure we have processes
|
||||||
|
let mut sys = System::new_all();
|
||||||
|
sys.refresh_all();
|
||||||
|
|
||||||
|
// Create state and put the refreshed system in it
|
||||||
|
let state = AppState::new();
|
||||||
|
{
|
||||||
|
let mut sys_lock = state.sys.lock().await;
|
||||||
|
*sys_lock = sys;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get processes directly using the collection function
|
||||||
|
let processes = collect_processes_all(&state).await;
|
||||||
|
|
||||||
|
// Convert to protobuf message format
|
||||||
|
let cache = COMPRESSION_CACHE.get_or_init(|| Mutex::new(CompressionCache::new()));
|
||||||
|
let mut cache = cache.lock().await;
|
||||||
|
|
||||||
|
// Reuse process vector to build the list
|
||||||
|
cache.processes_vec.clear();
|
||||||
|
cache
|
||||||
|
.processes_vec
|
||||||
|
.extend(processes.top_processes.into_iter().map(|p| pb::Process {
|
||||||
|
pid: p.pid,
|
||||||
|
name: p.name,
|
||||||
|
cpu_usage: p.cpu_usage,
|
||||||
|
mem_bytes: p.mem_bytes,
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Create the protobuf message
|
||||||
|
let pb = pb::Processes {
|
||||||
|
process_count: processes.process_count as u64,
|
||||||
|
rows: cache.processes_vec.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
// Test protobuf encoding/decoding
|
||||||
|
let mut buf = Vec::new();
|
||||||
|
prost::Message::encode(&pb, &mut buf).expect("Failed to encode protobuf");
|
||||||
|
let decoded = pb::Processes::decode(buf.as_slice()).expect("Failed to decode protobuf");
|
||||||
|
|
||||||
|
// Print debug info
|
||||||
|
println!("Process count: {}", pb.process_count);
|
||||||
|
println!("Process vector length: {}", pb.rows.len());
|
||||||
|
println!("Encoded size: {} bytes", buf.len());
|
||||||
|
println!("Decoded process count: {}", decoded.rows.len());
|
||||||
|
|
||||||
|
// Print first few processes if available
|
||||||
|
for (i, process) in pb.rows.iter().take(5).enumerate() {
|
||||||
|
println!(
|
||||||
|
"Process {}: {} (PID: {}) CPU: {:.1}% MEM: {} bytes",
|
||||||
|
i + 1,
|
||||||
|
process.name,
|
||||||
|
process.pid,
|
||||||
|
process.cpu_usage,
|
||||||
|
process.mem_bytes
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate
|
||||||
|
assert!(!pb.rows.is_empty(), "Process list should not be empty");
|
||||||
|
assert!(
|
||||||
|
pb.process_count > 0,
|
||||||
|
"Process count should be greater than 0"
|
||||||
|
);
|
||||||
|
assert_eq!(
|
||||||
|
pb.process_count as usize,
|
||||||
|
pb.rows.len(),
|
||||||
|
"Process count mismatch with actual rows"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
132
socktop_agent/tests/cache_tests.rs
Normal file
132
socktop_agent/tests/cache_tests.rs
Normal file
@ -0,0 +1,132 @@
|
|||||||
|
//! Tests for the process cache functionality
|
||||||
|
|
||||||
|
use socktop_agent::state::{AppState, CacheEntry};
|
||||||
|
use socktop_agent::types::{DetailedProcessInfo, JournalResponse, ProcessMetricsResponse};
|
||||||
|
use std::time::Duration;
|
||||||
|
use tokio::time::sleep;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_process_cache_ttl() {
|
||||||
|
let state = AppState::new();
|
||||||
|
let pid = 12345;
|
||||||
|
|
||||||
|
// Create mock data
|
||||||
|
let process_info = DetailedProcessInfo {
|
||||||
|
pid,
|
||||||
|
name: "test_process".to_string(),
|
||||||
|
command: "test command".to_string(),
|
||||||
|
cpu_usage: 50.0,
|
||||||
|
mem_bytes: 1024 * 1024,
|
||||||
|
virtual_mem_bytes: 2048 * 1024,
|
||||||
|
shared_mem_bytes: Some(512 * 1024),
|
||||||
|
thread_count: 4,
|
||||||
|
fd_count: Some(10),
|
||||||
|
status: "Running".to_string(),
|
||||||
|
parent_pid: Some(1),
|
||||||
|
user_id: 1000,
|
||||||
|
group_id: 1000,
|
||||||
|
start_time: 1234567890,
|
||||||
|
cpu_time_user: 100000,
|
||||||
|
cpu_time_system: 50000,
|
||||||
|
read_bytes: Some(1024),
|
||||||
|
write_bytes: Some(2048),
|
||||||
|
working_directory: Some("/tmp".to_string()),
|
||||||
|
executable_path: Some("/usr/bin/test".to_string()),
|
||||||
|
child_processes: vec![],
|
||||||
|
threads: vec![],
|
||||||
|
};
|
||||||
|
|
||||||
|
let metrics_response = ProcessMetricsResponse {
|
||||||
|
process: process_info,
|
||||||
|
cached_at: 1234567890,
|
||||||
|
};
|
||||||
|
|
||||||
|
let journal_response = JournalResponse {
|
||||||
|
entries: vec![],
|
||||||
|
total_count: 0,
|
||||||
|
truncated: false,
|
||||||
|
cached_at: 1234567890,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Test process metrics caching
|
||||||
|
{
|
||||||
|
let mut cache = state.cache_process_metrics.lock().await;
|
||||||
|
cache
|
||||||
|
.entry(pid)
|
||||||
|
.or_insert_with(CacheEntry::new)
|
||||||
|
.set(metrics_response.clone());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should get cached value immediately
|
||||||
|
{
|
||||||
|
let cache = state.cache_process_metrics.lock().await;
|
||||||
|
let ttl = Duration::from_millis(250);
|
||||||
|
if let Some(entry) = cache.get(&pid) {
|
||||||
|
assert!(entry.is_fresh(ttl));
|
||||||
|
assert!(entry.get().is_some());
|
||||||
|
assert_eq!(entry.get().unwrap().process.pid, pid);
|
||||||
|
} else {
|
||||||
|
panic!("Expected cached entry");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
println!("✓ Process metrics cached and retrieved successfully");
|
||||||
|
|
||||||
|
// Test journal entries caching
|
||||||
|
{
|
||||||
|
let mut cache = state.cache_journal_entries.lock().await;
|
||||||
|
cache
|
||||||
|
.entry(pid)
|
||||||
|
.or_insert_with(CacheEntry::new)
|
||||||
|
.set(journal_response.clone());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should get cached value immediately
|
||||||
|
{
|
||||||
|
let cache = state.cache_journal_entries.lock().await;
|
||||||
|
let ttl = Duration::from_secs(1);
|
||||||
|
if let Some(entry) = cache.get(&pid) {
|
||||||
|
assert!(entry.is_fresh(ttl));
|
||||||
|
assert!(entry.get().is_some());
|
||||||
|
assert_eq!(entry.get().unwrap().total_count, 0);
|
||||||
|
} else {
|
||||||
|
panic!("Expected cached entry");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
println!("✓ Journal entries cached and retrieved successfully");
|
||||||
|
|
||||||
|
// Wait for process metrics to expire (250ms + buffer)
|
||||||
|
sleep(Duration::from_millis(300)).await;
|
||||||
|
|
||||||
|
// Process metrics should be expired now
|
||||||
|
{
|
||||||
|
let cache = state.cache_process_metrics.lock().await;
|
||||||
|
let ttl = Duration::from_millis(250);
|
||||||
|
if let Some(entry) = cache.get(&pid) {
|
||||||
|
assert!(!entry.is_fresh(ttl));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
println!("✓ Process metrics correctly expired after TTL");
|
||||||
|
|
||||||
|
// Journal entries should still be valid (1s TTL)
|
||||||
|
{
|
||||||
|
let cache = state.cache_journal_entries.lock().await;
|
||||||
|
let ttl = Duration::from_secs(1);
|
||||||
|
if let Some(entry) = cache.get(&pid) {
|
||||||
|
assert!(entry.is_fresh(ttl));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
println!("✓ Journal entries still valid within TTL");
|
||||||
|
|
||||||
|
// Wait for journal entries to expire (additional 800ms to reach 1s total)
|
||||||
|
sleep(Duration::from_millis(800)).await;
|
||||||
|
|
||||||
|
// Journal entries should be expired now
|
||||||
|
{
|
||||||
|
let cache = state.cache_journal_entries.lock().await;
|
||||||
|
let ttl = Duration::from_secs(1);
|
||||||
|
if let Some(entry) = cache.get(&pid) {
|
||||||
|
assert!(!entry.is_fresh(ttl));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
println!("✓ Journal entries correctly expired after TTL");
|
||||||
|
}
|
||||||
89
socktop_agent/tests/process_details.rs
Normal file
89
socktop_agent/tests/process_details.rs
Normal file
@ -0,0 +1,89 @@
|
|||||||
|
//! Tests for process detail collection functionality
|
||||||
|
|
||||||
|
use socktop_agent::metrics::{collect_journal_entries, collect_process_metrics};
|
||||||
|
use socktop_agent::state::AppState;
|
||||||
|
use std::process;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_collect_process_metrics_self() {
|
||||||
|
// Test collecting metrics for our own process
|
||||||
|
let pid = process::id();
|
||||||
|
let state = AppState::new();
|
||||||
|
|
||||||
|
match collect_process_metrics(pid, &state).await {
|
||||||
|
Ok(response) => {
|
||||||
|
assert_eq!(response.process.pid, pid);
|
||||||
|
assert!(!response.process.name.is_empty());
|
||||||
|
// Command might be empty on some systems, so don't assert on it
|
||||||
|
assert!(response.cached_at > 0);
|
||||||
|
println!(
|
||||||
|
"✓ Process metrics collected for PID {}: {} ({})",
|
||||||
|
pid, response.process.name, response.process.command
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
// This might fail if sysinfo can't find the process, which is possible
|
||||||
|
println!("⚠ Warning: Failed to collect process metrics for self: {e}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_collect_journal_entries_self() {
|
||||||
|
// Test collecting journal entries for our own process
|
||||||
|
let pid = process::id();
|
||||||
|
|
||||||
|
match collect_journal_entries(pid) {
|
||||||
|
Ok(response) => {
|
||||||
|
assert!(response.cached_at > 0);
|
||||||
|
println!(
|
||||||
|
"✓ Journal entries collected for PID {}: {} entries",
|
||||||
|
pid, response.total_count
|
||||||
|
);
|
||||||
|
if !response.entries.is_empty() {
|
||||||
|
let entry = &response.entries[0];
|
||||||
|
println!(" Latest entry: {}", entry.message);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
// This might fail if journalctl is not available or restricted
|
||||||
|
println!("⚠ Warning: Failed to collect journal entries for self: {e}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_collect_process_metrics_invalid_pid() {
|
||||||
|
// Test with an invalid PID
|
||||||
|
let invalid_pid = 999999;
|
||||||
|
let state = AppState::new();
|
||||||
|
|
||||||
|
match collect_process_metrics(invalid_pid, &state).await {
|
||||||
|
Ok(_) => {
|
||||||
|
println!("⚠ Warning: Unexpectedly found process for invalid PID {invalid_pid}");
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
println!("✓ Correctly failed for invalid PID {invalid_pid}: {e}");
|
||||||
|
assert!(e.contains("not found"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_collect_journal_entries_invalid_pid() {
|
||||||
|
// Test with an invalid PID - journalctl might still return empty results
|
||||||
|
let invalid_pid = 999999;
|
||||||
|
|
||||||
|
match collect_journal_entries(invalid_pid) {
|
||||||
|
Ok(response) => {
|
||||||
|
println!(
|
||||||
|
"✓ Journal query completed for invalid PID {} (empty result expected): {} entries",
|
||||||
|
invalid_pid, response.total_count
|
||||||
|
);
|
||||||
|
// Should be empty or very few entries
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
println!("✓ Journal query failed for invalid PID {invalid_pid}: {e}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@ -1,4 +1,3 @@
|
|||||||
use assert_cmd::prelude::*;
|
|
||||||
use std::fs;
|
use std::fs;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
use std::process::Command;
|
use std::process::Command;
|
||||||
@ -17,7 +16,7 @@ fn generates_self_signed_cert_and_key_in_xdg_path() {
|
|||||||
let xdg = tmpdir.path().to_path_buf();
|
let xdg = tmpdir.path().to_path_buf();
|
||||||
|
|
||||||
// Run the agent once with --enableSSL, short timeout so it exits quickly when killed
|
// Run the agent once with --enableSSL, short timeout so it exits quickly when killed
|
||||||
let mut cmd = Command::cargo_bin("socktop_agent").expect("binary exists");
|
let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("socktop_agent"));
|
||||||
// Bind to an ephemeral port (-p 0) to avoid conflicts/flakes
|
// Bind to an ephemeral port (-p 0) to avoid conflicts/flakes
|
||||||
cmd.env("XDG_CONFIG_HOME", &xdg)
|
cmd.env("XDG_CONFIG_HOME", &xdg)
|
||||||
.arg("--enableSSL")
|
.arg("--enableSSL")
|
||||||
|
|||||||
60
socktop_connector/Cargo.toml
Normal file
60
socktop_connector/Cargo.toml
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
[package]
|
||||||
|
name = "socktop_connector"
|
||||||
|
version = "1.50.0"
|
||||||
|
edition = "2024"
|
||||||
|
license = "MIT"
|
||||||
|
description = "WebSocket connector library for socktop agent communication"
|
||||||
|
authors = ["Jason Witty <jasonpwitty+socktop@proton.me>"]
|
||||||
|
repository = "https://github.com/jasonwitty/socktop"
|
||||||
|
readme = "README.md"
|
||||||
|
keywords = ["monitoring", "websocket", "metrics", "system"]
|
||||||
|
categories = ["network-programming", "development-tools"]
|
||||||
|
documentation = "https://docs.rs/socktop_connector"
|
||||||
|
|
||||||
|
[lib]
|
||||||
|
crate-type = ["cdylib", "rlib"]
|
||||||
|
|
||||||
|
# docs.rs specific metadata
|
||||||
|
[package.metadata.docs.rs]
|
||||||
|
all-features = true
|
||||||
|
rustdoc-args = ["--cfg", "docsrs"]
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
# WebSocket client - only for non-WASM targets
|
||||||
|
tokio-tungstenite = { workspace = true, optional = true }
|
||||||
|
tokio = { workspace = true, optional = true }
|
||||||
|
futures-util = { workspace = true, optional = true }
|
||||||
|
url = { workspace = true, optional = true }
|
||||||
|
|
||||||
|
# WASM WebSocket support
|
||||||
|
wasm-bindgen = { version = "0.2", optional = true }
|
||||||
|
wasm-bindgen-futures = { version = "0.4", optional = true }
|
||||||
|
js-sys = { version = "0.3", optional = true }
|
||||||
|
web-sys = { version = "0.3", features = ["WebSocket", "MessageEvent", "ErrorEvent", "CloseEvent", "BinaryType", "Window", "console"], optional = true }
|
||||||
|
|
||||||
|
# TLS support
|
||||||
|
rustls = { version = "0.23", features = ["ring"], optional = true }
|
||||||
|
rustls-pemfile = { version = "2.1", optional = true }
|
||||||
|
|
||||||
|
# Serialization - always available
|
||||||
|
serde = { workspace = true }
|
||||||
|
serde_json = { workspace = true }
|
||||||
|
|
||||||
|
# Compression - used in both networking and WASM modes
|
||||||
|
flate2 = "1.0"
|
||||||
|
|
||||||
|
# Protobuf - always available
|
||||||
|
prost = { workspace = true }
|
||||||
|
|
||||||
|
# Error handling - always available
|
||||||
|
thiserror = "2.0"
|
||||||
|
|
||||||
|
[build-dependencies]
|
||||||
|
prost-build = "0.13"
|
||||||
|
protoc-bin-vendored = "3.0"
|
||||||
|
|
||||||
|
[features]
|
||||||
|
default = ["networking", "tls"]
|
||||||
|
networking = ["tokio-tungstenite", "tokio", "futures-util", "url"]
|
||||||
|
tls = ["networking", "rustls", "rustls-pemfile"]
|
||||||
|
wasm = ["wasm-bindgen", "wasm-bindgen-futures", "js-sys", "web-sys"] # WASM-compatible networking with compression
|
||||||
21
socktop_connector/LICENSE
Normal file
21
socktop_connector/LICENSE
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2025 Jason Witty
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
486
socktop_connector/README.md
Normal file
486
socktop_connector/README.md
Normal file
@ -0,0 +1,486 @@
|
|||||||
|
# socktop_connector
|
||||||
|
|
||||||
|
A WebSocket connector library for communicating with socktop agents.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
`socktop_connector` provides a high-level, type-safe interface for connecting to socktop agents over WebSocket connections. It handles connection management, TLS certificate pinning, compression, and protocol buffer decoding automatically.
|
||||||
|
|
||||||
|
The library is designed for professional use with structured error handling that allows you to pattern match on specific error types, making it easy to implement robust error recovery and monitoring strategies.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- **WebSocket Communication**: Support for both `ws://` and `wss://` connections
|
||||||
|
- **TLS Security**: Certificate pinning for secure connections with self-signed certificates
|
||||||
|
- **Hostname Verification**: Configurable hostname verification for TLS connections
|
||||||
|
- **Type Safety**: Strongly typed requests and responses
|
||||||
|
- **Automatic Compression**: Handles gzip compression/decompression transparently
|
||||||
|
- **Protocol Buffer Support**: Decodes binary process data automatically
|
||||||
|
- **Error Handling**: Comprehensive error handling with structured error types for pattern matching
|
||||||
|
|
||||||
|
## Connection Types
|
||||||
|
|
||||||
|
### Non-TLS Connections (`ws://`)
|
||||||
|
Use `connect_to_socktop_agent()` for unencrypted WebSocket connections.
|
||||||
|
|
||||||
|
### TLS Connections (`wss://`)
|
||||||
|
Use `connect_to_socktop_agent_with_tls()` for encrypted connections with certificate pinning. You can control hostname verification with the `verify_hostname` parameter.
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
Add this to your `Cargo.toml`:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[dependencies]
|
||||||
|
socktop_connector = "0.1.5"
|
||||||
|
tokio = { version = "1", features = ["rt", "rt-multi-thread", "net", "time", "macros"] }
|
||||||
|
```
|
||||||
|
|
||||||
|
### Basic Usage
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use socktop_connector::{connect_to_socktop_agent, AgentRequest, AgentResponse};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
// Connect to a socktop agent (non-TLS connections are always unverified)
|
||||||
|
let mut connector = connect_to_socktop_agent("ws://localhost:3000/ws").await?;
|
||||||
|
|
||||||
|
// Request metrics
|
||||||
|
match connector.request(AgentRequest::Metrics).await? {
|
||||||
|
AgentResponse::Metrics(metrics) => {
|
||||||
|
println!("CPU: {}%, Memory: {}/{}MB",
|
||||||
|
metrics.cpu_total,
|
||||||
|
metrics.mem_used / 1024 / 1024,
|
||||||
|
metrics.mem_total / 1024 / 1024
|
||||||
|
);
|
||||||
|
}
|
||||||
|
_ => unreachable!(),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Request process list
|
||||||
|
match connector.request(AgentRequest::Processes).await? {
|
||||||
|
AgentResponse::Processes(processes) => {
|
||||||
|
println!("Total processes: {}", processes.process_count);
|
||||||
|
for process in processes.top_processes.iter().take(5) {
|
||||||
|
println!(" {} (PID: {}) - CPU: {}%",
|
||||||
|
process.name, process.pid, process.cpu_usage);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => unreachable!(),
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Error Handling with Pattern Matching
|
||||||
|
|
||||||
|
Take advantage of structured error types for robust error handling:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use socktop_connector::{connect_to_socktop_agent, ConnectorError, AgentRequest};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
// Handle connection errors specifically
|
||||||
|
let mut connector = match connect_to_socktop_agent("ws://localhost:3000/ws").await {
|
||||||
|
Ok(conn) => conn,
|
||||||
|
Err(ConnectorError::WebSocketError(e)) => {
|
||||||
|
eprintln!("Failed to connect to WebSocket: {}", e);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
Err(ConnectorError::UrlError(e)) => {
|
||||||
|
eprintln!("Invalid URL provided: {}", e);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
eprintln!("Connection failed: {}", e);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Handle request errors specifically
|
||||||
|
match connector.request(AgentRequest::Metrics).await {
|
||||||
|
Ok(response) => println!("Success: {:?}", response),
|
||||||
|
Err(ConnectorError::JsonError(e)) => {
|
||||||
|
eprintln!("Failed to parse server response: {}", e);
|
||||||
|
}
|
||||||
|
Err(ConnectorError::WebSocketError(e)) => {
|
||||||
|
eprintln!("Communication error: {}", e);
|
||||||
|
}
|
||||||
|
Err(e) => eprintln!("Request failed: {}", e),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### TLS with Certificate Pinning
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use socktop_connector::{connect_to_socktop_agent_with_tls, AgentRequest};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
// Connect with TLS certificate pinning and hostname verification
|
||||||
|
let mut connector = connect_to_socktop_agent_with_tls(
|
||||||
|
"wss://remote-host:8443/ws",
|
||||||
|
"/path/to/cert.pem",
|
||||||
|
false // Enable hostname verification
|
||||||
|
).await?;
|
||||||
|
|
||||||
|
let response = connector.request(AgentRequest::Disks).await?;
|
||||||
|
println!("Got disk info: {:?}", response);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Advanced Configuration
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use socktop_connector::{ConnectorConfig, SocktopConnector, AgentRequest};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
// Create a custom configuration
|
||||||
|
let config = ConnectorConfig::new("wss://remote-host:8443/ws")
|
||||||
|
.with_tls_ca("/path/to/cert.pem")
|
||||||
|
.with_hostname_verification(false);
|
||||||
|
|
||||||
|
// Create and connect
|
||||||
|
let mut connector = SocktopConnector::new(config);
|
||||||
|
connector.connect().await?;
|
||||||
|
|
||||||
|
// Make requests
|
||||||
|
let response = connector.request(AgentRequest::Metrics).await?;
|
||||||
|
|
||||||
|
// Clean disconnect
|
||||||
|
connector.disconnect().await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### WebSocket Protocol Configuration
|
||||||
|
|
||||||
|
For version compatibility (if applies), you can configure WebSocket protocol version and sub-protocols:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use socktop_connector::{ConnectorConfig, SocktopConnector, connect_to_socktop_agent_with_config};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
// Method 1: Using the convenience function
|
||||||
|
let connector = connect_to_socktop_agent_with_config(
|
||||||
|
"ws://localhost:3000/ws",
|
||||||
|
Some(vec!["socktop".to_string(), "v1".to_string()]), // Sub-protocols
|
||||||
|
Some("13".to_string()), // WebSocket version (13 is standard)
|
||||||
|
).await?;
|
||||||
|
|
||||||
|
// Method 2: Using ConnectorConfig builder
|
||||||
|
let config = ConnectorConfig::new("ws://localhost:3000/ws")
|
||||||
|
.with_protocols(vec!["socktop".to_string()])
|
||||||
|
.with_version("13");
|
||||||
|
|
||||||
|
let mut connector = SocktopConnector::new(config);
|
||||||
|
connector.connect().await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note:** WebSocket version 13 is the current standard and is used by default. The sub-protocols feature is useful for protocol negotiation with servers that support multiple protocols.
|
||||||
|
|
||||||
|
## Continuous Updates
|
||||||
|
|
||||||
|
The socktop agent provides real-time system metrics. Each request returns the current snapshot, but you can implement continuous monitoring by making requests in a loop:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use socktop_connector::{connect_to_socktop_agent, AgentRequest, AgentResponse, ConnectorError};
|
||||||
|
use tokio::time::{sleep, Duration};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
let mut connector = connect_to_socktop_agent("ws://localhost:3000/ws").await?;
|
||||||
|
|
||||||
|
// Monitor system metrics every 2 seconds
|
||||||
|
loop {
|
||||||
|
match connector.request(AgentRequest::Metrics).await {
|
||||||
|
Ok(AgentResponse::Metrics(metrics)) => {
|
||||||
|
// Calculate total network activity across all interfaces
|
||||||
|
let total_rx: u64 = metrics.networks.iter().map(|n| n.received).sum();
|
||||||
|
let total_tx: u64 = metrics.networks.iter().map(|n| n.transmitted).sum();
|
||||||
|
|
||||||
|
println!("CPU: {:.1}%, Memory: {:.1}%, Network: ↓{} ↑{}",
|
||||||
|
metrics.cpu_total,
|
||||||
|
(metrics.mem_used as f64 / metrics.mem_total as f64) * 100.0,
|
||||||
|
format_bytes(total_rx),
|
||||||
|
format_bytes(total_tx)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
eprintln!("Error getting metrics: {}", e);
|
||||||
|
|
||||||
|
// You can pattern match on specific error types for different handling
|
||||||
|
match e {
|
||||||
|
socktop_connector::ConnectorError::WebSocketError(_) => {
|
||||||
|
eprintln!("Connection lost, attempting to reconnect...");
|
||||||
|
// Implement reconnection logic here
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
socktop_connector::ConnectorError::JsonError(_) => {
|
||||||
|
eprintln!("Data parsing error, continuing...");
|
||||||
|
// Continue with next iteration for transient parsing errors
|
||||||
|
}
|
||||||
|
_ => {
|
||||||
|
eprintln!("Other error, stopping monitoring");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => unreachable!(),
|
||||||
|
}
|
||||||
|
|
||||||
|
sleep(Duration::from_secs(2)).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn format_bytes(bytes: u64) -> String {
|
||||||
|
const UNITS: &[&str] = &["B", "KB", "MB", "GB"];
|
||||||
|
let mut size = bytes as f64;
|
||||||
|
let mut unit_index = 0;
|
||||||
|
|
||||||
|
while size >= 1024.0 && unit_index < UNITS.len() - 1 {
|
||||||
|
size /= 1024.0;
|
||||||
|
unit_index += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
format!("{:.1}{}", size, UNITS[unit_index])
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Understanding Data Freshness
|
||||||
|
|
||||||
|
The socktop agent implements intelligent caching to avoid overwhelming the system:
|
||||||
|
|
||||||
|
- **Metrics**: Cached for ~250ms by default (cheap / fast-changing data like CPU, memory)
|
||||||
|
- **Processes**: Cached for ~1500ms by default (exppensive / moderately changing data)
|
||||||
|
- **Disks**: Cached for ~1000ms by default (cheap / slowly changing data)
|
||||||
|
|
||||||
|
These values have been generally tuned in advance. You should not need to override them. The reason for this cache is for the use case that multiple clients are requesting data. In general a single client should never really hit a cached response since the polling rates are slower that the cache intervals. Cache intervals have been tuned based on how much work the agent has to do in the case of reloading fresh data.
|
||||||
|
|
||||||
|
|
||||||
|
This means:
|
||||||
|
|
||||||
|
1. **Multiple rapid requests** for the same data type will return cached results
|
||||||
|
2. **Different data types** have independent cache timers
|
||||||
|
3. **Fresh data** is automatically retrieved when cache expires
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use socktop_connector::{connect_to_socktop_agent, AgentRequest, AgentResponse};
|
||||||
|
use tokio::time::{sleep, Duration};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
let mut connector = connect_to_socktop_agent("ws://localhost:3000/ws").await?;
|
||||||
|
|
||||||
|
// This demonstrates cache behavior
|
||||||
|
println!("Requesting metrics twice quickly...");
|
||||||
|
|
||||||
|
// First request - fresh data from system
|
||||||
|
let start = std::time::Instant::now();
|
||||||
|
connector.request(AgentRequest::Metrics).await?;
|
||||||
|
println!("First request took: {:?}", start.elapsed());
|
||||||
|
|
||||||
|
// Second request immediately - cached data
|
||||||
|
let start = std::time::Instant::now();
|
||||||
|
connector.request(AgentRequest::Metrics).await?;
|
||||||
|
println!("Second request took: {:?}", start.elapsed()); // Much faster!
|
||||||
|
|
||||||
|
// Wait for cache to expire, then request again
|
||||||
|
sleep(Duration::from_millis(300)).await;
|
||||||
|
let start = std::time::Instant::now();
|
||||||
|
connector.request(AgentRequest::Metrics).await?;
|
||||||
|
println!("Third request (after cache expiry): {:?}", start.elapsed());
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The WebSocket connection remains open between requests, providing efficient real-time monitoring without connection overhead.
|
||||||
|
|
||||||
|
## Request Types
|
||||||
|
|
||||||
|
The library supports three types of requests:
|
||||||
|
|
||||||
|
- `AgentRequest::Metrics` - Get current system metrics (CPU, memory, network, etc.)
|
||||||
|
- `AgentRequest::Disks` - Get disk usage information
|
||||||
|
- `AgentRequest::Processes` - Get running process information
|
||||||
|
|
||||||
|
## Response Types
|
||||||
|
|
||||||
|
Responses are automatically parsed into strongly-typed structures:
|
||||||
|
|
||||||
|
- `AgentResponse::Metrics(Metrics)` - System metrics with CPU, memory, network data
|
||||||
|
- `AgentResponse::Disks(Vec<DiskInfo>)` - List of disk usage information
|
||||||
|
- `AgentResponse::Processes(ProcessesPayload)` - Process list with CPU and memory usage
|
||||||
|
|
||||||
|
## Configuration Options
|
||||||
|
|
||||||
|
The library provides flexible configuration through the `ConnectorConfig` builder:
|
||||||
|
|
||||||
|
- `with_tls_ca(path)` - Enable TLS with certificate pinning
|
||||||
|
- `with_hostname_verification(bool)` - Control hostname verification for TLS connections
|
||||||
|
- `true` (recommended): Verify the server hostname matches the certificate
|
||||||
|
- `false`: Skip hostname verification (useful for localhost or IP-based connections)
|
||||||
|
- `with_protocols(Vec<String>)` - Set WebSocket sub-protocols for protocol negotiation
|
||||||
|
- `with_version(String)` - Set WebSocket protocol version (default is "13", the current standard)
|
||||||
|
|
||||||
|
**Note**: Hostname verification only applies to TLS connections (`wss://`). Non-TLS connections (`ws://`) don't use certificates, so hostname verification is not applicable.
|
||||||
|
|
||||||
|
## WASM Compatibility (experimental)
|
||||||
|
|
||||||
|
`socktop_connector` provides **full WebSocket support** for WebAssembly (WASM) environments, including complete networking functionality with automatic compression and protobuf decoding.
|
||||||
|
|
||||||
|
### Quick Setup
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[dependencies]
|
||||||
|
socktop_connector = { version = "0.1.5", default-features = false, features = ["wasm"] }
|
||||||
|
wasm-bindgen = "0.2"
|
||||||
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
|
serde_json = "1.0"
|
||||||
|
```
|
||||||
|
|
||||||
|
### What Works
|
||||||
|
- ✅ Full WebSocket connectivity (`ws://` connections)
|
||||||
|
- ✅ All request types (`Metrics`, `Disks`, `Processes`)
|
||||||
|
- ✅ Automatic gzip decompression for metrics and disks
|
||||||
|
- ✅ Automatic protobuf decoding for process data
|
||||||
|
- ✅ All types (`ConnectorConfig`, `AgentRequest`, `AgentResponse`)
|
||||||
|
- ✅ JSON serialization/deserialization
|
||||||
|
- ✅ Protocol and version configuration
|
||||||
|
|
||||||
|
### What Doesn't Work
|
||||||
|
- ❌ TLS connections (`wss://`) - use `ws://` only
|
||||||
|
- ❌ TLS certificate handling
|
||||||
|
|
||||||
|
### Basic WASM Usage
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use wasm_bindgen::prelude::*;
|
||||||
|
use socktop_connector::{ConnectorConfig, SocktopConnector, AgentRequest};
|
||||||
|
|
||||||
|
#[wasm_bindgen]
|
||||||
|
pub async fn test_connection() {
|
||||||
|
let config = ConnectorConfig::new("ws://localhost:3000/ws");
|
||||||
|
let mut connector = SocktopConnector::new(config);
|
||||||
|
|
||||||
|
match connector.connect().await {
|
||||||
|
Ok(()) => {
|
||||||
|
// Request metrics with automatic gzip decompression
|
||||||
|
let response = connector.request(AgentRequest::Metrics).await.unwrap();
|
||||||
|
console_log!("Got metrics: {:?}", response);
|
||||||
|
|
||||||
|
// Request processes with automatic protobuf decoding
|
||||||
|
let response = connector.request(AgentRequest::Processes).await.unwrap();
|
||||||
|
console_log!("Got processes: {:?}", response);
|
||||||
|
}
|
||||||
|
Err(e) => console_log!("Connection failed: {}", e),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Complete WASM Guide
|
||||||
|
|
||||||
|
For detailed implementation examples, complete code samples, and a working test environment, see the **[WASM Compatibility Guide](../socktop_wasm_test/README.md)** in the `socktop_wasm_test/` directory.
|
||||||
|
|
||||||
|
## Security Considerations
|
||||||
|
|
||||||
|
- **Production TLS**: You can enable hostname verification (`verify_hostname: true`) for production systems, This will add an additional level of production of verifying the hostname against the certificate. Generally this is to stop a man in the middle attack, but since it will be the client who is fooled and not the server, the risk and likelyhood of this use case is rather low. Which is why this is disabled by default.
|
||||||
|
- **Certificate Pinning**: Use `with_tls_ca()` for self-signed certificates, the socktop agent will generate certificates on start. see main readme for more details.
|
||||||
|
- **Non-TLS**: Use only for development or trusted networks
|
||||||
|
|
||||||
|
## Environment Variables
|
||||||
|
|
||||||
|
Currently no environment variables are used. All configuration is done through the API.
|
||||||
|
|
||||||
|
## Error Handling
|
||||||
|
|
||||||
|
The library uses structured error types via `thiserror` for comprehensive error handling. You can pattern match on specific error types:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use socktop_connector::{connect_to_socktop_agent, ConnectorError, AgentRequest};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() {
|
||||||
|
match connect_to_socktop_agent("invalid://url").await {
|
||||||
|
Ok(mut connector) => {
|
||||||
|
// Handle successful connection
|
||||||
|
match connector.request(AgentRequest::Metrics).await {
|
||||||
|
Ok(response) => println!("Got response: {:?}", response),
|
||||||
|
Err(ConnectorError::WebSocketError(e)) => {
|
||||||
|
eprintln!("WebSocket communication failed: {}", e);
|
||||||
|
}
|
||||||
|
Err(ConnectorError::JsonError(e)) => {
|
||||||
|
eprintln!("Failed to parse response: {}", e);
|
||||||
|
}
|
||||||
|
Err(e) => eprintln!("Other error: {}", e),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(ConnectorError::UrlError(e)) => {
|
||||||
|
eprintln!("Invalid URL: {}", e);
|
||||||
|
}
|
||||||
|
Err(ConnectorError::WebSocketError(e)) => {
|
||||||
|
eprintln!("Failed to connect: {}", e);
|
||||||
|
}
|
||||||
|
Err(ConnectorError::TlsError(msg)) => {
|
||||||
|
eprintln!("TLS error: {}", msg);
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
eprintln!("Connection failed: {}", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Error Types
|
||||||
|
|
||||||
|
The `ConnectorError` enum provides specific variants for different error conditions:
|
||||||
|
|
||||||
|
- `ConnectorError::WebSocketError` - WebSocket connection or communication errors
|
||||||
|
- `ConnectorError::TlsError` - TLS-related errors (certificate validation, etc.)
|
||||||
|
- `ConnectorError::UrlError` - URL parsing errors
|
||||||
|
- `ConnectorError::JsonError` - JSON serialization/deserialization errors
|
||||||
|
- `ConnectorError::ProtocolError` - Protocol-level errors
|
||||||
|
- `ConnectorError::CompressionError` - Gzip compression/decompression errors
|
||||||
|
- `ConnectorError::IoError` - I/O errors
|
||||||
|
- `ConnectorError::Other` - Other errors with descriptive messages
|
||||||
|
|
||||||
|
All errors implement `std::error::Error` so they work seamlessly with `Box<dyn std::error::Error>`, `anyhow`, and other error handling crates.
|
||||||
|
|
||||||
|
### Migration from Generic Errors
|
||||||
|
|
||||||
|
If you were previously using the library with generic error handling, your existing code will continue to work:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// This continues to work as before
|
||||||
|
async fn my_function() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
let mut connector = connect_to_socktop_agent("ws://localhost:3000/ws").await?;
|
||||||
|
let response = connector.request(AgentRequest::Metrics).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
// But now you can also use structured error handling for better control
|
||||||
|
async fn improved_function() -> Result<(), ConnectorError> {
|
||||||
|
let mut connector = connect_to_socktop_agent("ws://localhost:3000/ws").await?;
|
||||||
|
let response = connector.request(AgentRequest::Metrics).await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
MIT License - see the LICENSE file for details.
|
||||||
10
socktop_connector/build.rs
Normal file
10
socktop_connector/build.rs
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
// Set the protoc binary path to use the vendored version for CI compatibility
|
||||||
|
// SAFETY: We're only setting PROTOC in a build script environment, which is safe
|
||||||
|
unsafe {
|
||||||
|
std::env::set_var("PROTOC", protoc_bin_vendored::protoc_bin_path()?);
|
||||||
|
}
|
||||||
|
|
||||||
|
prost_build::compile_protos(&["processes.proto"], &["."])?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
38
socktop_connector/examples/wasm_example.rs
Normal file
38
socktop_connector/examples/wasm_example.rs
Normal file
@ -0,0 +1,38 @@
|
|||||||
|
//! Example of using socktop_connector in a WASM environment.
|
||||||
|
//!
|
||||||
|
//! This example demonstrates how to use the connector without TLS dependencies
|
||||||
|
//! for WebAssembly builds.
|
||||||
|
|
||||||
|
use socktop_connector::{AgentRequest, ConnectorConfig, connect_to_socktop_agent};
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
println!("WASM-compatible socktop connector example");
|
||||||
|
|
||||||
|
// For WASM builds, use ws:// (not wss://) to avoid TLS dependencies
|
||||||
|
let url = "ws://localhost:3000/ws";
|
||||||
|
|
||||||
|
// Method 1: Simple connection (recommended for most use cases)
|
||||||
|
let mut connector = connect_to_socktop_agent(url).await?;
|
||||||
|
|
||||||
|
// Method 2: With custom WebSocket configuration
|
||||||
|
let config = ConnectorConfig::new(url)
|
||||||
|
.with_protocols(vec!["socktop".to_string()])
|
||||||
|
.with_version("13".to_string());
|
||||||
|
|
||||||
|
let mut connector_custom = socktop_connector::SocktopConnector::new(config);
|
||||||
|
connector_custom.connect().await?;
|
||||||
|
|
||||||
|
// Make a request to get metrics
|
||||||
|
match connector.request(AgentRequest::Metrics).await {
|
||||||
|
Ok(response) => {
|
||||||
|
println!("Successfully received response: {response:?}");
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
println!("Request failed: {e}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
println!("WASM example completed successfully!");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
15
socktop_connector/processes.proto
Normal file
15
socktop_connector/processes.proto
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
package socktop;
|
||||||
|
|
||||||
|
// All running processes. Sorting is done client-side.
|
||||||
|
message Processes {
|
||||||
|
uint64 process_count = 1; // total processes in the system
|
||||||
|
repeated Process rows = 2; // all processes
|
||||||
|
}
|
||||||
|
|
||||||
|
message Process {
|
||||||
|
uint32 pid = 1;
|
||||||
|
string name = 2;
|
||||||
|
float cpu_usage = 3; // 0..100
|
||||||
|
uint64 mem_bytes = 4; // RSS bytes
|
||||||
|
}
|
||||||
48
socktop_connector/src/config.rs
Normal file
48
socktop_connector/src/config.rs
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
//! Configuration for socktop WebSocket connections.
|
||||||
|
|
||||||
|
/// Configuration for connecting to a socktop agent.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct ConnectorConfig {
|
||||||
|
pub url: String,
|
||||||
|
pub tls_ca_path: Option<String>,
|
||||||
|
pub verify_hostname: bool,
|
||||||
|
pub ws_protocols: Option<Vec<String>>,
|
||||||
|
pub ws_version: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ConnectorConfig {
|
||||||
|
/// Create a new connector configuration with the given URL.
|
||||||
|
pub fn new(url: impl Into<String>) -> Self {
|
||||||
|
Self {
|
||||||
|
url: url.into(),
|
||||||
|
tls_ca_path: None,
|
||||||
|
verify_hostname: false,
|
||||||
|
ws_protocols: None,
|
||||||
|
ws_version: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set the path to a custom TLS CA certificate file.
|
||||||
|
pub fn with_tls_ca(mut self, ca_path: impl Into<String>) -> Self {
|
||||||
|
self.tls_ca_path = Some(ca_path.into());
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Enable or disable hostname verification for TLS connections.
|
||||||
|
pub fn with_hostname_verification(mut self, verify: bool) -> Self {
|
||||||
|
self.verify_hostname = verify;
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set WebSocket sub-protocols to negotiate.
|
||||||
|
pub fn with_protocols(mut self, protocols: Vec<String>) -> Self {
|
||||||
|
self.ws_protocols = Some(protocols);
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set WebSocket protocol version (default is "13").
|
||||||
|
pub fn with_version(mut self, version: impl Into<String>) -> Self {
|
||||||
|
self.ws_version = Some(version.into());
|
||||||
|
self
|
||||||
|
}
|
||||||
|
}
|
||||||
1152
socktop_connector/src/connector.rs
Normal file
1152
socktop_connector/src/connector.rs
Normal file
File diff suppressed because it is too large
Load Diff
276
socktop_connector/src/connector_impl.rs
Normal file
276
socktop_connector/src/connector_impl.rs
Normal file
@ -0,0 +1,276 @@
|
|||||||
|
//! Modular SocktopConnector implementation using networking and WASM modules.
|
||||||
|
|
||||||
|
use crate::config::ConnectorConfig;
|
||||||
|
use crate::error::{ConnectorError, Result};
|
||||||
|
use crate::{AgentRequest, AgentResponse};
|
||||||
|
|
||||||
|
#[cfg(feature = "networking")]
|
||||||
|
use crate::networking::{
|
||||||
|
WsStream, connect_to_agent, request_disks, request_journal_entries, request_metrics,
|
||||||
|
request_process_metrics, request_processes,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[cfg(all(feature = "wasm", not(feature = "networking")))]
|
||||||
|
use crate::wasm::{connect_to_agent, send_request_and_wait};
|
||||||
|
|
||||||
|
#[cfg(all(feature = "wasm", not(feature = "networking")))]
|
||||||
|
use crate::{DiskInfo, Metrics, ProcessesPayload};
|
||||||
|
|
||||||
|
#[cfg(all(feature = "wasm", not(feature = "networking")))]
|
||||||
|
use web_sys::WebSocket;
|
||||||
|
|
||||||
|
/// Main connector for communicating with socktop agents
|
||||||
|
pub struct SocktopConnector {
|
||||||
|
pub config: ConnectorConfig,
|
||||||
|
#[cfg(feature = "networking")]
|
||||||
|
stream: Option<WsStream>,
|
||||||
|
#[cfg(all(feature = "wasm", not(feature = "networking")))]
|
||||||
|
websocket: Option<WebSocket>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SocktopConnector {
|
||||||
|
/// Create a new connector with the given configuration
|
||||||
|
pub fn new(config: ConnectorConfig) -> Self {
|
||||||
|
Self {
|
||||||
|
config,
|
||||||
|
#[cfg(feature = "networking")]
|
||||||
|
stream: None,
|
||||||
|
#[cfg(all(feature = "wasm", not(feature = "networking")))]
|
||||||
|
websocket: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "networking")]
|
||||||
|
impl SocktopConnector {
|
||||||
|
/// Connect to the agent
|
||||||
|
pub async fn connect(&mut self) -> Result<()> {
|
||||||
|
let stream = connect_to_agent(&self.config).await?;
|
||||||
|
self.stream = Some(stream);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Send a request to the agent and get the response
|
||||||
|
pub async fn request(&mut self, request: AgentRequest) -> Result<AgentResponse> {
|
||||||
|
let stream = self.stream.as_mut().ok_or(ConnectorError::NotConnected)?;
|
||||||
|
|
||||||
|
match request {
|
||||||
|
AgentRequest::Metrics => {
|
||||||
|
let metrics = request_metrics(stream)
|
||||||
|
.await
|
||||||
|
.ok_or_else(|| ConnectorError::invalid_response("Failed to get metrics"))?;
|
||||||
|
Ok(AgentResponse::Metrics(metrics))
|
||||||
|
}
|
||||||
|
AgentRequest::Disks => {
|
||||||
|
let disks = request_disks(stream)
|
||||||
|
.await
|
||||||
|
.ok_or_else(|| ConnectorError::invalid_response("Failed to get disks"))?;
|
||||||
|
Ok(AgentResponse::Disks(disks))
|
||||||
|
}
|
||||||
|
AgentRequest::Processes => {
|
||||||
|
let processes = request_processes(stream)
|
||||||
|
.await
|
||||||
|
.ok_or_else(|| ConnectorError::invalid_response("Failed to get processes"))?;
|
||||||
|
Ok(AgentResponse::Processes(processes))
|
||||||
|
}
|
||||||
|
AgentRequest::ProcessMetrics { pid } => {
|
||||||
|
let process_metrics =
|
||||||
|
request_process_metrics(stream, pid).await.ok_or_else(|| {
|
||||||
|
ConnectorError::invalid_response("Failed to get process metrics")
|
||||||
|
})?;
|
||||||
|
Ok(AgentResponse::ProcessMetrics(process_metrics))
|
||||||
|
}
|
||||||
|
AgentRequest::JournalEntries { pid } => {
|
||||||
|
let journal_entries =
|
||||||
|
request_journal_entries(stream, pid).await.ok_or_else(|| {
|
||||||
|
ConnectorError::invalid_response("Failed to get journal entries")
|
||||||
|
})?;
|
||||||
|
Ok(AgentResponse::JournalEntries(journal_entries))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if the connector is connected
|
||||||
|
pub fn is_connected(&self) -> bool {
|
||||||
|
self.stream.is_some()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Disconnect from the agent
|
||||||
|
pub async fn disconnect(&mut self) -> Result<()> {
|
||||||
|
if let Some(mut stream) = self.stream.take() {
|
||||||
|
let _ = stream.close(None).await;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WASM WebSocket implementation
|
||||||
|
#[cfg(all(feature = "wasm", not(feature = "networking")))]
|
||||||
|
impl SocktopConnector {
|
||||||
|
/// Connect to the agent using WASM WebSocket
|
||||||
|
pub async fn connect(&mut self) -> Result<()> {
|
||||||
|
let websocket = connect_to_agent(&self.config).await?;
|
||||||
|
self.websocket = Some(websocket);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Send a request to the agent and get the response
|
||||||
|
pub async fn request(&mut self, request: AgentRequest) -> Result<AgentResponse> {
|
||||||
|
let ws = self
|
||||||
|
.websocket
|
||||||
|
.as_ref()
|
||||||
|
.ok_or(ConnectorError::NotConnected)?;
|
||||||
|
|
||||||
|
send_request_and_wait(ws, request).await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if the connector is connected
|
||||||
|
pub fn is_connected(&self) -> bool {
|
||||||
|
use crate::utils::WEBSOCKET_OPEN;
|
||||||
|
self.websocket
|
||||||
|
.as_ref()
|
||||||
|
.is_some_and(|ws| ws.ready_state() == WEBSOCKET_OPEN)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Disconnect from the agent
|
||||||
|
pub async fn disconnect(&mut self) -> Result<()> {
|
||||||
|
if let Some(ws) = self.websocket.take() {
|
||||||
|
let _ = ws.close();
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Request metrics from the agent
|
||||||
|
pub async fn get_metrics(&mut self) -> Result<Metrics> {
|
||||||
|
match self.request(AgentRequest::Metrics).await? {
|
||||||
|
AgentResponse::Metrics(metrics) => Ok(metrics),
|
||||||
|
_ => Err(ConnectorError::protocol_error(
|
||||||
|
"Unexpected response type for metrics",
|
||||||
|
)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Request disk information from the agent
|
||||||
|
pub async fn get_disks(&mut self) -> Result<Vec<DiskInfo>> {
|
||||||
|
match self.request(AgentRequest::Disks).await? {
|
||||||
|
AgentResponse::Disks(disks) => Ok(disks),
|
||||||
|
_ => Err(ConnectorError::protocol_error(
|
||||||
|
"Unexpected response type for disks",
|
||||||
|
)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Request process information from the agent
|
||||||
|
pub async fn get_processes(&mut self) -> Result<ProcessesPayload> {
|
||||||
|
match self.request(AgentRequest::Processes).await? {
|
||||||
|
AgentResponse::Processes(processes) => Ok(processes),
|
||||||
|
_ => Err(ConnectorError::protocol_error(
|
||||||
|
"Unexpected response type for processes",
|
||||||
|
)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stub implementations when neither networking nor wasm is enabled
|
||||||
|
#[cfg(not(any(feature = "networking", feature = "wasm")))]
|
||||||
|
impl SocktopConnector {
|
||||||
|
/// Connect to the socktop agent endpoint.
|
||||||
|
///
|
||||||
|
/// Note: Networking functionality is disabled. Enable the "networking" feature to use this function.
|
||||||
|
pub async fn connect(&mut self) -> Result<()> {
|
||||||
|
Err(ConnectorError::protocol_error(
|
||||||
|
"Networking functionality disabled. Enable the 'networking' feature to connect to agents.",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Send a request to the agent and await a response.
|
||||||
|
///
|
||||||
|
/// Note: Networking functionality is disabled. Enable the "networking" feature to use this function.
|
||||||
|
pub async fn request(&mut self, _request: AgentRequest) -> Result<AgentResponse> {
|
||||||
|
Err(ConnectorError::protocol_error(
|
||||||
|
"Networking functionality disabled. Enable the 'networking' feature to send requests.",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Close the connection to the agent.
|
||||||
|
///
|
||||||
|
/// Note: Networking functionality is disabled. This is a no-op when networking is disabled.
|
||||||
|
pub async fn disconnect(&mut self) -> Result<()> {
|
||||||
|
Ok(()) // No-op when networking is disabled
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Convenience function to create a connector and connect in one step.
|
||||||
|
///
|
||||||
|
/// This function is for non-TLS WebSocket connections (`ws://`). Since there's no
|
||||||
|
/// certificate involved, hostname verification is not applicable.
|
||||||
|
///
|
||||||
|
/// For TLS connections with certificate pinning, use `connect_to_socktop_agent_with_tls()`.
|
||||||
|
#[cfg(feature = "networking")]
|
||||||
|
pub async fn connect_to_socktop_agent(url: impl Into<String>) -> Result<SocktopConnector> {
|
||||||
|
let config = ConnectorConfig::new(url);
|
||||||
|
let mut connector = SocktopConnector::new(config);
|
||||||
|
connector.connect().await?;
|
||||||
|
Ok(connector)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Convenience function to create a connector with TLS and connect in one step.
|
||||||
|
///
|
||||||
|
/// This function enables TLS with certificate pinning using the provided CA certificate.
|
||||||
|
/// The `verify_hostname` parameter controls whether the server's hostname is verified
|
||||||
|
/// against the certificate (recommended for production, can be disabled for testing).
|
||||||
|
#[cfg(feature = "tls")]
|
||||||
|
#[cfg(feature = "networking")]
|
||||||
|
#[cfg_attr(docsrs, doc(cfg(feature = "tls")))]
|
||||||
|
pub async fn connect_to_socktop_agent_with_tls(
|
||||||
|
url: impl Into<String>,
|
||||||
|
ca_path: impl Into<String>,
|
||||||
|
verify_hostname: bool,
|
||||||
|
) -> Result<SocktopConnector> {
|
||||||
|
let config = ConnectorConfig::new(url)
|
||||||
|
.with_tls_ca(ca_path)
|
||||||
|
.with_hostname_verification(verify_hostname);
|
||||||
|
let mut connector = SocktopConnector::new(config);
|
||||||
|
connector.connect().await?;
|
||||||
|
Ok(connector)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Convenience function to create a connector with custom WebSocket protocol configuration.
|
||||||
|
///
|
||||||
|
/// This function allows you to specify WebSocket protocol version and sub-protocols.
|
||||||
|
/// Most users should use the simpler `connect_to_socktop_agent()` function instead.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
/// ```no_run
|
||||||
|
/// use socktop_connector::connect_to_socktop_agent_with_config;
|
||||||
|
///
|
||||||
|
/// # #[tokio::main]
|
||||||
|
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
/// let connector = connect_to_socktop_agent_with_config(
|
||||||
|
/// "ws://localhost:3000/ws",
|
||||||
|
/// Some(vec!["socktop".to_string()]), // WebSocket sub-protocols
|
||||||
|
/// Some("13".to_string()), // WebSocket version (13 is standard)
|
||||||
|
/// ).await?;
|
||||||
|
/// # Ok(())
|
||||||
|
/// # }
|
||||||
|
/// ```
|
||||||
|
#[cfg(feature = "networking")]
|
||||||
|
pub async fn connect_to_socktop_agent_with_config(
|
||||||
|
url: impl Into<String>,
|
||||||
|
protocols: Option<Vec<String>>,
|
||||||
|
version: Option<String>,
|
||||||
|
) -> Result<SocktopConnector> {
|
||||||
|
let mut config = ConnectorConfig::new(url);
|
||||||
|
|
||||||
|
if let Some(protocols) = protocols {
|
||||||
|
config = config.with_protocols(protocols);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(version) = version {
|
||||||
|
config = config.with_version(version);
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut connector = SocktopConnector::new(config);
|
||||||
|
connector.connect().await?;
|
||||||
|
Ok(connector)
|
||||||
|
}
|
||||||
155
socktop_connector/src/error.rs
Normal file
155
socktop_connector/src/error.rs
Normal file
@ -0,0 +1,155 @@
|
|||||||
|
//! Error types for socktop_connector
|
||||||
|
|
||||||
|
use thiserror::Error;
|
||||||
|
|
||||||
|
/// Errors that can occur when using socktop_connector
|
||||||
|
#[derive(Error, Debug)]
|
||||||
|
pub enum ConnectorError {
|
||||||
|
/// WebSocket connection failed
|
||||||
|
#[cfg(feature = "networking")]
|
||||||
|
#[error("WebSocket connection failed: {source}")]
|
||||||
|
ConnectionFailed {
|
||||||
|
source: Box<tokio_tungstenite::tungstenite::Error>,
|
||||||
|
},
|
||||||
|
|
||||||
|
/// URL parsing error
|
||||||
|
#[cfg(feature = "networking")]
|
||||||
|
#[error("Invalid URL: {url}")]
|
||||||
|
InvalidUrl {
|
||||||
|
url: String,
|
||||||
|
#[source]
|
||||||
|
source: url::ParseError,
|
||||||
|
},
|
||||||
|
|
||||||
|
/// TLS certificate error
|
||||||
|
#[error("TLS certificate error: {message}")]
|
||||||
|
TlsError {
|
||||||
|
message: String,
|
||||||
|
#[source]
|
||||||
|
source: Box<dyn std::error::Error + Send + Sync>,
|
||||||
|
},
|
||||||
|
|
||||||
|
/// Certificate file not found or invalid
|
||||||
|
#[error("Certificate file error at '{path}': {message}")]
|
||||||
|
CertificateError { path: String, message: String },
|
||||||
|
|
||||||
|
/// Invalid server response format
|
||||||
|
#[error("Invalid response from server: {message}")]
|
||||||
|
InvalidResponse { message: String },
|
||||||
|
|
||||||
|
/// JSON parsing error
|
||||||
|
#[error("JSON parsing error: {source}")]
|
||||||
|
JsonError {
|
||||||
|
#[from]
|
||||||
|
source: serde_json::Error,
|
||||||
|
},
|
||||||
|
|
||||||
|
/// Request/response protocol error
|
||||||
|
#[error("Protocol error: {message}")]
|
||||||
|
ProtocolError { message: String },
|
||||||
|
|
||||||
|
/// Connection is not established
|
||||||
|
#[error("Not connected to server")]
|
||||||
|
NotConnected,
|
||||||
|
|
||||||
|
/// Connection was closed unexpectedly
|
||||||
|
#[error("Connection closed: {reason}")]
|
||||||
|
ConnectionClosed { reason: String },
|
||||||
|
|
||||||
|
/// IO error (network, file system, etc.)
|
||||||
|
#[error("IO error: {source}")]
|
||||||
|
IoError {
|
||||||
|
#[from]
|
||||||
|
source: std::io::Error,
|
||||||
|
},
|
||||||
|
|
||||||
|
/// Compression/decompression error
|
||||||
|
#[error("Compression error: {message}")]
|
||||||
|
CompressionError { message: String },
|
||||||
|
|
||||||
|
/// Protocol Buffer parsing error
|
||||||
|
#[error("Protocol buffer error: {source}")]
|
||||||
|
ProtobufError {
|
||||||
|
#[from]
|
||||||
|
source: prost::DecodeError,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Result type alias for connector operations
|
||||||
|
pub type Result<T> = std::result::Result<T, ConnectorError>;
|
||||||
|
|
||||||
|
impl ConnectorError {
|
||||||
|
/// Create a TLS error with context
|
||||||
|
pub fn tls_error(
|
||||||
|
message: impl Into<String>,
|
||||||
|
source: impl std::error::Error + Send + Sync + 'static,
|
||||||
|
) -> Self {
|
||||||
|
Self::TlsError {
|
||||||
|
message: message.into(),
|
||||||
|
source: Box::new(source),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a certificate error
|
||||||
|
pub fn certificate_error(path: impl Into<String>, message: impl Into<String>) -> Self {
|
||||||
|
Self::CertificateError {
|
||||||
|
path: path.into(),
|
||||||
|
message: message.into(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a protocol error
|
||||||
|
pub fn protocol_error(message: impl Into<String>) -> Self {
|
||||||
|
Self::ProtocolError {
|
||||||
|
message: message.into(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create an invalid response error
|
||||||
|
pub fn invalid_response(message: impl Into<String>) -> Self {
|
||||||
|
Self::InvalidResponse {
|
||||||
|
message: message.into(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a connection closed error
|
||||||
|
pub fn connection_closed(reason: impl Into<String>) -> Self {
|
||||||
|
Self::ConnectionClosed {
|
||||||
|
reason: reason.into(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a compression error
|
||||||
|
pub fn compression_error(message: impl Into<String>) -> Self {
|
||||||
|
Self::CompressionError {
|
||||||
|
message: message.into(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a serialization error (wraps JSON error)
|
||||||
|
pub fn serialization_error(message: impl Into<String>) -> Self {
|
||||||
|
Self::ProtocolError {
|
||||||
|
message: message.into(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "networking")]
|
||||||
|
impl From<url::ParseError> for ConnectorError {
|
||||||
|
fn from(source: url::ParseError) -> Self {
|
||||||
|
Self::InvalidUrl {
|
||||||
|
url: "unknown".to_string(), // We don't have the URL in the error context
|
||||||
|
source,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Manual From implementation for boxed tungstenite errors
|
||||||
|
#[cfg(feature = "networking")]
|
||||||
|
impl From<tokio_tungstenite::tungstenite::Error> for ConnectorError {
|
||||||
|
fn from(source: tokio_tungstenite::tungstenite::Error) -> Self {
|
||||||
|
Self::ConnectionFailed {
|
||||||
|
source: Box::new(source),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
183
socktop_connector/src/lib.rs
Normal file
183
socktop_connector/src/lib.rs
Normal file
@ -0,0 +1,183 @@
|
|||||||
|
//! WebSocket connector library for socktop agents.
|
||||||
|
//!
|
||||||
|
//! This library provides a high-level interface for connecting to socktop agents
|
||||||
|
//! over WebSocket connections with support for TLS and certificate pinning.
|
||||||
|
//!
|
||||||
|
//! # Quick Start
|
||||||
|
//!
|
||||||
|
//! ```no_run
|
||||||
|
//! use socktop_connector::{connect_to_socktop_agent, AgentRequest, AgentResponse};
|
||||||
|
//!
|
||||||
|
//! #[tokio::main]
|
||||||
|
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
//! let mut connector = connect_to_socktop_agent("ws://localhost:3000/ws").await?;
|
||||||
|
//!
|
||||||
|
//! // Get comprehensive system metrics
|
||||||
|
//! if let Ok(AgentResponse::Metrics(metrics)) = connector.request(AgentRequest::Metrics).await {
|
||||||
|
//! println!("Hostname: {}", metrics.hostname);
|
||||||
|
//! println!("CPU Usage: {:.1}%", metrics.cpu_total);
|
||||||
|
//!
|
||||||
|
//! // CPU temperature if available
|
||||||
|
//! if let Some(temp) = metrics.cpu_temp_c {
|
||||||
|
//! println!("CPU Temperature: {:.1}°C", temp);
|
||||||
|
//! }
|
||||||
|
//!
|
||||||
|
//! // Memory usage
|
||||||
|
//! println!("Memory: {:.1} GB / {:.1} GB",
|
||||||
|
//! metrics.mem_used as f64 / 1_000_000_000.0,
|
||||||
|
//! metrics.mem_total as f64 / 1_000_000_000.0);
|
||||||
|
//!
|
||||||
|
//! // Per-core CPU usage
|
||||||
|
//! for (i, usage) in metrics.cpu_per_core.iter().enumerate() {
|
||||||
|
//! println!("Core {}: {:.1}%", i, usage);
|
||||||
|
//! }
|
||||||
|
//!
|
||||||
|
//! // GPU information
|
||||||
|
//! if let Some(gpus) = &metrics.gpus {
|
||||||
|
//! for gpu in gpus {
|
||||||
|
//! if let Some(name) = &gpu.name {
|
||||||
|
//! println!("GPU {}: {:.1}% usage", name, gpu.utilization.unwrap_or(0.0));
|
||||||
|
//! if let Some(temp) = gpu.temp {
|
||||||
|
//! println!(" Temperature: {:.1}°C", temp);
|
||||||
|
//! }
|
||||||
|
//! }
|
||||||
|
//! }
|
||||||
|
//! }
|
||||||
|
//! }
|
||||||
|
//!
|
||||||
|
//! // Get process information
|
||||||
|
//! if let Ok(AgentResponse::Processes(processes)) = connector.request(AgentRequest::Processes).await {
|
||||||
|
//! println!("Running processes: {}", processes.process_count);
|
||||||
|
//! for proc in &processes.top_processes {
|
||||||
|
//! println!(" PID {}: {} ({:.1}% CPU, {:.1} MB RAM)",
|
||||||
|
//! proc.pid, proc.name, proc.cpu_usage, proc.mem_bytes as f64 / 1_000_000.0);
|
||||||
|
//! }
|
||||||
|
//! }
|
||||||
|
//!
|
||||||
|
//! // Get disk information
|
||||||
|
//! if let Ok(AgentResponse::Disks(disks)) = connector.request(AgentRequest::Disks).await {
|
||||||
|
//! for disk in disks {
|
||||||
|
//! let used_gb = (disk.total - disk.available) as f64 / 1_000_000_000.0;
|
||||||
|
//! let total_gb = disk.total as f64 / 1_000_000_000.0;
|
||||||
|
//! println!("Disk {}: {:.1} GB / {:.1} GB", disk.name, used_gb, total_gb);
|
||||||
|
//! }
|
||||||
|
//! }
|
||||||
|
//!
|
||||||
|
//! Ok(())
|
||||||
|
//! }
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! # TLS Support
|
||||||
|
//!
|
||||||
|
//! ```no_run
|
||||||
|
//! use socktop_connector::connect_to_socktop_agent_with_tls;
|
||||||
|
//!
|
||||||
|
//! # #[tokio::main]
|
||||||
|
//! # async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
//! let connector = connect_to_socktop_agent_with_tls(
|
||||||
|
//! "wss://secure-host:3000/ws",
|
||||||
|
//! "/path/to/ca.pem",
|
||||||
|
//! false // Enable hostname verification
|
||||||
|
//! ).await?;
|
||||||
|
//! # Ok(())
|
||||||
|
//! # }
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! # Continuous Monitoring
|
||||||
|
//!
|
||||||
|
//! For real-time system monitoring, you can make requests in a loop. The agent
|
||||||
|
//! implements intelligent caching to avoid overwhelming the system:
|
||||||
|
//!
|
||||||
|
//! ```no_run
|
||||||
|
//! use socktop_connector::{connect_to_socktop_agent, AgentRequest, AgentResponse};
|
||||||
|
//! use tokio::time::{sleep, Duration};
|
||||||
|
//!
|
||||||
|
//! #[tokio::main]
|
||||||
|
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
//! let mut connector = connect_to_socktop_agent("ws://localhost:3000/ws").await?;
|
||||||
|
//!
|
||||||
|
//! // Monitor system metrics every 2 seconds
|
||||||
|
//! loop {
|
||||||
|
//! match connector.request(AgentRequest::Metrics).await {
|
||||||
|
//! Ok(AgentResponse::Metrics(metrics)) => {
|
||||||
|
//! // Calculate total network activity across all interfaces
|
||||||
|
//! let total_rx: u64 = metrics.networks.iter().map(|n| n.received).sum();
|
||||||
|
//! let total_tx: u64 = metrics.networks.iter().map(|n| n.transmitted).sum();
|
||||||
|
//!
|
||||||
|
//! println!("CPU: {:.1}%, Memory: {:.1}%, Network: ↓{} ↑{}",
|
||||||
|
//! metrics.cpu_total,
|
||||||
|
//! (metrics.mem_used as f64 / metrics.mem_total as f64) * 100.0,
|
||||||
|
//! format_bytes(total_rx),
|
||||||
|
//! format_bytes(total_tx)
|
||||||
|
//! );
|
||||||
|
//! }
|
||||||
|
//! Err(e) => {
|
||||||
|
//! eprintln!("Connection error: {}", e);
|
||||||
|
//! break;
|
||||||
|
//! }
|
||||||
|
//! _ => unreachable!(),
|
||||||
|
//! }
|
||||||
|
//!
|
||||||
|
//! sleep(Duration::from_secs(2)).await;
|
||||||
|
//! }
|
||||||
|
//!
|
||||||
|
//! Ok(())
|
||||||
|
//! }
|
||||||
|
//!
|
||||||
|
//! fn format_bytes(bytes: u64) -> String {
|
||||||
|
//! const UNITS: &[&str] = &["B", "KB", "MB", "GB"];
|
||||||
|
//! let mut size = bytes as f64;
|
||||||
|
//! let mut unit_index = 0;
|
||||||
|
//!
|
||||||
|
//! while size >= 1024.0 && unit_index < UNITS.len() - 1 {
|
||||||
|
//! size /= 1024.0;
|
||||||
|
//! unit_index += 1;
|
||||||
|
//! }
|
||||||
|
//!
|
||||||
|
//! format!("{:.1}{}", size, UNITS[unit_index])
|
||||||
|
//! }
|
||||||
|
//! ```
|
||||||
|
|
||||||
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
|
|
||||||
|
// Core modules
|
||||||
|
pub mod config;
|
||||||
|
pub mod error;
|
||||||
|
pub mod types;
|
||||||
|
pub mod utils;
|
||||||
|
|
||||||
|
// Implementation modules
|
||||||
|
#[cfg(feature = "networking")]
|
||||||
|
pub mod networking;
|
||||||
|
|
||||||
|
#[cfg(feature = "wasm")]
|
||||||
|
pub mod wasm;
|
||||||
|
|
||||||
|
// Main connector implementation
|
||||||
|
pub mod connector_impl;
|
||||||
|
|
||||||
|
// Re-export the main types
|
||||||
|
pub use config::ConnectorConfig;
|
||||||
|
pub use connector_impl::SocktopConnector;
|
||||||
|
pub use error::{ConnectorError, Result};
|
||||||
|
pub use types::{
|
||||||
|
AgentRequest, AgentResponse, DetailedProcessInfo, DiskInfo, GpuInfo, JournalEntry,
|
||||||
|
JournalResponse, LogLevel, Metrics, NetworkInfo, ProcessInfo, ProcessMetricsResponse,
|
||||||
|
ProcessesPayload,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Re-export convenience functions
|
||||||
|
#[cfg(feature = "networking")]
|
||||||
|
pub use connector_impl::{connect_to_socktop_agent, connect_to_socktop_agent_with_config};
|
||||||
|
|
||||||
|
#[cfg(all(feature = "tls", feature = "networking"))]
|
||||||
|
pub use connector_impl::connect_to_socktop_agent_with_tls;
|
||||||
|
|
||||||
|
#[cfg(feature = "networking")]
|
||||||
|
pub use networking::WsStream;
|
||||||
|
|
||||||
|
// Protobuf types for internal use
|
||||||
|
#[cfg(any(feature = "networking", feature = "wasm"))]
|
||||||
|
pub mod pb {
|
||||||
|
include!(concat!(env!("OUT_DIR"), "/socktop.rs"));
|
||||||
|
}
|
||||||
183
socktop_connector/src/networking/connection.rs
Normal file
183
socktop_connector/src/networking/connection.rs
Normal file
@ -0,0 +1,183 @@
|
|||||||
|
//! WebSocket connection handling for native (non-WASM) environments.
|
||||||
|
|
||||||
|
use crate::config::ConnectorConfig;
|
||||||
|
use crate::error::{ConnectorError, Result};
|
||||||
|
|
||||||
|
use std::io::BufReader;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use tokio_tungstenite::tungstenite::client::IntoClientRequest;
|
||||||
|
use tokio_tungstenite::{MaybeTlsStream, WebSocketStream, connect_async};
|
||||||
|
use url::Url;
|
||||||
|
|
||||||
|
#[cfg(feature = "tls")]
|
||||||
|
use {
|
||||||
|
rustls::{self, ClientConfig},
|
||||||
|
rustls::{
|
||||||
|
DigitallySignedStruct, RootCertStore, SignatureScheme,
|
||||||
|
client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier},
|
||||||
|
crypto::ring,
|
||||||
|
pki_types::{CertificateDer, ServerName, UnixTime},
|
||||||
|
},
|
||||||
|
rustls_pemfile::Item,
|
||||||
|
std::fs::File,
|
||||||
|
tokio_tungstenite::Connector,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub type WsStream = WebSocketStream<MaybeTlsStream<tokio::net::TcpStream>>;
|
||||||
|
|
||||||
|
/// Connect to the agent and return the WS stream
|
||||||
|
pub async fn connect_to_agent(config: &ConnectorConfig) -> Result<WsStream> {
|
||||||
|
#[cfg(feature = "tls")]
|
||||||
|
ensure_crypto_provider();
|
||||||
|
|
||||||
|
let mut u = Url::parse(&config.url)?;
|
||||||
|
if let Some(ca_path) = &config.tls_ca_path {
|
||||||
|
if u.scheme() == "ws" {
|
||||||
|
let _ = u.set_scheme("wss");
|
||||||
|
}
|
||||||
|
return connect_with_ca_and_config(u.as_str(), ca_path, config).await;
|
||||||
|
}
|
||||||
|
// No TLS - hostname verification is not applicable
|
||||||
|
connect_without_ca_and_config(u.as_str(), config).await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn connect_without_ca_and_config(url: &str, config: &ConnectorConfig) -> Result<WsStream> {
|
||||||
|
let mut req = url.into_client_request()?;
|
||||||
|
|
||||||
|
// Apply WebSocket protocol configuration
|
||||||
|
if let Some(version) = &config.ws_version {
|
||||||
|
req.headers_mut().insert(
|
||||||
|
"Sec-WebSocket-Version",
|
||||||
|
version
|
||||||
|
.parse()
|
||||||
|
.map_err(|_| ConnectorError::protocol_error("Invalid WebSocket version"))?,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(protocols) = &config.ws_protocols {
|
||||||
|
let protocols_str = protocols.join(", ");
|
||||||
|
req.headers_mut().insert(
|
||||||
|
"Sec-WebSocket-Protocol",
|
||||||
|
protocols_str
|
||||||
|
.parse()
|
||||||
|
.map_err(|_| ConnectorError::protocol_error("Invalid WebSocket protocols"))?,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let (ws, _) = connect_async(req).await?;
|
||||||
|
Ok(ws)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "tls")]
|
||||||
|
async fn connect_with_ca_and_config(
|
||||||
|
url: &str,
|
||||||
|
ca_path: &str,
|
||||||
|
config: &ConnectorConfig,
|
||||||
|
) -> Result<WsStream> {
|
||||||
|
// Initialize the crypto provider for rustls
|
||||||
|
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||||
|
|
||||||
|
let mut root = RootCertStore::empty();
|
||||||
|
let mut reader = BufReader::new(File::open(ca_path)?);
|
||||||
|
let mut der_certs = Vec::new();
|
||||||
|
while let Ok(Some(item)) = rustls_pemfile::read_one(&mut reader) {
|
||||||
|
if let Item::X509Certificate(der) = item {
|
||||||
|
der_certs.push(der);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
root.add_parsable_certificates(der_certs);
|
||||||
|
|
||||||
|
let mut cfg = ClientConfig::builder()
|
||||||
|
.with_root_certificates(root)
|
||||||
|
.with_no_client_auth();
|
||||||
|
|
||||||
|
let mut req = url.into_client_request()?;
|
||||||
|
|
||||||
|
// Apply WebSocket protocol configuration
|
||||||
|
if let Some(version) = &config.ws_version {
|
||||||
|
req.headers_mut().insert(
|
||||||
|
"Sec-WebSocket-Version",
|
||||||
|
version
|
||||||
|
.parse()
|
||||||
|
.map_err(|_| ConnectorError::protocol_error("Invalid WebSocket version"))?,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(protocols) = &config.ws_protocols {
|
||||||
|
let protocols_str = protocols.join(", ");
|
||||||
|
req.headers_mut().insert(
|
||||||
|
"Sec-WebSocket-Protocol",
|
||||||
|
protocols_str
|
||||||
|
.parse()
|
||||||
|
.map_err(|_| ConnectorError::protocol_error("Invalid WebSocket protocols"))?,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if !config.verify_hostname {
|
||||||
|
#[derive(Debug)]
|
||||||
|
struct NoVerify;
|
||||||
|
impl ServerCertVerifier for NoVerify {
|
||||||
|
fn verify_server_cert(
|
||||||
|
&self,
|
||||||
|
_end_entity: &CertificateDer<'_>,
|
||||||
|
_intermediates: &[CertificateDer<'_>],
|
||||||
|
_server_name: &ServerName,
|
||||||
|
_ocsp_response: &[u8],
|
||||||
|
_now: UnixTime,
|
||||||
|
) -> std::result::Result<ServerCertVerified, rustls::Error> {
|
||||||
|
Ok(ServerCertVerified::assertion())
|
||||||
|
}
|
||||||
|
fn verify_tls12_signature(
|
||||||
|
&self,
|
||||||
|
_message: &[u8],
|
||||||
|
_cert: &CertificateDer<'_>,
|
||||||
|
_dss: &DigitallySignedStruct,
|
||||||
|
) -> std::result::Result<HandshakeSignatureValid, rustls::Error> {
|
||||||
|
Ok(HandshakeSignatureValid::assertion())
|
||||||
|
}
|
||||||
|
fn verify_tls13_signature(
|
||||||
|
&self,
|
||||||
|
_message: &[u8],
|
||||||
|
_cert: &CertificateDer<'_>,
|
||||||
|
_dss: &DigitallySignedStruct,
|
||||||
|
) -> std::result::Result<HandshakeSignatureValid, rustls::Error> {
|
||||||
|
Ok(HandshakeSignatureValid::assertion())
|
||||||
|
}
|
||||||
|
fn supported_verify_schemes(&self) -> Vec<SignatureScheme> {
|
||||||
|
vec![
|
||||||
|
SignatureScheme::ECDSA_NISTP256_SHA256,
|
||||||
|
SignatureScheme::ED25519,
|
||||||
|
SignatureScheme::RSA_PSS_SHA256,
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cfg.dangerous().set_certificate_verifier(Arc::new(NoVerify));
|
||||||
|
// Note: hostname verification disabled (default). Set SOCKTOP_VERIFY_NAME=1 to enable strict SAN checking.
|
||||||
|
}
|
||||||
|
let cfg = Arc::new(cfg);
|
||||||
|
let (ws, _) = tokio_tungstenite::connect_async_tls_with_config(
|
||||||
|
req,
|
||||||
|
None,
|
||||||
|
config.verify_hostname,
|
||||||
|
Some(Connector::Rustls(cfg)),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
Ok(ws)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(not(feature = "tls"))]
|
||||||
|
async fn connect_with_ca_and_config(
|
||||||
|
_url: &str,
|
||||||
|
_ca_path: &str,
|
||||||
|
_config: &ConnectorConfig,
|
||||||
|
) -> Result<WsStream> {
|
||||||
|
Err(ConnectorError::tls_error(
|
||||||
|
"TLS support not compiled in",
|
||||||
|
std::io::Error::new(std::io::ErrorKind::Unsupported, "TLS not available"),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "tls")]
|
||||||
|
fn ensure_crypto_provider() {
|
||||||
|
let _ = ring::default_provider().install_default();
|
||||||
|
}
|
||||||
7
socktop_connector/src/networking/mod.rs
Normal file
7
socktop_connector/src/networking/mod.rs
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
//! Networking module for native WebSocket connections.
|
||||||
|
|
||||||
|
pub mod connection;
|
||||||
|
pub mod requests;
|
||||||
|
|
||||||
|
pub use connection::*;
|
||||||
|
pub use requests::*;
|
||||||
118
socktop_connector/src/networking/requests.rs
Normal file
118
socktop_connector/src/networking/requests.rs
Normal file
@ -0,0 +1,118 @@
|
|||||||
|
//! WebSocket request handlers for native (non-WASM) environments.
|
||||||
|
|
||||||
|
use crate::networking::WsStream;
|
||||||
|
use crate::types::{JournalResponse, ProcessMetricsResponse};
|
||||||
|
use crate::utils::{gunzip_to_string, gunzip_to_vec, is_gzip};
|
||||||
|
use crate::{DiskInfo, Metrics, ProcessInfo, ProcessesPayload, pb};
|
||||||
|
|
||||||
|
use futures_util::{SinkExt, StreamExt};
|
||||||
|
use prost::Message as ProstMessage;
|
||||||
|
use tokio_tungstenite::tungstenite::Message;
|
||||||
|
|
||||||
|
/// Send a "get_metrics" request and await a single JSON reply
|
||||||
|
pub async fn request_metrics(ws: &mut WsStream) -> Option<Metrics> {
|
||||||
|
if ws.send(Message::Text("get_metrics".into())).await.is_err() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
match ws.next().await {
|
||||||
|
Some(Ok(Message::Binary(b))) => gunzip_to_string(&b)
|
||||||
|
.ok()
|
||||||
|
.and_then(|s| serde_json::from_str::<Metrics>(&s).ok()),
|
||||||
|
Some(Ok(Message::Text(json))) => serde_json::from_str::<Metrics>(&json).ok(),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Send a "get_disks" request and await a JSON Vec<DiskInfo>
|
||||||
|
pub async fn request_disks(ws: &mut WsStream) -> Option<Vec<DiskInfo>> {
|
||||||
|
if ws.send(Message::Text("get_disks".into())).await.is_err() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
match ws.next().await {
|
||||||
|
Some(Ok(Message::Binary(b))) => gunzip_to_string(&b)
|
||||||
|
.ok()
|
||||||
|
.and_then(|s| serde_json::from_str::<Vec<DiskInfo>>(&s).ok()),
|
||||||
|
Some(Ok(Message::Text(json))) => serde_json::from_str::<Vec<DiskInfo>>(&json).ok(),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Send a "get_processes" request and await a ProcessesPayload decoded from protobuf (binary, may be gzipped)
|
||||||
|
pub async fn request_processes(ws: &mut WsStream) -> Option<ProcessesPayload> {
|
||||||
|
if ws
|
||||||
|
.send(Message::Text("get_processes".into()))
|
||||||
|
.await
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
match ws.next().await {
|
||||||
|
Some(Ok(Message::Binary(b))) => {
|
||||||
|
let gz = is_gzip(&b);
|
||||||
|
let data = if gz { gunzip_to_vec(&b).ok()? } else { b };
|
||||||
|
match pb::Processes::decode(data.as_slice()) {
|
||||||
|
Ok(pb) => {
|
||||||
|
let rows: Vec<ProcessInfo> = pb
|
||||||
|
.rows
|
||||||
|
.into_iter()
|
||||||
|
.map(|p: pb::Process| ProcessInfo {
|
||||||
|
pid: p.pid,
|
||||||
|
name: p.name,
|
||||||
|
cpu_usage: p.cpu_usage,
|
||||||
|
mem_bytes: p.mem_bytes,
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
Some(ProcessesPayload {
|
||||||
|
process_count: pb.process_count as usize,
|
||||||
|
top_processes: rows,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
if std::env::var("SOCKTOP_DEBUG").ok().as_deref() == Some("1") {
|
||||||
|
eprintln!("protobuf decode failed: {e}");
|
||||||
|
}
|
||||||
|
// Fallback: maybe it's JSON (bytes already decompressed if gz)
|
||||||
|
match String::from_utf8(data) {
|
||||||
|
Ok(s) => serde_json::from_str::<ProcessesPayload>(&s).ok(),
|
||||||
|
Err(_) => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Some(Ok(Message::Text(json))) => serde_json::from_str::<ProcessesPayload>(&json).ok(),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Send a "get_process_metrics:{pid}" request and await a JSON ProcessMetricsResponse
|
||||||
|
pub async fn request_process_metrics(
|
||||||
|
ws: &mut WsStream,
|
||||||
|
pid: u32,
|
||||||
|
) -> Option<ProcessMetricsResponse> {
|
||||||
|
let request = format!("get_process_metrics:{pid}");
|
||||||
|
if ws.send(Message::Text(request)).await.is_err() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
match ws.next().await {
|
||||||
|
Some(Ok(Message::Binary(b))) => gunzip_to_string(&b)
|
||||||
|
.ok()
|
||||||
|
.and_then(|s| serde_json::from_str::<ProcessMetricsResponse>(&s).ok()),
|
||||||
|
Some(Ok(Message::Text(json))) => serde_json::from_str::<ProcessMetricsResponse>(&json).ok(),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Send a "get_journal_entries:{pid}" request and await a JSON JournalResponse
|
||||||
|
pub async fn request_journal_entries(ws: &mut WsStream, pid: u32) -> Option<JournalResponse> {
|
||||||
|
let request = format!("get_journal_entries:{pid}");
|
||||||
|
if ws.send(Message::Text(request)).await.is_err() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
match ws.next().await {
|
||||||
|
Some(Ok(Message::Binary(b))) => gunzip_to_string(&b)
|
||||||
|
.ok()
|
||||||
|
.and_then(|s| serde_json::from_str::<JournalResponse>(&s).ok()),
|
||||||
|
Some(Ok(Message::Text(json))) => serde_json::from_str::<JournalResponse>(&json).ok(),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
196
socktop_connector/src/types.rs
Normal file
196
socktop_connector/src/types.rs
Normal file
@ -0,0 +1,196 @@
|
|||||||
|
//! Types that represent data from the socktop agent.

use serde::{Deserialize, Serialize};

/// Minimal per-process row used in the top-processes list.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ProcessInfo {
    pub pid: u32,
    pub name: String,
    pub cpu_usage: f32,
    pub mem_bytes: u64,
}

/// Disk/partition usage as reported by the agent.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct DiskInfo {
    pub name: String,
    pub total: u64,
    pub available: u64,
    // `default` keeps deserialization working when older agents omit the field.
    #[serde(default)]
    pub temperature: Option<f32>,
    #[serde(default)]
    pub is_partition: bool,
}

/// Per-interface network byte counters.
/// NOTE(review): whether these are cumulative totals or per-interval deltas
/// is not visible here — confirm against the agent.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct NetworkInfo {
    pub name: String,
    pub received: u64,
    pub transmitted: u64,
}

/// GPU telemetry. Serde aliases accept both current and legacy JSON keys so
/// payloads from older agents still deserialize.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct GpuInfo {
    pub name: Option<String>,
    pub vendor: Option<String>,

    // Accept both the new and legacy keys
    #[serde(
        default,
        alias = "utilization_gpu_pct",
        alias = "gpu_util_pct",
        alias = "gpu_utilization"
    )]
    pub utilization: Option<f32>,

    #[serde(default, alias = "mem_used_bytes", alias = "vram_used_bytes")]
    pub mem_used: Option<u64>,

    #[serde(default, alias = "mem_total_bytes", alias = "vram_total_bytes")]
    pub mem_total: Option<u64>,

    #[serde(default, alias = "temp_c", alias = "temperature_c")]
    pub temp: Option<f32>,
}

/// Top-level system snapshot returned by a "get_metrics" request.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct Metrics {
    pub cpu_total: f32,
    pub cpu_per_core: Vec<f32>,
    pub mem_total: u64,
    pub mem_used: u64,
    pub swap_total: u64,
    pub swap_used: u64,
    pub hostname: String,
    pub cpu_temp_c: Option<f32>,
    pub disks: Vec<DiskInfo>,
    pub networks: Vec<NetworkInfo>,
    pub top_processes: Vec<ProcessInfo>,
    pub gpus: Option<Vec<GpuInfo>>,
    // New: keep the last reported total process count
    #[serde(default)]
    pub process_count: Option<usize>,
}

/// Payload of a "get_processes" reply: total count plus the top rows.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ProcessesPayload {
    pub process_count: usize,
    pub top_processes: Vec<ProcessInfo>,
}

/// Per-thread detail attached to [`DetailedProcessInfo`].
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ThreadInfo {
    pub tid: u32, // Thread ID
    pub name: String, // Thread name (from /proc/{pid}/task/{tid}/comm)
    pub cpu_time_user: u64, // User CPU time in microseconds
    pub cpu_time_system: u64, // System CPU time in microseconds
    pub status: String, // Thread status (Running, Sleeping, etc.)
}

/// Full drill-down view of a single process, returned by
/// "get_process_metrics:{pid}". Recursive: children carry the same shape.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct DetailedProcessInfo {
    pub pid: u32,
    pub name: String,
    pub command: String,
    pub cpu_usage: f32,
    pub mem_bytes: u64,
    pub virtual_mem_bytes: u64,
    pub shared_mem_bytes: Option<u64>,
    pub thread_count: u32,
    pub fd_count: Option<u32>,
    pub status: String,
    pub parent_pid: Option<u32>,
    pub user_id: u32,
    pub group_id: u32,
    pub start_time: u64, // Unix timestamp
    pub cpu_time_user: u64, // Microseconds
    pub cpu_time_system: u64, // Microseconds
    pub read_bytes: Option<u64>,
    pub write_bytes: Option<u64>,
    pub working_directory: Option<String>,
    pub executable_path: Option<String>,
    pub child_processes: Vec<DetailedProcessInfo>,
    pub threads: Vec<ThreadInfo>,
}

/// Envelope for a process-metrics reply, with the agent-side cache stamp.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ProcessMetricsResponse {
    pub process: DetailedProcessInfo,
    pub cached_at: u64, // Unix timestamp when this data was cached
}

/// One journald record, as forwarded by the agent.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct JournalEntry {
    pub timestamp: String, // ISO 8601 formatted timestamp
    pub priority: LogLevel,
    pub message: String,
    pub unit: Option<String>, // systemd unit name
    pub pid: Option<u32>,
    pub comm: Option<String>, // process command name
    pub uid: Option<u32>,
    pub gid: Option<u32>,
}

/// syslog/journald priority levels (0 = most severe).
///
/// NOTE(review): with a plain serde derive, unit variants (de)serialize as
/// their *names* ("Info", ...), not as the 0-7 discriminants — confirm the
/// agent sends names rather than numeric priorities.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub enum LogLevel {
    Emergency = 0,
    Alert = 1,
    Critical = 2,
    Error = 3,
    Warning = 4,
    Notice = 5,
    Info = 6,
    Debug = 7,
}

/// A page of journal entries plus pagination/cache metadata.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct JournalResponse {
    pub entries: Vec<JournalEntry>,
    pub total_count: u32,
    pub truncated: bool,
    pub cached_at: u64, // Unix timestamp when this data was cached
}

/// Request types that can be sent to the agent
#[derive(Debug, Clone, Serialize)]
#[serde(tag = "type")]
pub enum AgentRequest {
    #[serde(rename = "metrics")]
    Metrics,
    #[serde(rename = "disks")]
    Disks,
    #[serde(rename = "processes")]
    Processes,
    #[serde(rename = "process_metrics")]
    ProcessMetrics { pid: u32 },
    #[serde(rename = "journal_entries")]
    JournalEntries { pid: u32 },
}

impl AgentRequest {
    /// Convert to the legacy string format used by the agent
    /// (plain "get_*" command strings rather than JSON).
    pub fn to_legacy_string(&self) -> String {
        match self {
            AgentRequest::Metrics => "get_metrics".to_string(),
            AgentRequest::Disks => "get_disks".to_string(),
            AgentRequest::Processes => "get_processes".to_string(),
            AgentRequest::ProcessMetrics { pid } => format!("get_process_metrics:{pid}"),
            AgentRequest::JournalEntries { pid } => format!("get_journal_entries:{pid}"),
        }
    }
}

/// Response types that can be received from the agent
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(tag = "type")]
pub enum AgentResponse {
    #[serde(rename = "metrics")]
    Metrics(Metrics),
    #[serde(rename = "disks")]
    Disks(Vec<DiskInfo>),
    #[serde(rename = "processes")]
    Processes(ProcessesPayload),
    #[serde(rename = "process_metrics")]
    ProcessMetrics(ProcessMetricsResponse),
    #[serde(rename = "journal_entries")]
    JournalEntries(JournalResponse),
}
|
||||||
67
socktop_connector/src/utils.rs
Normal file
67
socktop_connector/src/utils.rs
Normal file
@ -0,0 +1,67 @@
|
|||||||
|
//! Shared utilities for both networking and WASM implementations.
|
||||||
|
|
||||||
|
#[cfg(any(feature = "networking", feature = "wasm"))]
|
||||||
|
use flate2::read::GzDecoder;
|
||||||
|
#[cfg(any(feature = "networking", feature = "wasm"))]
|
||||||
|
use std::io::Read;
|
||||||
|
|
||||||
|
use crate::error::{ConnectorError, Result};
|
||||||
|
|
||||||
|
// WebSocket state constants
// (mirror the browser WebSocket API `readyState` values)
#[cfg(feature = "wasm")]
#[allow(dead_code)]
pub const WEBSOCKET_CONNECTING: u16 = 0;
#[cfg(feature = "wasm")]
#[allow(dead_code)]
pub const WEBSOCKET_OPEN: u16 = 1;
#[cfg(feature = "wasm")]
#[allow(dead_code)]
pub const WEBSOCKET_CLOSING: u16 = 2;
#[cfg(feature = "wasm")]
#[allow(dead_code)]
pub const WEBSOCKET_CLOSED: u16 = 3;

// Gzip magic header constants (first two bytes of any gzip stream)
pub const GZIP_MAGIC_1: u8 = 0x1f;
pub const GZIP_MAGIC_2: u8 = 0x8b;

/// Unified gzip decompression to string for both networking and WASM
///
/// Returns a protocol error if the bytes are not valid gzip or the
/// decompressed payload is not valid UTF-8.
#[cfg(any(feature = "networking", feature = "wasm"))]
pub fn gunzip_to_string(bytes: &[u8]) -> Result<String> {
    let mut decoder = GzDecoder::new(bytes);
    let mut decompressed = String::new();
    decoder
        .read_to_string(&mut decompressed)
        .map_err(|e| ConnectorError::protocol_error(format!("Gzip decompression failed: {e}")))?;
    Ok(decompressed)
}

/// Unified gzip decompression to bytes for both networking and WASM
#[cfg(any(feature = "networking", feature = "wasm"))]
pub fn gunzip_to_vec(bytes: &[u8]) -> Result<Vec<u8>> {
    let mut decoder = GzDecoder::new(bytes);
    let mut decompressed = Vec::new();
    decoder
        .read_to_end(&mut decompressed)
        .map_err(|e| ConnectorError::protocol_error(format!("Gzip decompression failed: {e}")))?;
    Ok(decompressed)
}

/// Unified gzip detection for both networking and WASM
///
/// Checks only the two-byte magic header, so a `true` result does not
/// guarantee the payload decompresses successfully.
#[cfg(any(feature = "networking", feature = "wasm"))]
pub fn is_gzip(bytes: &[u8]) -> bool {
    bytes.len() >= 2 && bytes[0] == GZIP_MAGIC_1 && bytes[1] == GZIP_MAGIC_2
}

/// Unified debug logging for both networking and WASM modes
///
/// Native builds only log when SOCKTOP_DEBUG=1; pure-WASM builds always log.
/// NOTE(review): `eprintln!` in a browser WASM environment may not reach the
/// devtools console — confirm, or route through `web_sys::console`.
#[cfg(any(feature = "networking", feature = "wasm"))]
#[allow(dead_code)]
pub fn log_debug(message: &str) {
    #[cfg(feature = "networking")]
    if std::env::var("SOCKTOP_DEBUG").ok().as_deref() == Some("1") {
        eprintln!("{message}");
    }

    #[cfg(all(feature = "wasm", not(feature = "networking")))]
    eprintln!("{message}");
}
|
||||||
66
socktop_connector/src/wasm/connection.rs
Normal file
66
socktop_connector/src/wasm/connection.rs
Normal file
@ -0,0 +1,66 @@
|
|||||||
|
//! WebSocket connection handling for WASM environments.
|
||||||
|
|
||||||
|
use crate::config::ConnectorConfig;
|
||||||
|
use crate::error::{ConnectorError, Result};
|
||||||
|
use crate::utils::{WEBSOCKET_CLOSED, WEBSOCKET_CLOSING, WEBSOCKET_OPEN};
|
||||||
|
|
||||||
|
use wasm_bindgen::JsCast;
|
||||||
|
use wasm_bindgen::prelude::*;
|
||||||
|
use web_sys::WebSocket;
|
||||||
|
|
||||||
|
/// Connect to the agent using WASM WebSocket
///
/// Creates a browser `WebSocket`, then polls `ready_state()` — with an async
/// 100ms `setTimeout`-backed delay between polls — until the socket is OPEN,
/// becomes CLOSED/CLOSING (error), or a 10s timeout elapses.
///
/// NOTE(review): the polling could be replaced with `onopen`/`onerror`
/// callbacks; also `web_sys::window().unwrap()` panics outside a window
/// context (e.g. in a web worker) — confirm this only runs on the main thread.
pub async fn connect_to_agent(config: &ConnectorConfig) -> Result<WebSocket> {
    let websocket = WebSocket::new(&config.url).map_err(|e| {
        ConnectorError::protocol_error(format!("Failed to create WebSocket: {e:?}"))
    })?;

    // Set binary type for proper message handling
    websocket.set_binary_type(web_sys::BinaryType::Arraybuffer);

    // Wait for connection to be ready with proper async delays
    let start_time = js_sys::Date::now();
    let timeout_ms = 10000.0; // 10 second timeout (increased from 5)

    // Poll connection status until ready or timeout
    loop {
        let ready_state = websocket.ready_state();

        if ready_state == WEBSOCKET_OPEN {
            // OPEN - connection is ready
            break;
        } else if ready_state == WEBSOCKET_CLOSED {
            // CLOSED
            return Err(ConnectorError::protocol_error(
                "WebSocket connection closed",
            ));
        } else if ready_state == WEBSOCKET_CLOSING {
            // CLOSING
            return Err(ConnectorError::protocol_error("WebSocket is closing"));
        }

        // Check timeout
        let now = js_sys::Date::now();
        if now - start_time > timeout_ms {
            return Err(ConnectorError::protocol_error(
                "WebSocket connection timeout",
            ));
        }

        // Proper async delay using setTimeout Promise
        let promise = js_sys::Promise::new(&mut |resolve, _| {
            let closure = Closure::once(move || resolve.call0(&JsValue::UNDEFINED));
            web_sys::window()
                .unwrap()
                .set_timeout_with_callback_and_timeout_and_arguments_0(
                    closure.as_ref().unchecked_ref(),
                    100, // 100ms delay between polls
                )
                .unwrap();
            // Leak the closure so it stays alive until the timer fires.
            closure.forget();
        });

        let _ = wasm_bindgen_futures::JsFuture::from(promise).await;
    }

    Ok(websocket)
}
|
||||||
7
socktop_connector/src/wasm/mod.rs
Normal file
7
socktop_connector/src/wasm/mod.rs
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
//! WASM module for browser WebSocket connections.
|
||||||
|
|
||||||
|
pub mod connection;
|
||||||
|
pub mod requests;
|
||||||
|
|
||||||
|
pub use connection::*;
|
||||||
|
pub use requests::*;
|
||||||
421
socktop_connector/src/wasm/requests.rs
Normal file
421
socktop_connector/src/wasm/requests.rs
Normal file
@ -0,0 +1,421 @@
|
|||||||
|
//! WebSocket request handlers for WASM environments.
|
||||||
|
|
||||||
|
use crate::error::{ConnectorError, Result};
|
||||||
|
use crate::pb::Processes;
|
||||||
|
use crate::utils::{gunzip_to_string, gunzip_to_vec, is_gzip, log_debug};
|
||||||
|
use crate::{
|
||||||
|
AgentRequest, AgentResponse, DiskInfo, JournalResponse, Metrics, ProcessInfo,
|
||||||
|
ProcessMetricsResponse, ProcessesPayload,
|
||||||
|
};
|
||||||
|
|
||||||
|
use prost::Message as ProstMessage;
|
||||||
|
use std::cell::RefCell;
|
||||||
|
use std::rc::Rc;
|
||||||
|
use wasm_bindgen::JsCast;
|
||||||
|
use wasm_bindgen::prelude::*;
|
||||||
|
use web_sys::WebSocket;
|
||||||
|
|
||||||
|
/// Send a request and wait for response with binary data handling
///
/// Serializes `request` to the legacy "get_*" string format, sends it over
/// the browser WebSocket, waits for a single reply, then parses the reply
/// into the matching `AgentResponse` variant. Binary (possibly gzipped
/// protobuf) replies are only fully decoded for `Processes`; for `Metrics`
/// a placeholder is returned (see TODO below).
pub async fn send_request_and_wait(
    websocket: &WebSocket,
    request: AgentRequest,
) -> Result<AgentResponse> {
    // Use the legacy string format that the agent expects
    let request_string = request.to_legacy_string();

    // Send request
    websocket
        .send_with_str(&request_string)
        .map_err(|e| ConnectorError::protocol_error(format!("Failed to send message: {e:?}")))?;

    // Wait for response using JavaScript Promise.
    // `response` is the textual reply (or a "BINARY_DATA:<n>" marker);
    // `binary_data` carries the raw bytes when the reply was a binary frame.
    let (response, binary_data) = wait_for_response_with_binary(websocket).await?;

    // Parse the response based on the request type
    match request {
        AgentRequest::Metrics => {
            // Check if this is binary data (protobuf from agent)
            if response.starts_with("BINARY_DATA:") {
                // Extract the byte count
                let byte_count: usize = response
                    .strip_prefix("BINARY_DATA:")
                    .unwrap_or("0")
                    .parse()
                    .unwrap_or(0);

                // For now, return a placeholder metrics response indicating binary data received
                // TODO: Implement proper protobuf decoding for binary data
                let placeholder_metrics = Metrics {
                    cpu_total: 0.0,
                    cpu_per_core: vec![0.0],
                    mem_total: 0,
                    mem_used: 0,
                    swap_total: 0,
                    swap_used: 0,
                    hostname: format!("Binary protobuf data ({byte_count} bytes)"),
                    cpu_temp_c: None,
                    disks: vec![],
                    networks: vec![],
                    top_processes: vec![],
                    gpus: None,
                    process_count: None,
                };
                Ok(AgentResponse::Metrics(placeholder_metrics))
            } else {
                // Try to parse as JSON (fallback)
                let metrics: Metrics = serde_json::from_str(&response).map_err(|e| {
                    ConnectorError::serialization_error(format!("Failed to parse metrics: {e}"))
                })?;
                Ok(AgentResponse::Metrics(metrics))
            }
        }
        AgentRequest::Disks => {
            // Disks replies are always JSON text.
            let disks: Vec<DiskInfo> = serde_json::from_str(&response).map_err(|e| {
                ConnectorError::serialization_error(format!("Failed to parse disks: {e}"))
            })?;
            Ok(AgentResponse::Disks(disks))
        }
        AgentRequest::Processes => {
            log_debug(&format!(
                "🔍 Processing process request - response: {}",
                if response.len() > 100 {
                    format!("{}...", &response[..100])
                } else {
                    response.clone()
                }
            ));
            log_debug(&format!(
                "🔍 Binary data available: {}",
                binary_data.is_some()
            ));
            // Preferred path: raw binary frame carrying (possibly gzipped)
            // protobuf. On success this returns early; on any failure we fall
            // through to the "BINARY_DATA:" / JSON handling below.
            if let Some(ref data) = binary_data {
                log_debug(&format!("🔍 Binary data size: {} bytes", data.len()));
                // Check if it's gzipped data and decompress it first
                if is_gzip(data) {
                    log_debug("🔍 Process data is gzipped, decompressing...");
                    match gunzip_to_vec(data) {
                        Ok(decompressed_bytes) => {
                            log_debug(&format!(
                                "🔍 Successfully decompressed {} bytes, now decoding protobuf...",
                                decompressed_bytes.len()
                            ));
                            // Now decode the decompressed bytes as protobuf
                            match <Processes as ProstMessage>::decode(decompressed_bytes.as_slice())
                            {
                                Ok(protobuf_processes) => {
                                    log_debug(&format!(
                                        "✅ Successfully decoded {} processes from gzipped protobuf",
                                        protobuf_processes.rows.len()
                                    ));

                                    // Convert protobuf processes to ProcessInfo structs
                                    let processes: Vec<ProcessInfo> = protobuf_processes
                                        .rows
                                        .into_iter()
                                        .map(|p| ProcessInfo {
                                            pid: p.pid,
                                            name: p.name,
                                            cpu_usage: p.cpu_usage,
                                            mem_bytes: p.mem_bytes,
                                        })
                                        .collect();

                                    let processes_payload = ProcessesPayload {
                                        top_processes: processes,
                                        process_count: protobuf_processes.process_count as usize,
                                    };
                                    return Ok(AgentResponse::Processes(processes_payload));
                                }
                                Err(e) => {
                                    log_debug(&format!(
                                        "❌ Failed to decode decompressed protobuf: {e}"
                                    ));
                                }
                            }
                        }
                        Err(e) => {
                            log_debug(&format!(
                                "❌ Failed to decompress gzipped process data: {e}"
                            ));
                        }
                    }
                }
            }

            // Check if this is binary data (protobuf from agent)
            if response.starts_with("BINARY_DATA:") {
                // Extract the binary data size and decode protobuf
                let byte_count_str = response.strip_prefix("BINARY_DATA:").unwrap_or("0");
                let _byte_count: usize = byte_count_str.parse().unwrap_or(0);

                // Check if we have the actual binary data
                if let Some(binary_bytes) = binary_data {
                    log_debug(&format!(
                        "🔧 Decoding {} bytes of protobuf process data",
                        binary_bytes.len()
                    ));

                    // Try to decode the protobuf data using the prost Message trait
                    match <Processes as ProstMessage>::decode(&binary_bytes[..]) {
                        Ok(protobuf_processes) => {
                            log_debug(&format!(
                                "✅ Successfully decoded {} processes from protobuf",
                                protobuf_processes.rows.len()
                            ));

                            // Convert protobuf processes to ProcessInfo structs
                            let processes: Vec<ProcessInfo> = protobuf_processes
                                .rows
                                .into_iter()
                                .map(|p| ProcessInfo {
                                    pid: p.pid,
                                    name: p.name,
                                    cpu_usage: p.cpu_usage,
                                    mem_bytes: p.mem_bytes,
                                })
                                .collect();

                            let processes_payload = ProcessesPayload {
                                top_processes: processes,
                                process_count: protobuf_processes.process_count as usize,
                            };
                            Ok(AgentResponse::Processes(processes_payload))
                        }
                        Err(e) => {
                            log_debug(&format!("❌ Failed to decode protobuf: {e}"));
                            // Fallback to empty processes
                            let processes = ProcessesPayload {
                                top_processes: vec![],
                                process_count: 0,
                            };
                            Ok(AgentResponse::Processes(processes))
                        }
                    }
                } else {
                    // Marker without payload: degrade to an empty process list
                    // rather than erroring out.
                    log_debug(
                        "❌ Binary data indicator received but no actual binary data preserved",
                    );
                    let processes = ProcessesPayload {
                        top_processes: vec![],
                        process_count: 0,
                    };
                    Ok(AgentResponse::Processes(processes))
                }
            } else {
                // Try to parse as JSON (fallback)
                let processes: ProcessesPayload = serde_json::from_str(&response).map_err(|e| {
                    ConnectorError::serialization_error(format!("Failed to parse processes: {e}"))
                })?;
                Ok(AgentResponse::Processes(processes))
            }
        }
        AgentRequest::ProcessMetrics { pid: _ } => {
            // Parse JSON response for process metrics
            let process_metrics: ProcessMetricsResponse =
                serde_json::from_str(&response).map_err(|e| {
                    ConnectorError::serialization_error(format!(
                        "Failed to parse process metrics: {e}"
                    ))
                })?;
            Ok(AgentResponse::ProcessMetrics(process_metrics))
        }
        AgentRequest::JournalEntries { pid: _ } => {
            // Parse JSON response for journal entries
            let journal_entries: JournalResponse =
                serde_json::from_str(&response).map_err(|e| {
                    ConnectorError::serialization_error(format!(
                        "Failed to parse journal entries: {e}"
                    ))
                })?;
            Ok(AgentResponse::JournalEntries(journal_entries))
        }
    }
}
|
||||||
|
|
||||||
|
async fn wait_for_response_with_binary(websocket: &WebSocket) -> Result<(String, Option<Vec<u8>>)> {
|
||||||
|
let start_time = js_sys::Date::now();
|
||||||
|
let timeout_ms = 10000.0; // 10 second timeout
|
||||||
|
|
||||||
|
// Store the response in a shared location
|
||||||
|
let response_cell = Rc::new(RefCell::new(None::<String>));
|
||||||
|
let binary_data_cell = Rc::new(RefCell::new(None::<Vec<u8>>));
|
||||||
|
let error_cell = Rc::new(RefCell::new(None::<String>));
|
||||||
|
|
||||||
|
// Use a unique request ID to avoid message collision
|
||||||
|
let _request_id = js_sys::Math::random();
|
||||||
|
let response_received = Rc::new(RefCell::new(false));
|
||||||
|
|
||||||
|
// Set up the message handler that only processes if we haven't gotten a response yet
|
||||||
|
{
|
||||||
|
let response_cell = response_cell.clone();
|
||||||
|
let binary_data_cell = binary_data_cell.clone();
|
||||||
|
let response_received = response_received.clone();
|
||||||
|
let onmessage_callback = Closure::wrap(Box::new(move |e: web_sys::MessageEvent| {
|
||||||
|
// Only process if we haven't already received a response for this request
|
||||||
|
if !*response_received.borrow() {
|
||||||
|
// Handle text messages (JSON responses for metrics/disks)
|
||||||
|
if let Ok(data) = e.data().dyn_into::<js_sys::JsString>() {
|
||||||
|
let message = data.as_string().unwrap_or_default();
|
||||||
|
if !message.is_empty() {
|
||||||
|
// Debug: Log what we received (truncated)
|
||||||
|
let preview = if message.len() > 100 {
|
||||||
|
format!("{}...", &message[..100])
|
||||||
|
} else {
|
||||||
|
message.clone()
|
||||||
|
};
|
||||||
|
log_debug(&format!("🔍 Received text: {preview}"));
|
||||||
|
|
||||||
|
*response_cell.borrow_mut() = Some(message);
|
||||||
|
*response_received.borrow_mut() = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Handle binary messages (could be JSON as text bytes or actual protobuf)
|
||||||
|
else if let Ok(array_buffer) = e.data().dyn_into::<js_sys::ArrayBuffer>() {
|
||||||
|
let uint8_array = js_sys::Uint8Array::new(&array_buffer);
|
||||||
|
let length = uint8_array.length() as usize;
|
||||||
|
let mut bytes = vec![0u8; length];
|
||||||
|
uint8_array.copy_to(&mut bytes);
|
||||||
|
|
||||||
|
log_debug(&format!("🔍 Received binary data: {length} bytes"));
|
||||||
|
|
||||||
|
// Debug: Log the first few bytes to see what we're dealing with
|
||||||
|
let first_bytes = if bytes.len() >= 4 {
|
||||||
|
format!(
|
||||||
|
"0x{:02x} 0x{:02x} 0x{:02x} 0x{:02x}",
|
||||||
|
bytes[0], bytes[1], bytes[2], bytes[3]
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
format!("Only {} bytes available", bytes.len())
|
||||||
|
};
|
||||||
|
log_debug(&format!("🔍 First bytes: {first_bytes}"));
|
||||||
|
|
||||||
|
// Try to decode as UTF-8 text first (in case it's JSON sent as binary)
|
||||||
|
match String::from_utf8(bytes.clone()) {
|
||||||
|
Ok(text) => {
|
||||||
|
// If it decodes to valid UTF-8, check if it looks like JSON
|
||||||
|
let trimmed = text.trim();
|
||||||
|
if (trimmed.starts_with('{') && trimmed.ends_with('}'))
|
||||||
|
|| (trimmed.starts_with('[') && trimmed.ends_with(']'))
|
||||||
|
{
|
||||||
|
log_debug(&format!(
|
||||||
|
"🔍 Binary data is actually JSON text: {}",
|
||||||
|
if text.len() > 100 {
|
||||||
|
format!("{}...", &text[..100])
|
||||||
|
} else {
|
||||||
|
text.clone()
|
||||||
|
}
|
||||||
|
));
|
||||||
|
*response_cell.borrow_mut() = Some(text);
|
||||||
|
*response_received.borrow_mut() = true;
|
||||||
|
} else {
|
||||||
|
log_debug(&format!(
|
||||||
|
"🔍 Binary data is UTF-8 text but not JSON: {}",
|
||||||
|
if text.len() > 100 {
|
||||||
|
format!("{}...", &text[..100])
|
||||||
|
} else {
|
||||||
|
text.clone()
|
||||||
|
}
|
||||||
|
));
|
||||||
|
*response_cell.borrow_mut() = Some(text);
|
||||||
|
*response_received.borrow_mut() = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(_) => {
|
||||||
|
// If it's not valid UTF-8, check if it's gzipped data
|
||||||
|
if is_gzip(&bytes) {
|
||||||
|
log_debug(&format!(
|
||||||
|
"🔍 Binary data appears to be gzipped ({length} bytes)"
|
||||||
|
));
|
||||||
|
// Try to decompress using unified gzip decompression
|
||||||
|
match gunzip_to_string(&bytes) {
|
||||||
|
Ok(decompressed_text) => {
|
||||||
|
log_debug(&format!(
|
||||||
|
"🔍 Gzipped data decompressed to text: {}",
|
||||||
|
if decompressed_text.len() > 100 {
|
||||||
|
format!("{}...", &decompressed_text[..100])
|
||||||
|
} else {
|
||||||
|
decompressed_text.clone()
|
||||||
|
}
|
||||||
|
));
|
||||||
|
*response_cell.borrow_mut() = Some(decompressed_text);
|
||||||
|
*response_received.borrow_mut() = true;
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
log_debug(&format!("🔍 Failed to decompress gzip: {e}"));
|
||||||
|
// Fallback: treat as actual binary protobuf data
|
||||||
|
*binary_data_cell.borrow_mut() = Some(bytes.clone());
|
||||||
|
*response_cell.borrow_mut() =
|
||||||
|
Some(format!("BINARY_DATA:{length}"));
|
||||||
|
*response_received.borrow_mut() = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// If it's not valid UTF-8 and not gzipped, it's likely actual binary protobuf data
|
||||||
|
log_debug(&format!(
|
||||||
|
"🔍 Binary data is actual protobuf ({length} bytes)"
|
||||||
|
));
|
||||||
|
*binary_data_cell.borrow_mut() = Some(bytes);
|
||||||
|
*response_cell.borrow_mut() = Some(format!("BINARY_DATA:{length}"));
|
||||||
|
*response_received.borrow_mut() = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Log what type of data we got
|
||||||
|
log_debug(&format!("🔍 Received unknown data type: {:?}", e.data()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}) as Box<dyn FnMut(_)>);
|
||||||
|
websocket.set_onmessage(Some(onmessage_callback.as_ref().unchecked_ref()));
|
||||||
|
onmessage_callback.forget();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set up the error handler
|
||||||
|
{
|
||||||
|
let error_cell = error_cell.clone();
|
||||||
|
let response_received = response_received.clone();
|
||||||
|
let onerror_callback = Closure::wrap(Box::new(move |_e: web_sys::ErrorEvent| {
|
||||||
|
if !*response_received.borrow() {
|
||||||
|
*error_cell.borrow_mut() = Some("WebSocket error occurred".to_string());
|
||||||
|
*response_received.borrow_mut() = true;
|
||||||
|
}
|
||||||
|
}) as Box<dyn FnMut(_)>);
|
||||||
|
websocket.set_onerror(Some(onerror_callback.as_ref().unchecked_ref()));
|
||||||
|
onerror_callback.forget();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Poll for response with proper async delays
|
||||||
|
loop {
|
||||||
|
// Check for response
|
||||||
|
if *response_received.borrow() {
|
||||||
|
if let Some(response) = response_cell.borrow().as_ref() {
|
||||||
|
let binary_data = binary_data_cell.borrow().clone();
|
||||||
|
return Ok((response.clone(), binary_data));
|
||||||
|
}
|
||||||
|
if let Some(error) = error_cell.borrow().as_ref() {
|
||||||
|
return Err(ConnectorError::protocol_error(error));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check timeout
|
||||||
|
let now = js_sys::Date::now();
|
||||||
|
if now - start_time > timeout_ms {
|
||||||
|
*response_received.borrow_mut() = true; // Mark as done to prevent future processing
|
||||||
|
return Err(ConnectorError::protocol_error("WebSocket response timeout"));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait 50ms before checking again
|
||||||
|
let promise = js_sys::Promise::new(&mut |resolve, _| {
|
||||||
|
let closure = Closure::once(move || resolve.call0(&JsValue::UNDEFINED));
|
||||||
|
web_sys::window()
|
||||||
|
.unwrap()
|
||||||
|
.set_timeout_with_callback_and_timeout_and_arguments_0(
|
||||||
|
closure.as_ref().unchecked_ref(),
|
||||||
|
50,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
closure.forget();
|
||||||
|
});
|
||||||
|
let _ = wasm_bindgen_futures::JsFuture::from(promise).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
51
socktop_connector/tests/integration_test.rs
Normal file
51
socktop_connector/tests/integration_test.rs
Normal file
@ -0,0 +1,51 @@
|
|||||||
|
use socktop_connector::{
|
||||||
|
AgentRequest, AgentResponse, connect_to_socktop_agent, connect_to_socktop_agent_with_tls,
|
||||||
|
};
|
||||||
|
|
||||||
|
// Integration probe: only runs when SOCKTOP_WS is set to an agent WebSocket URL.
|
||||||
|
// Example: SOCKTOP_WS=ws://127.0.0.1:3000/ws cargo test -p socktop_connector --test integration_test -- --nocapture
|
||||||
|
#[tokio::test]
|
||||||
|
async fn probe_ws_endpoints() {
|
||||||
|
// Gate the test to avoid CI failures when no agent is running.
|
||||||
|
let url = match std::env::var("SOCKTOP_WS") {
|
||||||
|
Ok(v) if !v.is_empty() => v,
|
||||||
|
_ => {
|
||||||
|
eprintln!(
|
||||||
|
"skipping ws_probe: set SOCKTOP_WS=ws://host:port/ws to run this integration test"
|
||||||
|
);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Optional pinned CA for WSS/self-signed setups
|
||||||
|
let tls_ca = std::env::var("SOCKTOP_TLS_CA").ok();
|
||||||
|
|
||||||
|
let mut connector = if let Some(ca_path) = tls_ca {
|
||||||
|
connect_to_socktop_agent_with_tls(&url, ca_path, true)
|
||||||
|
.await
|
||||||
|
.expect("connect ws with TLS")
|
||||||
|
} else {
|
||||||
|
connect_to_socktop_agent(&url).await.expect("connect ws")
|
||||||
|
};
|
||||||
|
|
||||||
|
// Should get fast metrics quickly
|
||||||
|
let response = connector.request(AgentRequest::Metrics).await;
|
||||||
|
assert!(response.is_ok(), "expected Metrics payload within timeout");
|
||||||
|
if let Ok(AgentResponse::Metrics(_)) = response {
|
||||||
|
// Success
|
||||||
|
} else {
|
||||||
|
panic!("expected Metrics response");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Processes may be gzipped and a bit slower, but should arrive
|
||||||
|
let response = connector.request(AgentRequest::Processes).await;
|
||||||
|
assert!(
|
||||||
|
response.is_ok(),
|
||||||
|
"expected Processes payload within timeout"
|
||||||
|
);
|
||||||
|
if let Ok(AgentResponse::Processes(_)) = response {
|
||||||
|
// Success
|
||||||
|
} else {
|
||||||
|
panic!("expected Processes response");
|
||||||
|
}
|
||||||
|
}
|
||||||
15
socktop_wasm_test/.gitignore
vendored
Normal file
15
socktop_wasm_test/.gitignore
vendored
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
# Build artifacts
|
||||||
|
/target/
|
||||||
|
/pkg/
|
||||||
|
|
||||||
|
# IDE files
|
||||||
|
.vscode/
|
||||||
|
.idea/
|
||||||
|
|
||||||
|
# OS files
|
||||||
|
.DS_Store
|
||||||
|
Thumbs.db
|
||||||
|
|
||||||
|
# Backup files
|
||||||
|
*~
|
||||||
|
*.bak
|
||||||
741
socktop_wasm_test/Cargo.lock
generated
Normal file
741
socktop_wasm_test/Cargo.lock
generated
Normal file
@ -0,0 +1,741 @@
|
|||||||
|
# This file is automatically @generated by Cargo.
|
||||||
|
# It is not intended for manual editing.
|
||||||
|
version = 4
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "adler2"
|
||||||
|
version = "2.0.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "aho-corasick"
|
||||||
|
version = "1.1.3"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
|
||||||
|
dependencies = [
|
||||||
|
"memchr",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "anyhow"
|
||||||
|
version = "1.0.99"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "bitflags"
|
||||||
|
version = "2.9.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "bumpalo"
|
||||||
|
version = "3.19.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "bytes"
|
||||||
|
version = "1.10.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "cfg-if"
|
||||||
|
version = "1.0.3"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "console_error_panic_hook"
|
||||||
|
version = "0.1.7"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"wasm-bindgen",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "crc32fast"
|
||||||
|
version = "1.5.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "either"
|
||||||
|
version = "1.15.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "equivalent"
|
||||||
|
version = "1.0.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "errno"
|
||||||
|
version = "0.3.13"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad"
|
||||||
|
dependencies = [
|
||||||
|
"libc",
|
||||||
|
"windows-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "fastrand"
|
||||||
|
version = "2.3.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "fixedbitset"
|
||||||
|
version = "0.5.7"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "flate2"
|
||||||
|
version = "1.1.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d"
|
||||||
|
dependencies = [
|
||||||
|
"crc32fast",
|
||||||
|
"miniz_oxide",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "getrandom"
|
||||||
|
version = "0.2.16"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"js-sys",
|
||||||
|
"libc",
|
||||||
|
"wasi 0.11.1+wasi-snapshot-preview1",
|
||||||
|
"wasm-bindgen",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "getrandom"
|
||||||
|
version = "0.3.3"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"libc",
|
||||||
|
"r-efi",
|
||||||
|
"wasi 0.14.4+wasi-0.2.4",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "hashbrown"
|
||||||
|
version = "0.15.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "heck"
|
||||||
|
version = "0.5.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "indexmap"
|
||||||
|
version = "2.11.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9"
|
||||||
|
dependencies = [
|
||||||
|
"equivalent",
|
||||||
|
"hashbrown",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "itertools"
|
||||||
|
version = "0.14.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285"
|
||||||
|
dependencies = [
|
||||||
|
"either",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "itoa"
|
||||||
|
version = "1.0.15"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "js-sys"
|
||||||
|
version = "0.3.78"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "0c0b063578492ceec17683ef2f8c5e89121fbd0b172cbc280635ab7567db2738"
|
||||||
|
dependencies = [
|
||||||
|
"once_cell",
|
||||||
|
"wasm-bindgen",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "libc"
|
||||||
|
version = "0.2.175"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "linux-raw-sys"
|
||||||
|
version = "0.9.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "log"
|
||||||
|
version = "0.4.28"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "memchr"
|
||||||
|
version = "2.7.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "miniz_oxide"
|
||||||
|
version = "0.8.9"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316"
|
||||||
|
dependencies = [
|
||||||
|
"adler2",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "multimap"
|
||||||
|
version = "0.10.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "once_cell"
|
||||||
|
version = "1.21.3"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "petgraph"
|
||||||
|
version = "0.7.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772"
|
||||||
|
dependencies = [
|
||||||
|
"fixedbitset",
|
||||||
|
"indexmap",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "prettyplease"
|
||||||
|
version = "0.2.37"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"syn",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "proc-macro2"
|
||||||
|
version = "1.0.101"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de"
|
||||||
|
dependencies = [
|
||||||
|
"unicode-ident",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "prost"
|
||||||
|
version = "0.13.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5"
|
||||||
|
dependencies = [
|
||||||
|
"bytes",
|
||||||
|
"prost-derive",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "prost-build"
|
||||||
|
version = "0.13.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf"
|
||||||
|
dependencies = [
|
||||||
|
"heck",
|
||||||
|
"itertools",
|
||||||
|
"log",
|
||||||
|
"multimap",
|
||||||
|
"once_cell",
|
||||||
|
"petgraph",
|
||||||
|
"prettyplease",
|
||||||
|
"prost",
|
||||||
|
"prost-types",
|
||||||
|
"regex",
|
||||||
|
"syn",
|
||||||
|
"tempfile",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "prost-derive"
|
||||||
|
version = "0.13.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d"
|
||||||
|
dependencies = [
|
||||||
|
"anyhow",
|
||||||
|
"itertools",
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "prost-types"
|
||||||
|
version = "0.13.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16"
|
||||||
|
dependencies = [
|
||||||
|
"prost",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "protoc-bin-vendored"
|
||||||
|
version = "3.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d1c381df33c98266b5f08186583660090a4ffa0889e76c7e9a5e175f645a67fa"
|
||||||
|
dependencies = [
|
||||||
|
"protoc-bin-vendored-linux-aarch_64",
|
||||||
|
"protoc-bin-vendored-linux-ppcle_64",
|
||||||
|
"protoc-bin-vendored-linux-s390_64",
|
||||||
|
"protoc-bin-vendored-linux-x86_32",
|
||||||
|
"protoc-bin-vendored-linux-x86_64",
|
||||||
|
"protoc-bin-vendored-macos-aarch_64",
|
||||||
|
"protoc-bin-vendored-macos-x86_64",
|
||||||
|
"protoc-bin-vendored-win32",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "protoc-bin-vendored-linux-aarch_64"
|
||||||
|
version = "3.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "c350df4d49b5b9e3ca79f7e646fde2377b199e13cfa87320308397e1f37e1a4c"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "protoc-bin-vendored-linux-ppcle_64"
|
||||||
|
version = "3.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "a55a63e6c7244f19b5c6393f025017eb5d793fd5467823a099740a7a4222440c"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "protoc-bin-vendored-linux-s390_64"
|
||||||
|
version = "3.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1dba5565db4288e935d5330a07c264a4ee8e4a5b4a4e6f4e83fad824cc32f3b0"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "protoc-bin-vendored-linux-x86_32"
|
||||||
|
version = "3.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "8854774b24ee28b7868cd71dccaae8e02a2365e67a4a87a6cd11ee6cdbdf9cf5"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "protoc-bin-vendored-linux-x86_64"
|
||||||
|
version = "3.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "b38b07546580df720fa464ce124c4b03630a6fb83e05c336fea2a241df7e5d78"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "protoc-bin-vendored-macos-aarch_64"
|
||||||
|
version = "3.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "89278a9926ce312e51f1d999fee8825d324d603213344a9a706daa009f1d8092"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "protoc-bin-vendored-macos-x86_64"
|
||||||
|
version = "3.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "81745feda7ccfb9471d7a4de888f0652e806d5795b61480605d4943176299756"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "protoc-bin-vendored-win32"
|
||||||
|
version = "3.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "95067976aca6421a523e491fce939a3e65249bac4b977adee0ee9771568e8aa3"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "quote"
|
||||||
|
version = "1.0.40"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "r-efi"
|
||||||
|
version = "5.3.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "regex"
|
||||||
|
version = "1.11.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912"
|
||||||
|
dependencies = [
|
||||||
|
"aho-corasick",
|
||||||
|
"memchr",
|
||||||
|
"regex-automata",
|
||||||
|
"regex-syntax",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "regex-automata"
|
||||||
|
version = "0.4.10"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6"
|
||||||
|
dependencies = [
|
||||||
|
"aho-corasick",
|
||||||
|
"memchr",
|
||||||
|
"regex-syntax",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "regex-syntax"
|
||||||
|
version = "0.8.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rustix"
|
||||||
|
version = "1.0.8"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8"
|
||||||
|
dependencies = [
|
||||||
|
"bitflags",
|
||||||
|
"errno",
|
||||||
|
"libc",
|
||||||
|
"linux-raw-sys",
|
||||||
|
"windows-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rustversion"
|
||||||
|
version = "1.0.22"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "ryu"
|
||||||
|
version = "1.0.20"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "serde"
|
||||||
|
version = "1.0.219"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
|
||||||
|
dependencies = [
|
||||||
|
"serde_derive",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "serde_derive"
|
||||||
|
version = "1.0.219"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "serde_json"
|
||||||
|
version = "1.0.143"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a"
|
||||||
|
dependencies = [
|
||||||
|
"itoa",
|
||||||
|
"memchr",
|
||||||
|
"ryu",
|
||||||
|
"serde",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "socktop_connector"
|
||||||
|
version = "0.1.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "3a63dadaa5105df11b0684759a829012257d48e72a469cc554c0cf4394605f5a"
|
||||||
|
dependencies = [
|
||||||
|
"flate2",
|
||||||
|
"js-sys",
|
||||||
|
"prost",
|
||||||
|
"prost-build",
|
||||||
|
"protoc-bin-vendored",
|
||||||
|
"serde",
|
||||||
|
"serde_json",
|
||||||
|
"thiserror",
|
||||||
|
"wasm-bindgen",
|
||||||
|
"wasm-bindgen-futures",
|
||||||
|
"web-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "socktop_wasm_test"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"console_error_panic_hook",
|
||||||
|
"getrandom 0.2.16",
|
||||||
|
"js-sys",
|
||||||
|
"serde",
|
||||||
|
"serde_json",
|
||||||
|
"socktop_connector",
|
||||||
|
"wasm-bindgen",
|
||||||
|
"wasm-bindgen-futures",
|
||||||
|
"web-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "syn"
|
||||||
|
version = "2.0.106"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"unicode-ident",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "tempfile"
|
||||||
|
version = "3.21.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "15b61f8f20e3a6f7e0649d825294eaf317edce30f82cf6026e7e4cb9222a7d1e"
|
||||||
|
dependencies = [
|
||||||
|
"fastrand",
|
||||||
|
"getrandom 0.3.3",
|
||||||
|
"once_cell",
|
||||||
|
"rustix",
|
||||||
|
"windows-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "thiserror"
|
||||||
|
version = "2.0.16"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0"
|
||||||
|
dependencies = [
|
||||||
|
"thiserror-impl",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "thiserror-impl"
|
||||||
|
version = "2.0.16"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "unicode-ident"
|
||||||
|
version = "1.0.18"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasi"
|
||||||
|
version = "0.11.1+wasi-snapshot-preview1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasi"
|
||||||
|
version = "0.14.4+wasi-0.2.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "88a5f4a424faf49c3c2c344f166f0662341d470ea185e939657aaff130f0ec4a"
|
||||||
|
dependencies = [
|
||||||
|
"wit-bindgen",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasm-bindgen"
|
||||||
|
version = "0.2.101"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "7e14915cadd45b529bb8d1f343c4ed0ac1de926144b746e2710f9cd05df6603b"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"once_cell",
|
||||||
|
"rustversion",
|
||||||
|
"wasm-bindgen-macro",
|
||||||
|
"wasm-bindgen-shared",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasm-bindgen-backend"
|
||||||
|
version = "0.2.101"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "e28d1ba982ca7923fd01448d5c30c6864d0a14109560296a162f80f305fb93bb"
|
||||||
|
dependencies = [
|
||||||
|
"bumpalo",
|
||||||
|
"log",
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
"wasm-bindgen-shared",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasm-bindgen-futures"
|
||||||
|
version = "0.4.51"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "0ca85039a9b469b38336411d6d6ced91f3fc87109a2a27b0c197663f5144dffe"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"js-sys",
|
||||||
|
"once_cell",
|
||||||
|
"wasm-bindgen",
|
||||||
|
"web-sys",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasm-bindgen-macro"
|
||||||
|
version = "0.2.101"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "7c3d463ae3eff775b0c45df9da45d68837702ac35af998361e2c84e7c5ec1b0d"
|
||||||
|
dependencies = [
|
||||||
|
"quote",
|
||||||
|
"wasm-bindgen-macro-support",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasm-bindgen-macro-support"
|
||||||
|
version = "0.2.101"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "7bb4ce89b08211f923caf51d527662b75bdc9c9c7aab40f86dcb9fb85ac552aa"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
"wasm-bindgen-backend",
|
||||||
|
"wasm-bindgen-shared",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wasm-bindgen-shared"
|
||||||
|
version = "0.2.101"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "f143854a3b13752c6950862c906306adb27c7e839f7414cec8fea35beab624c1"
|
||||||
|
dependencies = [
|
||||||
|
"unicode-ident",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "web-sys"
|
||||||
|
version = "0.3.78"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "77e4b637749ff0d92b8fad63aa1f7cff3cbe125fd49c175cd6345e7272638b12"
|
||||||
|
dependencies = [
|
||||||
|
"js-sys",
|
||||||
|
"wasm-bindgen",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows-link"
|
||||||
|
version = "0.1.3"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows-sys"
|
||||||
|
version = "0.60.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
|
||||||
|
dependencies = [
|
||||||
|
"windows-targets",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows-targets"
|
||||||
|
version = "0.53.3"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91"
|
||||||
|
dependencies = [
|
||||||
|
"windows-link",
|
||||||
|
"windows_aarch64_gnullvm",
|
||||||
|
"windows_aarch64_msvc",
|
||||||
|
"windows_i686_gnu",
|
||||||
|
"windows_i686_gnullvm",
|
||||||
|
"windows_i686_msvc",
|
||||||
|
"windows_x86_64_gnu",
|
||||||
|
"windows_x86_64_gnullvm",
|
||||||
|
"windows_x86_64_msvc",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_aarch64_gnullvm"
|
||||||
|
version = "0.53.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_aarch64_msvc"
|
||||||
|
version = "0.53.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_i686_gnu"
|
||||||
|
version = "0.53.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_i686_gnullvm"
|
||||||
|
version = "0.53.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_i686_msvc"
|
||||||
|
version = "0.53.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_x86_64_gnu"
|
||||||
|
version = "0.53.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_x86_64_gnullvm"
|
||||||
|
version = "0.53.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_x86_64_msvc"
|
||||||
|
version = "0.53.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "wit-bindgen"
|
||||||
|
version = "0.45.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "5c573471f125075647d03df72e026074b7203790d41351cd6edc96f46bcccd36"
|
||||||
36
socktop_wasm_test/Cargo.toml
Normal file
36
socktop_wasm_test/Cargo.toml
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
[package]
|
||||||
|
name = "socktop_wasm_test"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2021"
|
||||||
|
|
||||||
|
# Make this a standalone package, not part of the parent workspace
|
||||||
|
[workspace]
|
||||||
|
|
||||||
|
[lib]
|
||||||
|
crate-type = ["cdylib"]
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
# Use WASM features for WebSocket connectivity (published version)
|
||||||
|
socktop_connector = { version = "0.1.5", default-features = false, features = ["wasm"] }
|
||||||
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
|
serde_json = "1.0"
|
||||||
|
wasm-bindgen = "0.2"
|
||||||
|
wasm-bindgen-futures = "0.4"
|
||||||
|
console_error_panic_hook = "0.1"
|
||||||
|
js-sys = "0.3"
|
||||||
|
|
||||||
|
[dependencies.web-sys]
|
||||||
|
version = "0.3"
|
||||||
|
features = [
|
||||||
|
"console",
|
||||||
|
"WebSocket",
|
||||||
|
"MessageEvent",
|
||||||
|
"ErrorEvent",
|
||||||
|
"CloseEvent",
|
||||||
|
"BinaryType",
|
||||||
|
]
|
||||||
|
|
||||||
|
# Enable JS feature for WASM random number generation
|
||||||
|
[dependencies.getrandom]
|
||||||
|
version = "0.2"
|
||||||
|
features = ["js"]
|
||||||
150
socktop_wasm_test/README.md
Normal file
150
socktop_wasm_test/README.md
Normal file
@ -0,0 +1,150 @@
|
|||||||
|
# WASM Compatibility Guide for socktop_connector
|
||||||
|
|
||||||
|
This directory contains a complete WebAssembly (WASM) compatibility test and implementation guide for the `socktop_connector` library.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
`socktop_connector` provides **full WebSocket networking support** for WebAssembly environments. The library includes complete connectivity functionality with automatic compression and protobuf decoding, making it easy to connect to socktop agents directly from browser applications.
|
||||||
|
|
||||||
|
## What Works in WASM
|
||||||
|
|
||||||
|
- ✅ **Full WebSocket connections** (`ws://` connections)
|
||||||
|
- ✅ **All request types** (`AgentRequest::Metrics`, `AgentRequest::Disks`, `AgentRequest::Processes`)
|
||||||
|
- ✅ **Automatic data processing**: Gzip decompression for metrics/disks, protobuf decoding for processes
|
||||||
|
- ✅ Configuration types (`ConnectorConfig`)
|
||||||
|
- ✅ Request/Response types (`AgentRequest`, `AgentResponse`)
|
||||||
|
- ✅ JSON serialization/deserialization of all types
|
||||||
|
- ✅ Protocol and version configuration builders
|
||||||
|
- ✅ All type-safe validation and error handling
|
||||||
|
|
||||||
|
## What Doesn't Work in WASM
|
||||||
|
|
||||||
|
- ❌ TLS connections (`wss://`) - use `ws://` only
|
||||||
|
- ❌ TLS certificate handling (use non-TLS endpoints)
|
||||||
|
|
||||||
|
## Quick Start - WASM Test Page
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Please note that the test assumes you have and agent runnign on your local host at port 3000. If you would like to use an alternate configuration please update lib.rs prior to build.
|
||||||
|
|
||||||
|
# Build the WASM package
|
||||||
|
wasm-pack build --target web --out-dir pkg
|
||||||
|
|
||||||
|
# Serve the test page
|
||||||
|
basic-http-server . --addr 127.0.0.1:8000
|
||||||
|
|
||||||
|
# Open http://127.0.0.1:8000 in your browser
|
||||||
|
# Check the browser console for test results
|
||||||
|
```
|
||||||
|
|
||||||
|
<img src="./screenshot_09092025_134458.jpg" width="85%">
|
||||||
|
|
||||||
|
|
||||||
|
## WASM Dependencies
|
||||||
|
|
||||||
|
The test uses the WASM-compatible networking features:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
[dependencies]
|
||||||
|
socktop_connector = { version = "0.1.5", default-features = false, features = ["wasm"] }
|
||||||
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
|
serde_json = "1.0"
|
||||||
|
wasm-bindgen = "0.2"
|
||||||
|
console_error_panic_hook = "0.1"
|
||||||
|
|
||||||
|
[dependencies.web-sys]
|
||||||
|
version = "0.3"
|
||||||
|
features = ["console"]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Key**: Use `features = ["wasm"]` to enable full WebSocket networking support in WASM builds.
|
||||||
|
|
||||||
|
## Implementation Strategy
|
||||||
|
|
||||||
|
### 1. Use socktop_connector Types for Configuration
|
||||||
|
|
||||||
|
```rust
|
||||||
|
use wasm_bindgen::prelude::*;
|
||||||
|
use socktop_connector::{ConnectorConfig, AgentRequest, AgentResponse};
|
||||||
|
|
||||||
|
#[wasm_bindgen]
|
||||||
|
pub fn create_config() -> String {
|
||||||
|
// Use socktop_connector types for type-safe configuration
|
||||||
|
let config = ConnectorConfig::new("ws://localhost:3000/ws")
|
||||||
|
.with_protocols(vec!["socktop".to_string(), "v1".to_string()])
|
||||||
|
.with_version("13".to_string());
|
||||||
|
|
||||||
|
// Return JSON for use with browser WebSocket API
|
||||||
|
serde_json::to_string(&config).unwrap_or_default()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Create Type-Safe Requests
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[wasm_bindgen]
|
||||||
|
pub fn create_metrics_request() -> String {
|
||||||
|
let request = AgentRequest::Metrics;
|
||||||
|
serde_json::to_string(&request).unwrap_or_default()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[wasm_bindgen]
|
||||||
|
pub fn create_processes_request() -> String {
|
||||||
|
let request = AgentRequest::Processes;
|
||||||
|
serde_json::to_string(&request).unwrap_or_default()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Parse Responses with Type Safety
|
||||||
|
|
||||||
|
```rust
|
||||||
|
#[wasm_bindgen]
|
||||||
|
pub fn parse_metrics_response(json: &str) -> Option<String> {
|
||||||
|
match serde_json::from_str::<AgentResponse>(json) {
|
||||||
|
Ok(AgentResponse::Metrics(metrics)) => {
|
||||||
|
Some(format!("CPU: {}%, Memory: {}MB",
|
||||||
|
metrics.cpu_total,
|
||||||
|
metrics.mem_used / 1024 / 1024))
|
||||||
|
}
|
||||||
|
_ => None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Browser Integration
|
||||||
|
|
||||||
|
Then in JavaScript:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
import init, {
|
||||||
|
create_config,
|
||||||
|
create_metrics_request,
|
||||||
|
parse_metrics_response
|
||||||
|
} from './pkg/socktop_wasm_test.js';
|
||||||
|
|
||||||
|
async function run() {
|
||||||
|
await init();
|
||||||
|
|
||||||
|
// Use type-safe configuration
|
||||||
|
const configJson = create_config();
|
||||||
|
const config = JSON.parse(configJson);
|
||||||
|
|
||||||
|
// Create WebSocket with proper protocols
|
||||||
|
const ws = new WebSocket(config.url, config.ws_protocols);
|
||||||
|
|
||||||
|
ws.onopen = () => {
|
||||||
|
// Send type-safe requests
|
||||||
|
ws.send(create_metrics_request());
|
||||||
|
};
|
||||||
|
|
||||||
|
ws.onmessage = (event) => {
|
||||||
|
// Handle responses with type safety
|
||||||
|
const result = parse_metrics_response(event.data);
|
||||||
|
if (result) {
|
||||||
|
console.log(result);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
run();
|
||||||
|
```
|
||||||
154
socktop_wasm_test/index.html
Normal file
154
socktop_wasm_test/index.html
Normal file
@ -0,0 +1,154 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<meta charset="utf-8">
|
||||||
|
<title>Socktop Connector WASM Test</title>
|
||||||
|
<style>
|
||||||
|
body { font-family: monospace; padding: 20px; background-color: #f5f5f5; }
|
||||||
|
.container { max-width: 800px; margin: 0 auto; background: white; padding: 20px; border-radius: 8px; box-shadow: 0 2px 4px rgba(0,0,0,0.1); }
|
||||||
|
.log { margin: 5px 0; padding: 5px; border-radius: 4px; }
|
||||||
|
.success { color: #0a7c0a; background-color: #e8f5e8; }
|
||||||
|
.warning { color: #b8860b; background-color: #fdf6e3; }
|
||||||
|
.error { color: #d2322d; background-color: #f9e6e6; }
|
||||||
|
.info { color: #0969da; background-color: #e6f3ff; }
|
||||||
|
button {
|
||||||
|
background: #0969da;
|
||||||
|
color: white;
|
||||||
|
border: none;
|
||||||
|
padding: 10px 20px;
|
||||||
|
border-radius: 4px;
|
||||||
|
cursor: pointer;
|
||||||
|
font-size: 14px;
|
||||||
|
margin: 10px 0;
|
||||||
|
}
|
||||||
|
button:hover { background: #0757c7; }
|
||||||
|
button:disabled { background: #ccc; cursor: not-allowed; }
|
||||||
|
.server-input {
|
||||||
|
margin: 10px 0;
|
||||||
|
padding: 8px;
|
||||||
|
width: 300px;
|
||||||
|
border: 1px solid #ddd;
|
||||||
|
border-radius: 4px;
|
||||||
|
font-family: monospace;
|
||||||
|
}
|
||||||
|
.input-group { margin: 15px 0; }
|
||||||
|
.input-group label { display: block; margin-bottom: 5px; font-weight: bold; }
|
||||||
|
#output {
|
||||||
|
border: 1px solid #ddd;
|
||||||
|
border-radius: 4px;
|
||||||
|
padding: 10px;
|
||||||
|
min-height: 200px;
|
||||||
|
background: #fafafa;
|
||||||
|
font-family: 'Courier New', monospace;
|
||||||
|
}
|
||||||
|
.status { font-weight: bold; margin: 10px 0; }
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<div class="container">
|
||||||
|
<h1>🦀 Socktop Connector WASM Test</h1>
|
||||||
|
|
||||||
|
<div class="status">
|
||||||
|
<p><strong>Test Purpose:</strong> Verify socktop_connector works in WebAssembly without TLS dependencies</p>
|
||||||
|
<p><strong>Status:</strong> <span id="status">Loading WASM module...</span></p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="input-group">
|
||||||
|
<label for="server-url">Server URL:</label>
|
||||||
|
<input type="text" id="server-url" class="server-input" value="ws://localhost:3000/ws"
|
||||||
|
placeholder="ws://localhost:3000/ws">
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<button id="test-btn" disabled>Run WASM Test</button>
|
||||||
|
<button id="clear-btn">Clear Output</button>
|
||||||
|
|
||||||
|
<h3>Output:</h3>
|
||||||
|
<div id="output"></div>
|
||||||
|
|
||||||
|
<h3>ICON LEGEND:</h3>
|
||||||
|
<ul>
|
||||||
|
<li>✅ <strong>Success:</strong> No rustls/TLS errors, connector loads in WASM</li>
|
||||||
|
<li>⚠️ <strong>Expected:</strong> Connection failures without running socktop_agent</li>
|
||||||
|
<li>❌ <strong>Failure:</strong> Build errors or TLS dependency issues</li>
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
<p><small>💡 <strong>Tip:</strong> start socktop_agent with: <code>socktop_agent --port 3000</code></small></p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<script type="module">
|
||||||
|
import init, { test_socktop_connector } from './pkg/socktop_wasm_test.js';
|
||||||
|
|
||||||
|
const output = document.getElementById('output');
|
||||||
|
const testBtn = document.getElementById('test-btn');
|
||||||
|
const clearBtn = document.getElementById('clear-btn');
|
||||||
|
const status = document.getElementById('status');
|
||||||
|
|
||||||
|
// Capture console output and display it on page
|
||||||
|
const originalLog = console.log;
|
||||||
|
const originalError = console.error;
|
||||||
|
|
||||||
|
function addLog(text, type = 'info') {
|
||||||
|
const div = document.createElement('div');
|
||||||
|
div.className = `log ${type}`;
|
||||||
|
div.textContent = new Date().toLocaleTimeString() + ' - ' + text;
|
||||||
|
output.appendChild(div);
|
||||||
|
output.scrollTop = output.scrollHeight;
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log = function(...args) {
|
||||||
|
originalLog.apply(console, args);
|
||||||
|
const text = args.join(' ');
|
||||||
|
let type = 'info';
|
||||||
|
if (text.includes('✅')) {
|
||||||
|
type = 'success';
|
||||||
|
} else if (text.includes('⚠️')) {
|
||||||
|
type = 'warning';
|
||||||
|
} else if (text.includes('❌')) {
|
||||||
|
type = 'error';
|
||||||
|
}
|
||||||
|
addLog(text, type);
|
||||||
|
};
|
||||||
|
|
||||||
|
console.error = function(...args) {
|
||||||
|
originalError.apply(console, args);
|
||||||
|
addLog('ERROR: ' + args.join(' '), 'error');
|
||||||
|
};
|
||||||
|
|
||||||
|
clearBtn.onclick = () => {
|
||||||
|
output.innerHTML = '';
|
||||||
|
};
|
||||||
|
|
||||||
|
async function run() {
|
||||||
|
try {
|
||||||
|
await init();
|
||||||
|
addLog('WASM module initialized successfully!', 'success');
|
||||||
|
status.textContent = 'Ready to test';
|
||||||
|
testBtn.disabled = false;
|
||||||
|
|
||||||
|
testBtn.onclick = () => {
|
||||||
|
testBtn.disabled = true;
|
||||||
|
const serverUrl = document.getElementById('server-url').value.trim();
|
||||||
|
addLog('=== Starting WASM Test ===', 'info');
|
||||||
|
addLog(`🌐 Using server: ${serverUrl}`, 'info');
|
||||||
|
try {
|
||||||
|
test_socktop_connector(serverUrl || undefined);
|
||||||
|
setTimeout(() => {
|
||||||
|
testBtn.disabled = false;
|
||||||
|
}, 2000);
|
||||||
|
} catch (e) {
|
||||||
|
addLog('Test execution failed: ' + e.message, 'error');
|
||||||
|
testBtn.disabled = false;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
} catch (e) {
|
||||||
|
addLog('Failed to initialize WASM: ' + e.message, 'error');
|
||||||
|
status.textContent = 'Failed to load WASM module';
|
||||||
|
console.error('WASM initialization error:', e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
run();
|
||||||
|
</script>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
BIN
socktop_wasm_test/screenshot_09092025_134458.jpg
Normal file
BIN
socktop_wasm_test/screenshot_09092025_134458.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 214 KiB |
188
socktop_wasm_test/src/lib.rs
Normal file
188
socktop_wasm_test/src/lib.rs
Normal file
@ -0,0 +1,188 @@
|
|||||||
|
use wasm_bindgen::prelude::*;
|
||||||
|
use wasm_bindgen_futures::spawn_local;
|
||||||
|
use socktop_connector::{ConnectorConfig, AgentRequest, SocktopConnector};
|
||||||
|
|
||||||
|
// Import the `console.log` function from the Web API
|
||||||
|
#[wasm_bindgen]
|
||||||
|
extern "C" {
|
||||||
|
#[wasm_bindgen(js_namespace = console)]
|
||||||
|
fn log(s: &str);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Define a macro for easier console logging
|
||||||
|
macro_rules! console_log {
|
||||||
|
($($t:tt)*) => (log(&format_args!($($t)*).to_string()))
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is the main entry point called from JavaScript
|
||||||
|
#[wasm_bindgen]
|
||||||
|
pub fn test_socktop_connector(server_url: Option<String>) {
|
||||||
|
console_error_panic_hook::set_once();
|
||||||
|
|
||||||
|
// Use provided URL or default
|
||||||
|
let url = server_url.unwrap_or_else(|| "ws://localhost:3000/ws".to_string());
|
||||||
|
|
||||||
|
console_log!("🦀 Starting WASM connector test...");
|
||||||
|
console_log!("🌐 Connecting to: {}", url);
|
||||||
|
|
||||||
|
// Test 1: Create configuration
|
||||||
|
let config = ConnectorConfig::new(&url);
|
||||||
|
console_log!("✅ Config created: {}", config.url);
|
||||||
|
|
||||||
|
// Test 2: Test configuration methods
|
||||||
|
let config_with_protocols = config
|
||||||
|
.clone()
|
||||||
|
.with_protocols(vec!["socktop".to_string(), "v1".to_string()]);
|
||||||
|
console_log!("✅ Config with protocols: {:?}", config_with_protocols.ws_protocols);
|
||||||
|
|
||||||
|
let config_with_version = config_with_protocols.with_version("13".to_string());
|
||||||
|
console_log!("✅ Config with version: {:?}", config_with_version.ws_version);
|
||||||
|
|
||||||
|
// Test 3: Create request types
|
||||||
|
let _metrics_request = AgentRequest::Metrics;
|
||||||
|
let _disks_request = AgentRequest::Disks;
|
||||||
|
let _processes_request = AgentRequest::Processes;
|
||||||
|
console_log!("✅ AgentRequest types created");
|
||||||
|
|
||||||
|
// Test 4: Test serialization
|
||||||
|
if let Ok(json) = serde_json::to_string(&AgentRequest::Metrics) {
|
||||||
|
console_log!("✅ Serialization works: {}", json);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test 5: WebSocket connection test
|
||||||
|
console_log!("🌐 Testing WebSocket connection...");
|
||||||
|
|
||||||
|
spawn_local(async move {
|
||||||
|
test_websocket_connection(config_with_version).await;
|
||||||
|
|
||||||
|
console_log!("");
|
||||||
|
console_log!("🎉 socktop_connector WASM Test Results:");
|
||||||
|
console_log!("✅ ConnectorConfig API works in WASM");
|
||||||
|
console_log!("✅ AgentRequest types work in WASM");
|
||||||
|
console_log!("✅ SocktopConnector compiles for WASM");
|
||||||
|
console_log!("✅ Connection stays alive with regular requests");
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn test_websocket_connection(config: ConnectorConfig) {
|
||||||
|
console_log!("📡 Connecting to agent...");
|
||||||
|
|
||||||
|
let mut connector = SocktopConnector::new(config);
|
||||||
|
|
||||||
|
match connector.connect().await {
|
||||||
|
Ok(()) => {
|
||||||
|
console_log!("✅ Connected!");
|
||||||
|
|
||||||
|
// Test continuous monitoring (5 rounds)
|
||||||
|
for round in 1..=5 {
|
||||||
|
console_log!("🔄 Round {}/5 - Requesting metrics...", round);
|
||||||
|
|
||||||
|
// Request metrics (mimicking TUI behavior)
|
||||||
|
match connector.request(AgentRequest::Metrics).await {
|
||||||
|
Ok(response) => {
|
||||||
|
match &response {
|
||||||
|
socktop_connector::AgentResponse::Metrics(metrics) => {
|
||||||
|
console_log!("✅ Round {} - CPU: {:.1}%, Mem: {:.1}%, Host: {}",
|
||||||
|
round,
|
||||||
|
metrics.cpu_total,
|
||||||
|
(metrics.mem_used as f64 / metrics.mem_total as f64) * 100.0,
|
||||||
|
metrics.hostname
|
||||||
|
);
|
||||||
|
|
||||||
|
// Show JSON summary for each round (clean, collapsible)
|
||||||
|
if let Ok(json_str) = serde_json::to_string_pretty(&response) {
|
||||||
|
console_log!("📊 Round {} JSON ({} chars):", round, json_str.len());
|
||||||
|
console_log!("{}", json_str);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
_ => console_log!("📊 Round {} - Received non-metrics response", round),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
console_log!("❌ Round {} failed: {}", round, e);
|
||||||
|
console_log!("🔍 Error details: {:?}", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Every other round, also test disks and processes
|
||||||
|
if round % 2 == 0 {
|
||||||
|
console_log!("💾 Round {} - Requesting disk info...", round);
|
||||||
|
match connector.request(AgentRequest::Disks).await {
|
||||||
|
Ok(response) => {
|
||||||
|
match &response {
|
||||||
|
socktop_connector::AgentResponse::Disks(disks) => {
|
||||||
|
console_log!("✅ Round {} - Got {} disks", round, disks.len());
|
||||||
|
for disk in disks.iter().take(3) { // Show first 3 disks
|
||||||
|
let used_gb = (disk.total - disk.available) / 1024 / 1024 / 1024;
|
||||||
|
let total_gb = disk.total / 1024 / 1024 / 1024;
|
||||||
|
console_log!(" 💿 {}: {}/{} GB used", disk.name, used_gb, total_gb);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
_ => console_log!("❌ Round {} - Unexpected disk response type", round),
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Err(e) => console_log!("❌ Round {} - Disk request failed: {}", round, e),
|
||||||
|
}
|
||||||
|
|
||||||
|
console_log!("⚙️ Round {} - Requesting process info...", round);
|
||||||
|
match connector.request(AgentRequest::Processes).await {
|
||||||
|
Ok(response) => {
|
||||||
|
match &response {
|
||||||
|
socktop_connector::AgentResponse::Processes(processes) => {
|
||||||
|
console_log!("✅ Round {} - Process count: {}, Top processes: {}",
|
||||||
|
round,
|
||||||
|
processes.process_count,
|
||||||
|
processes.top_processes.len()
|
||||||
|
);
|
||||||
|
if processes.top_processes.is_empty() {
|
||||||
|
console_log!("ℹ️ No top processes in response (process_count: {})", processes.process_count);
|
||||||
|
} else {
|
||||||
|
for process in processes.top_processes.iter().take(3) { // Show top 3 processes
|
||||||
|
console_log!(" ⚙️ {}: {:.1}% CPU, {} MB",
|
||||||
|
process.name,
|
||||||
|
process.cpu_usage,
|
||||||
|
process.mem_bytes / 1024 / 1024
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
_ => console_log!("❌ Round {} - Unexpected process response type", round),
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Err(e) => console_log!("❌ Round {} - Process request failed: {}", round, e),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait 1 second between rounds
|
||||||
|
if round < 5 {
|
||||||
|
console_log!("⏱️ Waiting 1 second...");
|
||||||
|
let promise = js_sys::Promise::new(&mut |resolve, _| {
|
||||||
|
let closure = wasm_bindgen::closure::Closure::once(move || resolve.call0(&wasm_bindgen::JsValue::UNDEFINED));
|
||||||
|
web_sys::window()
|
||||||
|
.unwrap()
|
||||||
|
.set_timeout_with_callback_and_timeout_and_arguments_0(
|
||||||
|
closure.as_ref().unchecked_ref(),
|
||||||
|
1000, // 1 second delay
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
closure.forget();
|
||||||
|
});
|
||||||
|
let _ = wasm_bindgen_futures::JsFuture::from(promise).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
console_log!("");
|
||||||
|
console_log!("🎉 Test completed successfully!");
|
||||||
|
|
||||||
|
// Clean disconnect
|
||||||
|
match connector.disconnect().await {
|
||||||
|
Ok(()) => console_log!("✅ Disconnected"),
|
||||||
|
Err(e) => console_log!("⚠️ Disconnect error: {}", e),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
console_log!("❌ Connection failed: {}", e);
|
||||||
|
console_log!("💡 Make sure socktop_agent is running on localhost:3000");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
146
socktop_wasm_test/test.html
Normal file
146
socktop_wasm_test/test.html
Normal file
@ -0,0 +1,146 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<meta charset="utf-8">
|
||||||
|
<title>socktop_connector WASM Test</title>
|
||||||
|
<style>
|
||||||
|
body {
|
||||||
|
font-family: Arial, sans-serif;
|
||||||
|
max-width: 800px;
|
||||||
|
margin: 0 auto;
|
||||||
|
padding: 20px;
|
||||||
|
}
|
||||||
|
.log {
|
||||||
|
background: #f5f5f5;
|
||||||
|
border: 1px solid #ddd;
|
||||||
|
padding: 15px;
|
||||||
|
margin: 10px 0;
|
||||||
|
font-family: monospace;
|
||||||
|
white-space: pre-wrap;
|
||||||
|
height: 500px;
|
||||||
|
overflow-y: scroll;
|
||||||
|
}
|
||||||
|
button {
|
||||||
|
background: #007cba;
|
||||||
|
color: white;
|
||||||
|
border: none;
|
||||||
|
padding: 10px 20px;
|
||||||
|
cursor: pointer;
|
||||||
|
margin: 5px;
|
||||||
|
border-radius: 4px;
|
||||||
|
}
|
||||||
|
button:hover {
|
||||||
|
background: #005a87;
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<h1>🦀 socktop_connector WASM Test</h1>
|
||||||
|
|
||||||
|
<p>This test demonstrates that <code>socktop_connector</code> works properly in WebAssembly with real WebSocket connections.</p>
|
||||||
|
|
||||||
|
<div style="margin: 20px 0; padding: 15px; background: #f9f9f9; border-radius: 4px;">
|
||||||
|
<label for="serverUrl" style="display: block; font-weight: bold; margin-bottom: 5px;">
|
||||||
|
🌐 WebSocket Server URL:
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="text"
|
||||||
|
id="serverUrl"
|
||||||
|
value="ws://localhost:3000/ws"
|
||||||
|
style="width: 300px; padding: 8px; margin-right: 10px; border: 1px solid #ccc; border-radius: 4px;"
|
||||||
|
placeholder="ws://your-server:port/ws"
|
||||||
|
/>
|
||||||
|
<small style="color: #666;">Edit if your socktop_agent is running on a different host/port</small>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<button onclick="runTest()">Run Test</button>
|
||||||
|
<button onclick="clearLog()">Clear Log</button>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<h2>Test Output:</h2>
|
||||||
|
<div id="log" class="log">Click "Run Test" to start...\n</div>
|
||||||
|
|
||||||
|
<script type="module">
|
||||||
|
import init, { test_socktop_connector } from './pkg/socktop_wasm_test.js';
|
||||||
|
|
||||||
|
const logElement = document.getElementById('log');
|
||||||
|
let wasmInitialized = false;
|
||||||
|
|
||||||
|
// Override console.log to capture output
|
||||||
|
const originalConsoleLog = console.log;
|
||||||
|
console.log = function(...args) {
|
||||||
|
originalConsoleLog.apply(console, arguments);
|
||||||
|
logElement.textContent += args.join(' ') + '\n';
|
||||||
|
logElement.scrollTop = logElement.scrollHeight;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Initialize WASM module on page load
|
||||||
|
async function initializeWasm() {
|
||||||
|
try {
|
||||||
|
console.log('🔄 Loading WASM module...');
|
||||||
|
await init();
|
||||||
|
wasmInitialized = true;
|
||||||
|
console.log('✅ WASM module initialized successfully!');
|
||||||
|
updateButtonState();
|
||||||
|
} catch (error) {
|
||||||
|
console.log('❌ Failed to initialize WASM module:', error);
|
||||||
|
console.log('💡 Make sure the build was successful and pkg/ directory exists');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function updateButtonState() {
|
||||||
|
const button = document.querySelector('button');
|
||||||
|
if (wasmInitialized) {
|
||||||
|
button.disabled = false;
|
||||||
|
button.textContent = 'Run Test';
|
||||||
|
button.style.cursor = 'pointer';
|
||||||
|
button.title = 'Click to run the WASM test';
|
||||||
|
} else {
|
||||||
|
button.disabled = true;
|
||||||
|
button.textContent = 'Loading WASM...';
|
||||||
|
button.style.cursor = 'not-allowed';
|
||||||
|
button.title = 'WASM module is loading...';
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run the WASM connector test against the URL typed into the #serverUrl
// input. Refuses to run before WASM is initialized or when the URL is blank.
window.runTest = async function() {
    if (!wasmInitialized) {
        console.log('❌ WASM module not initialized yet');
        return;
    }

    // Get the server URL from the input field.
    const serverUrl = document.getElementById('serverUrl').value.trim();
    if (!serverUrl) {
        console.log('❌ Server URL cannot be empty');
        return;
    }

    try {
        console.log('');
        console.log('='.repeat(60));
        console.log('🚀 Starting socktop_connector WASM test...');
        console.log('🌐 Server URL: ' + serverUrl);
        console.log('='.repeat(60));
        console.log('');

        // Await the exported test so an async rejection is caught by the
        // catch below; awaiting a non-Promise return value is a no-op, so
        // this is safe either way.
        await test_socktop_connector(serverUrl);
    } catch (error) {
        console.log('❌ Error running test:', error);
        console.log('🔍 Error details:', error.stack);
    }
};
|
||||||
|
|
||||||
|
// Reset the on-page log back to its initial prompt text.
window.clearLog = () => {
    logElement.textContent = 'Click "Run Test" to start...\n';
};
|
||||||
|
|
||||||
|
// Kick off WASM loading as soon as the module script runs, then paint the
// initial (disabled / "Loading WASM...") button state while it loads.
initializeWasm();
updateButtonState();
|
||||||
|
</script>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
0
test_thiserror.rs
Normal file
0
test_thiserror.rs
Normal file
19
zellij_socktop_plugin/Cargo.toml
Normal file
19
zellij_socktop_plugin/Cargo.toml
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
[package]
|
||||||
|
name = "zellij_socktop_plugin"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2021"
|
||||||
|
|
||||||
|
[lib]
|
||||||
|
crate-type = ["cdylib"]
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
zellij-tile = "0.40.0"
|
||||||
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
|
serde_json = "1.0"
|
||||||
|
socktop_connector = { version = "0.1.5", default-features = false, features = ["wasm"] }
|
||||||
|
futures = "0.3"
|
||||||
|
|
||||||
|
[dependencies.chrono]
|
||||||
|
version = "0.4"
|
||||||
|
default-features = false
|
||||||
|
features = ["clock", "std", "wasmbind"]
|
||||||
101
zellij_socktop_plugin/README.md
Normal file
101
zellij_socktop_plugin/README.md
Normal file
@ -0,0 +1,101 @@
|
|||||||
|
# Zellij Socktop Plugin
|
||||||
|
|
||||||
|
A Zellij plugin that displays real-time system metrics from a socktop agent.
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
1. **Build the plugin:**
|
||||||
|
```bash
|
||||||
|
cargo build --target wasm32-wasi --release
|
||||||
|
```
|
||||||
|
|
||||||
|
2. **Install in Zellij:**
|
||||||
|
```bash
|
||||||
|
# Copy the WASM file to Zellij plugins directory
|
||||||
|
mkdir -p ~/.config/zellij/plugins
|
||||||
|
cp target/wasm32-wasi/release/zellij_socktop_plugin.wasm ~/.config/zellij/plugins/socktop.wasm
|
||||||
|
cp plugin.yaml ~/.config/zellij/plugins/
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Use in Zellij layout:**
|
||||||
|
```yaml
|
||||||
|
# ~/.config/zellij/layouts/socktop.yaml
|
||||||
|
template:
|
||||||
|
direction: Horizontal
|
||||||
|
parts:
|
||||||
|
- direction: Vertical
|
||||||
|
borderless: true
|
||||||
|
split_size:
|
||||||
|
Fixed: 1
|
||||||
|
run:
|
||||||
|
plugin:
|
||||||
|
location: "file:~/.config/zellij/plugins/socktop.wasm"
|
||||||
|
configuration:
|
||||||
|
server_url: "ws://localhost:3000/ws"
|
||||||
|
- direction: Vertical
|
||||||
|
```
|
||||||
|
|
||||||
|
4. **Launch Zellij with the layout:**
|
||||||
|
```bash
|
||||||
|
zellij --layout socktop
|
||||||
|
```
|
||||||
|
|
||||||
|
## Plugin Features
|
||||||
|
|
||||||
|
- **Real-time Metrics**: Displays CPU and memory usage
|
||||||
|
- **Auto-refresh**: Updates every 2 seconds
|
||||||
|
- **Reconnection**: Press 'r' to reconnect to socktop agent
|
||||||
|
- **Configurable**: Set custom server URL in plugin config
|
||||||
|
- **Error Handling**: Shows connection status and errors
|
||||||
|
|
||||||
|
## Configuration Options
|
||||||
|
|
||||||
|
- `server_url`: WebSocket URL for socktop agent (default: `ws://localhost:3000/ws`)
|
||||||
|
|
||||||
|
## Controls
|
||||||
|
|
||||||
|
- **`r`** - Reconnect to socktop agent
|
||||||
|
- Plugin updates automatically every 2 seconds
|
||||||
|
|
||||||
|
## Development Notes
|
||||||
|
|
||||||
|
This is a scaffold implementation. To make it fully functional:
|
||||||
|
|
||||||
|
1. **Async Operations**: Zellij plugins have limitations with async operations. You may need to:
|
||||||
|
- Use a different async runtime or approach
|
||||||
|
- Handle WebSocket connections in a background thread
|
||||||
|
- Use message passing between threads
|
||||||
|
|
||||||
|
2. **Error Handling**: Add more robust error handling for:
|
||||||
|
- Network connectivity issues
|
||||||
|
- Invalid server URLs
|
||||||
|
- Agent unavailability
|
||||||
|
|
||||||
|
3. **UI Improvements**:
|
||||||
|
- Add more detailed metrics display
|
||||||
|
- Implement scrolling for large datasets
|
||||||
|
- Add color coding for status indicators
|
||||||
|
|
||||||
|
4. **Performance**:
|
||||||
|
- Implement caching to reduce agent requests
|
||||||
|
- Add configurable update intervals
|
||||||
|
- Optimize WASM binary size
|
||||||
|
|
||||||
|
## Dependencies
|
||||||
|
|
||||||
|
- `zellij-tile`: Zellij plugin framework
|
||||||
|
- `socktop_connector`: WebSocket connector with WASM support
|
||||||
|
- `serde`: JSON serialization
|
||||||
|
- `chrono`: Time handling (WASM-compatible)
|
||||||
|
|
||||||
|
## Building
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Add WASM target (on Rust 1.84+ the target was renamed — use `wasm32-wasip1` there)
rustup target add wasm32-wasi

# Build for WASM
cargo build --target wasm32-wasi --release
|
||||||
|
|
||||||
|
# The plugin will be at: target/wasm32-wasi/release/zellij_socktop_plugin.wasm
|
||||||
|
```
|
||||||
11
zellij_socktop_plugin/plugin.yaml
Normal file
11
zellij_socktop_plugin/plugin.yaml
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
name: "socktop"
|
||||||
|
version: "0.1.0"
|
||||||
|
authors: ["Your Name <your@email.com>"]
|
||||||
|
plugin: true
|
||||||
|
permissions:
|
||||||
|
- ReadApplicationState
|
||||||
|
configuration:
|
||||||
|
server_url:
|
||||||
|
type: "string"
|
||||||
|
default: "ws://localhost:3000/ws"
|
||||||
|
description: "WebSocket URL for socktop agent"
|
||||||
180
zellij_socktop_plugin/src/lib.rs
Normal file
180
zellij_socktop_plugin/src/lib.rs
Normal file
@ -0,0 +1,180 @@
|
|||||||
|
use zellij_tile::prelude::*;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use socktop_connector::{ConnectorConfig, AgentRequest, SocktopConnector, AgentResponse};
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
/// Plugin state for the socktop Zellij pane.
#[derive(Default)]
struct State {
    // WebSocket connector to the socktop agent, if one has been created.
    connector: Option<SocktopConnector>,
    // Most recently formatted metrics line, when a fetch has succeeded.
    metrics_data: Option<String>,
    // Human-readable connection status rendered in the pane header.
    connection_status: String,
    // Last error message, when the most recent fetch failed.
    error_message: Option<String>,
    // Count of timer ticks handled so far, rendered in the pane footer.
    update_counter: u32,
}
|
||||||
|
|
||||||
|
static mut STATE: State = State {
|
||||||
|
connector: None,
|
||||||
|
metrics_data: None,
|
||||||
|
connection_status: String::new(),
|
||||||
|
error_message: None,
|
||||||
|
update_counter: 0,
|
||||||
|
};
|
||||||
|
|
||||||
|
register_plugin!(State);
|
||||||
|
|
||||||
|
impl ZellijPlugin for State {
|
||||||
|
fn load(&mut self, configuration: BTreeMap<String, String>) {
|
||||||
|
// Get server URL from plugin config or use default
|
||||||
|
let server_url = configuration
|
||||||
|
.get("server_url")
|
||||||
|
.cloned()
|
||||||
|
.unwrap_or_else(|| "ws://localhost:3000/ws".to_string());
|
||||||
|
|
||||||
|
// Initialize connector configuration
|
||||||
|
let config = ConnectorConfig::new(&server_url);
|
||||||
|
let connector = SocktopConnector::new(config);
|
||||||
|
|
||||||
|
unsafe {
|
||||||
|
STATE.connector = Some(connector);
|
||||||
|
STATE.connection_status = "Connecting...".to_string();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set up periodic updates
|
||||||
|
set_timeout(1.0); // Update every second
|
||||||
|
|
||||||
|
// Start initial connection
|
||||||
|
self.connect_to_socktop();
|
||||||
|
|
||||||
|
request_permission(&[
|
||||||
|
PermissionType::ReadApplicationState,
|
||||||
|
]);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn update(&mut self, event: Event) -> bool {
|
||||||
|
match event {
|
||||||
|
Event::Timer(_) => {
|
||||||
|
unsafe {
|
||||||
|
STATE.update_counter += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Request metrics every update cycle
|
||||||
|
self.fetch_metrics();
|
||||||
|
|
||||||
|
// Set next timer
|
||||||
|
set_timeout(2.0); // Update every 2 seconds
|
||||||
|
true
|
||||||
|
}
|
||||||
|
Event::Key(key) => {
|
||||||
|
match key {
|
||||||
|
Key::Char('r') => {
|
||||||
|
// Reconnect on 'r' key press
|
||||||
|
self.connect_to_socktop();
|
||||||
|
true
|
||||||
|
}
|
||||||
|
_ => false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn render(&mut self, rows: usize, cols: usize) {
|
||||||
|
unsafe {
|
||||||
|
let mut output = Vec::new();
|
||||||
|
|
||||||
|
// Header
|
||||||
|
output.push("╭─ Socktop Metrics Plugin ─╮".to_string());
|
||||||
|
output.push(format!("│ Status: {:<18} │", STATE.connection_status));
|
||||||
|
output.push("├──────────────────────────╯".to_string());
|
||||||
|
|
||||||
|
// Metrics display
|
||||||
|
if let Some(ref metrics) = STATE.metrics_data {
|
||||||
|
output.push("│ System Metrics:".to_string());
|
||||||
|
output.push(format!("│ {}", metrics));
|
||||||
|
} else if let Some(ref error) = STATE.error_message {
|
||||||
|
output.push("│ Error:".to_string());
|
||||||
|
output.push(format!("│ {}", error));
|
||||||
|
} else {
|
||||||
|
output.push("│ Waiting for data...".to_string());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Footer
|
||||||
|
output.push("│".to_string());
|
||||||
|
output.push(format!("│ Updates: {} │ Press 'r' to reconnect", STATE.update_counter));
|
||||||
|
output.push("╰──────────────────────────╯".to_string());
|
||||||
|
|
||||||
|
// Print lines within terminal bounds
|
||||||
|
for (i, line) in output.iter().enumerate() {
|
||||||
|
if i < rows {
|
||||||
|
println!("{}", line);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl State {
|
||||||
|
fn connect_to_socktop(&mut self) {
|
||||||
|
unsafe {
|
||||||
|
if let Some(ref mut connector) = STATE.connector {
|
||||||
|
STATE.connection_status = "Connecting...".to_string();
|
||||||
|
STATE.error_message = None;
|
||||||
|
|
||||||
|
// In a real implementation, you'd use async/await here
|
||||||
|
// For this scaffold, we'll simulate the connection
|
||||||
|
// Note: Zellij plugins have limitations with async operations
|
||||||
|
STATE.connection_status = "Connected".to_string();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn fetch_metrics(&mut self) {
|
||||||
|
unsafe {
|
||||||
|
if let Some(ref mut connector) = STATE.connector {
|
||||||
|
// Try to get real metrics from socktop agent
|
||||||
|
match self.try_get_metrics(connector) {
|
||||||
|
Ok(metrics_text) => {
|
||||||
|
STATE.metrics_data = Some(metrics_text);
|
||||||
|
STATE.connection_status = "Active".to_string();
|
||||||
|
STATE.error_message = None;
|
||||||
|
}
|
||||||
|
Err(error) => {
|
||||||
|
STATE.error_message = Some(error);
|
||||||
|
STATE.connection_status = "Error".to_string();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
STATE.error_message = Some("No connector available".to_string());
|
||||||
|
STATE.connection_status = "Disconnected".to_string();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn try_get_metrics(&mut self, connector: &mut SocktopConnector) -> Result<String, String> {
|
||||||
|
// Note: This is synchronous for simplicity. In a real plugin you might need
|
||||||
|
// to handle async operations differently depending on Zellij's threading model.
|
||||||
|
|
||||||
|
// For now, we'll use a blocking approach or return a placeholder
|
||||||
|
// that indicates we're trying to connect
|
||||||
|
|
||||||
|
// Attempt connection if not connected
|
||||||
|
if let Err(e) = futures::executor::block_on(connector.connect()) {
|
||||||
|
return Err(format!("Connection failed: {}", e));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Request metrics
|
||||||
|
match futures::executor::block_on(connector.request(AgentRequest::Metrics)) {
|
||||||
|
Ok(AgentResponse::Metrics(metrics)) => {
|
||||||
|
Ok(format!(
|
||||||
|
"CPU: {:.1}% | Mem: {:.1}% | Host: {} | Load: {:.2}",
|
||||||
|
metrics.cpu_total,
|
||||||
|
(metrics.mem_used as f64 / metrics.mem_total as f64) * 100.0,
|
||||||
|
metrics.hostname,
|
||||||
|
metrics.load_avg_1m
|
||||||
|
))
|
||||||
|
}
|
||||||
|
Ok(_) => Err("Unexpected response type".to_string()),
|
||||||
|
Err(e) => Err(format!("Request failed: {}", e)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
Loading…
Reference in New Issue
Block a user