Compare commits

...

No commits in common. "master" and "d58549b96bc4ea543ece081ddf819feb99541df5" have entirely different histories.

143 changed files with 1900 additions and 21048 deletions

View File

@ -1,39 +0,0 @@
#!/usr/bin/env bash
# Pre-commit hook: run `cargo fmt --all` and stage any Rust files it reformats.
#
# This repository uses a custom hooks directory (.githooks). To enable this pre-commit hook run:
#   git config core.hooksPath .githooks
# Ensure this file is executable: chmod +x .githooks/pre-commit
set -euo pipefail
echo "[pre-commit] Running cargo fmt --all" >&2
# cargo may be absent from PATH in GUI-spawned shells; try the rustup env file.
if ! command -v cargo >/dev/null 2>&1; then
  # Try loading rustup environment (common install path)
  if [ -f "$HOME/.cargo/env" ]; then
    # shellcheck source=/dev/null
    . "$HOME/.cargo/env"
  fi
fi
if ! command -v cargo >/dev/null 2>&1; then
  echo "[pre-commit] cargo not found in PATH; skipping fmt (install Rust or adjust PATH)." >&2
  exit 0
fi
cargo fmt --all
# Stage any Rust files that were reformatted.
# FIX: the pattern must be '\.rs$' — the previous '\\.rs$' matched a literal
# backslash inside the single-quoted string and never matched normal paths.
changed=$(git diff --name-only --diff-filter=M | grep -E '\.rs$' || true)
if [ -n "$changed" ]; then
  # NUL-delimit so filenames containing spaces survive xargs.
  printf '%s\n' "$changed" | tr '\n' '\0' | xargs -0 git add --
  echo "[pre-commit] Added formatted files" >&2
fi
# Fail if further diffs remain (shouldn't happen normally)
unfmt=$(git diff --name-only --diff-filter=M | grep -E '\.rs$' || true)
if [ -n "$unfmt" ]; then
  echo "[pre-commit] Some Rust files still differ after formatting:" >&2
  printf '%s\n' "$unfmt" >&2
  exit 1
fi
exit 0

View File

@ -1,129 +0,0 @@
# CI: format check, lint, release build, live WebSocket smoke tests, and
# artifact packaging on Linux and Windows. On pushes to main, artifacts are
# attached to a rolling "latest" prerelease.
# NOTE: indentation restored — the previous copy was whitespace-mangled and
# not valid YAML.
name: CI
on:
  push:
  pull_request:
jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, windows-latest]
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
        with:
          components: clippy, rustfmt
      # libdrm dev packages — presumably for the agent's GPU metrics feature
      # (cf. SOCKTOP_AGENT_GPU below); confirm against the agent's build deps.
      - name: Install system dependencies (Linux)
        if: matrix.os == 'ubuntu-latest'
        run: sudo apt-get update && sudo apt-get install -y libdrm-dev libdrm-amdgpu1
      - name: Cargo fmt
        run: cargo fmt --all -- --check
      - name: Clippy
        run: cargo clippy --all-targets --all-features -- -D warnings
      - name: Build (release)
        run: cargo build --release --workspace
      # Start the agent in the background, poll /healthz for up to 60s, then
      # run the connector integration test against the live WS endpoint.
      - name: "Linux: start agent and run WS probe"
        if: matrix.os == 'ubuntu-latest'
        shell: bash
        run: |
          set -euo pipefail
          RUST_LOG=info SOCKTOP_ENABLE_SSL=0 SOCKTOP_AGENT_GPU=0 SOCKTOP_AGENT_TEMP=0 ./target/release/socktop_agent -p 3000 > agent.log 2>&1 &
          AGENT_PID=$!
          for i in {1..60}; do
            if curl -fsS http://127.0.0.1:3000/healthz >/dev/null; then break; fi
            sleep 1
          done
          if ! curl -fsS http://127.0.0.1:3000/healthz >/dev/null; then
            echo "--- agent.log (tail) ---"
            tail -n 200 agent.log || true
            (command -v ss >/dev/null && ss -ltnp || netstat -ltnp) || true
            kill $AGENT_PID || true
            exit 1
          fi
          SOCKTOP_WS=ws://127.0.0.1:3000/ws cargo test -p socktop_connector --test integration_test -- --nocapture
          kill $AGENT_PID || true
      # Windows equivalent: poll /healthz via curl.exe (checked by exit code),
      # and always stop the agent in the finally block.
      - name: "Windows: start agent and run WS probe"
        if: matrix.os == 'windows-latest'
        shell: pwsh
        run: |
          $env:SOCKTOP_ENABLE_SSL = "0"
          $env:SOCKTOP_AGENT_GPU = "0"
          $env:SOCKTOP_AGENT_TEMP = "0"
          $out = Join-Path $PWD "agent.out.txt"
          $err = Join-Path $PWD "agent.err.txt"
          $p = Start-Process -FilePath "${PWD}\target\release\socktop_agent.exe" -ArgumentList "-p 3000" -RedirectStandardOutput $out -RedirectStandardError $err -PassThru -NoNewWindow
          $ready = $false
          for ($i = 0; $i -lt 60; $i++) {
            $pinfo = New-Object System.Diagnostics.ProcessStartInfo
            $pinfo.FileName = "curl.exe"
            $pinfo.Arguments = "-fsS http://127.0.0.1:3000/healthz"
            $pinfo.RedirectStandardOutput = $true
            $pinfo.RedirectStandardError = $true
            $pinfo.UseShellExecute = $false
            $proc = [System.Diagnostics.Process]::Start($pinfo)
            $proc.WaitForExit()
            if ($proc.ExitCode -eq 0) { $ready = $true; break }
            Start-Sleep -Seconds 1
          }
          if (-not $ready) {
            Write-Warning "TCP connect to (127.0.0.1 : 3000) failed"
            if (Test-Path $out) { Write-Host "--- agent.out (full) ---"; Get-Content $out }
            if (Test-Path $err) { Write-Host "--- agent.err (full) ---"; Get-Content $err }
            Write-Host "--- netstat ---"
            netstat -ano | Select-String ":3000" | ForEach-Object { $_.Line }
            if ($p -and !$p.HasExited) { Stop-Process -Id $p.Id -Force -ErrorAction SilentlyContinue }
            throw "agent did not become ready"
          }
          $env:SOCKTOP_WS = "ws://127.0.0.1:3000/ws"
          try {
            cargo test -p socktop_connector --test integration_test -- --nocapture
          } finally {
            if ($p -and !$p.HasExited) { Stop-Process -Id $p.Id -Force -ErrorAction SilentlyContinue }
          }
      - name: Smoke test (client --help)
        run: cargo run -p socktop -- --help
      - name: Package artifacts (Linux)
        if: matrix.os == 'ubuntu-latest'
        shell: bash
        run: |
          set -e
          mkdir -p dist
          cp target/release/socktop dist/
          cp target/release/socktop_agent dist/
          tar czf socktop-${{ matrix.os }}.tar.gz -C dist .
      - name: Package artifacts (Windows)
        if: matrix.os == 'windows-latest'
        shell: pwsh
        run: |
          New-Item -ItemType Directory -Force -Path dist | Out-Null
          Copy-Item target\release\socktop.exe dist\
          Copy-Item target\release\socktop_agent.exe dist\
          Compress-Archive -Path dist\* -DestinationPath socktop-${{ matrix.os }}.zip -Force
      - name: Upload build artifacts (ephemeral)
        uses: actions/upload-artifact@v4
        with:
          name: socktop-${{ matrix.os }}
          path: |
            *.tar.gz
            *.zip
      # Rolling prerelease: the "latest" tag is reused on every push to main.
      - name: Upload to rolling GitHub Release (main only)
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        uses: softprops/action-gh-release@v2
        with:
          tag_name: latest
          name: Latest build
          prerelease: true
          draft: false
          files: |
            *.tar.gz
            *.zip
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

7
.gitignore vendored
View File

@ -1,7 +0,0 @@
/target
.vscode/
/socktop-wasm-test/target
# Documentation files from development sessions (context-specific, not for public repo)
/OPTIMIZATION_PROCESS_DETAILS.md
/THREAD_SUPPORT.md

423
APT_REPO_SUMMARY.md Normal file
View File

@ -0,0 +1,423 @@
# APT Repository Setup Summary
## 🎉 What You Now Have
You now have a complete system for creating and hosting your own APT repository for socktop packages, **without needing a sponsor or official Debian/Ubuntu approval**.
## 📁 Files Created
### Scripts (in `scripts/`)
- **`init-apt-repo.sh`** - Initializes the APT repository directory structure
- **`add-package-to-repo.sh`** - Adds .deb packages to the repository and generates metadata
- **`sign-apt-repo.sh`** - Signs the repository with your GPG key
- **`setup-apt-repo.sh`** - All-in-one interactive wizard to set everything up
### Documentation
- **`QUICK_START_APT_REPO.md`** - Quick start guide (< 10 minutes)
- **`docs/APT_REPOSITORY.md`** - Comprehensive 600+ line guide covering everything
- **`APT_REPO_SUMMARY.md`** - This file
### GitHub Actions
- **`.github/workflows/publish-apt-repo.yml`** - Automated building, signing, and publishing
## 🚀 Quick Start (Choose One)
### Option 1: Interactive Setup (Recommended for First Time)
Run the setup wizard:
```bash
./scripts/setup-apt-repo.sh
```
This walks you through:
1. ✅ Checking prerequisites
2. 🔑 Setting up GPG key
3. 📦 Finding/building packages
4. 📝 Creating repository structure
5. ✍️ Signing the repository
6. 📋 Next steps to publish to gh-pages
### Option 2: Manual Step-by-Step
```bash
# 1. Initialize
./scripts/init-apt-repo.sh
# 2. Build packages
cargo deb --package socktop
cargo deb --package socktop_agent
# 3. Add packages
./scripts/add-package-to-repo.sh target/debian/socktop_*.deb
./scripts/add-package-to-repo.sh target/debian/socktop-agent_*.deb
# 4. Sign (replace YOUR-KEY-ID)
./scripts/sign-apt-repo.sh apt-repo stable YOUR-KEY-ID
# 5. Update URLs
sed -i 's/YOUR-USERNAME/your-github-username/g' apt-repo/*.{md,html}
# 6. Publish to gh-pages (see below)
```
### Option 3: Fully Automated (After Initial Setup)
Once gh-pages branch exists, just tag releases:
```bash
git tag v1.50.0
git push --tags
# GitHub Actions will:
# - Build packages for AMD64 and ARM64
# - Update APT repository
# - Sign with your GPG key
# - Push to gh-pages branch automatically
```
## 📤 Publishing to GitHub Pages (gh-pages branch)
**Why gh-pages branch?**
- ✅ Keeps main branch clean (source code only)
- ✅ Separate branch for published content
- ✅ GitHub Actions can auto-update it
- ✅ You can customize the landing page
**Initial Setup:**
```bash
# Create gh-pages branch
git checkout --orphan gh-pages
git rm -rf .
# Copy apt-repo CONTENTS to root (not the folder!)
cp -r apt-repo/* .
rm -rf apt-repo
# Commit and push
git add .
git commit -m "Initialize APT repository"
git push -u origin gh-pages
# Return to main
git checkout main
```
**Enable in GitHub:**
1. Settings → Pages
2. Source: branch **gh-pages**, folder **/ (root)**
3. Save
Your repo will be at: `https://your-username.github.io/socktop/`
**Note:** GitHub Pages only allows `/` (root) or `/docs`. Since we use gh-pages branch, contents go in the root of that branch.
See `SETUP_GITHUB_PAGES.md` for detailed step-by-step instructions.
### Alternative: Self-Hosted Server
Copy `apt-repo/` contents to your web server:
```bash
rsync -avz apt-repo/ user@example.com:/var/www/apt/
```
Configure Apache/Nginx to serve the directory. See `docs/APT_REPOSITORY.md` for details.
## 🤖 GitHub Actions Automation
### Required Secrets
Add these in GitHub Settings → Secrets → Actions:
1. **GPG_PRIVATE_KEY**
```bash
gpg --armor --export-secret-key YOUR-KEY-ID
# Copy entire output including BEGIN/END lines
```
2. **GPG_KEY_ID**
```bash
gpg --list-secret-keys --keyid-format LONG
# Use the ID after "rsa4096/"
```
3. **GPG_PASSPHRASE**
```bash
# Your GPG passphrase (leave empty if no passphrase)
```
### Triggers
The workflow runs on:
- **Version tags**: `git tag v1.50.0 && git push --tags`
- **Manual dispatch**: Actions tab → "Publish APT Repository" → Run workflow
### What It Does
1. ✅ Builds packages for AMD64 and ARM64
2. ✅ Initializes or updates APT repository
3. ✅ Generates Packages files and metadata
4. ✅ Signs with your GPG key
5. ✅ Commits and pushes to gh-pages branch
6. ✅ Creates GitHub Release with artifacts
7. ✅ Generates summary with installation instructions
## 👥 User Installation
Once published, users install with:
```bash
# Add repository
curl -fsSL https://your-username.github.io/socktop/KEY.gpg | \
sudo gpg --dearmor -o /usr/share/keyrings/socktop-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/socktop-archive-keyring.gpg] https://your-username.github.io/socktop stable main" | \
sudo tee /etc/apt/sources.list.d/socktop.list
# Install
sudo apt update
sudo apt install socktop socktop-agent
# The agent service is automatically installed and configured
sudo systemctl enable --now socktop-agent
```
## 🔧 Maintenance
### Release New Version (Automated)
```bash
# Update version in Cargo.toml, commit changes
git add . && git commit -m "Bump version to 1.51.0"
git tag v1.51.0
git push origin main --tags
# GitHub Actions automatically:
# - Builds packages for AMD64 and ARM64
# - Updates apt-repo
# - Signs with GPG
# - Pushes to gh-pages branch
```
### Manual Update (if needed)
```bash
# On main branch
cargo deb --package socktop
./scripts/add-package-to-repo.sh target/debian/socktop_*.deb
./scripts/sign-apt-repo.sh
# Switch to gh-pages and update
git checkout gh-pages
cp -r apt-repo/* .
git add . && git commit -m "Release v1.51.0" && git push
git checkout main
```
### Remove Old Versions
```bash
# On gh-pages branch
git checkout gh-pages
rm pool/main/socktop_1.50.0_*.deb
# Regenerate metadata (re-add remaining packages)
git add . && git commit -m "Remove old versions" && git push
git checkout main
```
## 🎯 Key Benefits
**No sponsor needed** - Host your own repository
**Full control** - You decide when to release
**Free hosting** - GitHub Pages at no cost
**Automated** - GitHub Actions does the work
**Professional** - Just like official repos
**Multi-arch** - AMD64, ARM64 support built-in
**Secure** - GPG signed packages
**Easy updates** - Users get updates via `apt upgrade`
## 📊 Repository Structure
```
apt-repo/
├── dists/
│ └── stable/
│ ├── Release # Main metadata (checksums)
│ ├── Release.gpg # Detached signature
│ ├── InRelease # Clearsigned release
│ └── main/
│ ├── binary-amd64/
│ │ ├── Packages # Package list
│ │ ├── Packages.gz # Compressed
│ │ └── Release # Component metadata
│ ├── binary-arm64/
│ └── binary-armhf/
├── pool/
│ └── main/
│ ├── socktop_1.50.0_amd64.deb
│ ├── socktop-agent_1.50.1_amd64.deb
│ ├── socktop_1.50.0_arm64.deb
│ └── socktop-agent_1.50.1_arm64.deb
├── KEY.gpg # Public GPG key
├── README.md # Repository info
├── index.html # Web interface
└── packages.html # Package listing
```
## 🔑 GPG Key Management
### Create New Key
```bash
gpg --full-generate-key
# Choose RSA 4096, no expiration (or 2 years)
```
### Export Keys
```bash
# Public key (for users)
gpg --armor --export YOUR-KEY-ID > KEY.gpg
# Private key (for GitHub Secrets)
gpg --armor --export-secret-key YOUR-KEY-ID
```
### Backup Keys
```bash
# Backup to safe location
gpg --export-secret-keys YOUR-KEY-ID > gpg-private-backup.key
gpg --export YOUR-KEY-ID > gpg-public-backup.key
```
### Key Rotation
If your key expires or is compromised:
```bash
./scripts/sign-apt-repo.sh apt-repo stable NEW-KEY-ID
gpg --armor --export NEW-KEY-ID > apt-repo/KEY.gpg
# Users need to re-import the key
```
## 🐛 Troubleshooting
### "Repository not signed"
```bash
./scripts/sign-apt-repo.sh apt-repo stable YOUR-KEY-ID
ls apt-repo/dists/stable/Release* # Should show 3 files
```
### "Package not found"
```bash
cd apt-repo
dpkg-scanpackages --arch amd64 pool/main /dev/null > dists/stable/main/binary-amd64/Packages
gzip -9 -k -f dists/stable/main/binary-amd64/Packages
cd ..
./scripts/sign-apt-repo.sh
```
### "404 Not Found" on GitHub Pages
- Wait 2-3 minutes after pushing
- Check Settings → Pages is enabled
- Verify source branch/directory
### GitHub Actions not signing
- Check all 3 secrets are set correctly
- GPG_PRIVATE_KEY must include BEGIN/END lines
- Test signing locally first
## 📚 Documentation
| File | Purpose | Length |
|------|---------|--------|
| `QUICK_START_APT_REPO.md` | Get started in < 10 minutes | Quick |
| `SETUP_GITHUB_PAGES.md` | Detailed gh-pages setup guide | Step-by-step |
| `docs/APT_REPOSITORY.md` | Complete guide with all options | Comprehensive |
| `docs/DEBIAN_PACKAGING.md` | How .deb packages are built | Technical |
| `DEBIAN_PACKAGING_SUMMARY.md` | Overview of packaging work | Summary |
| `APT_REPO_SUMMARY.md` | This file | Overview |
## 🎓 Learning Path
1. **Start here**: `QUICK_START_APT_REPO.md` (10 min)
2. **Set up**: Run `./scripts/setup-apt-repo.sh` (15 min)
3. **Publish**: Follow `SETUP_GITHUB_PAGES.md` (5 min)
4. **Automate**: Set up GitHub Actions secrets (10 min)
5. **Advanced**: Read `docs/APT_REPOSITORY.md` as needed
## 🚦 Next Steps
Choose your path:
### Just Getting Started?
1. ✅ Read `QUICK_START_APT_REPO.md`
2. ✅ Run `./scripts/setup-apt-repo.sh`
3. ✅ Follow `SETUP_GITHUB_PAGES.md` to publish
4. ✅ Test installation on a VM
### Want Automation?
1. ✅ Generate/export GPG key
2. ✅ Add GitHub Secrets
3. ✅ Tag a release: `git tag v1.50.0 && git push --tags`
4. ✅ Watch GitHub Actions magic happen
### Want to Understand Everything?
1. ✅ Read `docs/APT_REPOSITORY.md` (comprehensive)
2. ✅ Study the scripts in `scripts/`
3. ✅ Examine `.github/workflows/publish-apt-repo.yml`
4. ✅ Learn about Debian repository format
### Ready for Production?
1. ✅ Set up monitoring/analytics
2. ✅ Create PPA for Ubuntu (Launchpad)
3. ✅ Apply to Debian mentors for official inclusion
4. ✅ Set up repository mirrors
5. ✅ Document best practices for users
## 🌟 Success Criteria
You'll know you're successful when:
- [ ] Users can `apt install socktop`
- [ ] Updates work with `apt upgrade`
- [ ] Multiple architectures supported
- [ ] Repository is GPG signed
- [ ] GitHub Actions publishes automatically
- [ ] Installation instructions in README
- [ ] Zero sponsor or approval needed
## 💡 Pro Tips
1. **Test first**: Always test on a fresh VM before publishing
2. **Keep versions**: Don't delete old .deb files immediately
3. **Backup GPG key**: Store it safely offline
4. **Monitor downloads**: Use GitHub Insights or server logs
5. **Document everything**: Help users troubleshoot
6. **Version consistently**: Use semantic versioning
7. **Sign always**: Never publish unsigned repositories
## 🔗 Resources
- [Debian Repository Format](https://wiki.debian.org/DebianRepository/Format)
- [GitHub Pages Docs](https://docs.github.com/en/pages)
- [cargo-deb](https://github.com/kornelski/cargo-deb)
- [Ubuntu PPA Guide](https://help.launchpad.net/Packaging/PPA)
- [Debian Mentors](https://mentors.debian.net/)
## 🎊 Congratulations!
You now have everything you need to:
- ✅ Create your own APT repository
- ✅ Host it for free on GitHub Pages
- ✅ Automate the entire process
- ✅ Distribute packages professionally
- ✅ Provide easy installation for users
**No sponsor required. No approval needed. You're in control!** 🚀
---
**Questions?** Check the docs or open an issue.
**Ready to publish?** Run `./scripts/setup-apt-repo.sh` and follow the wizard!

3346
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,50 +0,0 @@
# Cargo workspace manifest for socktop: the TUI client, the agent, and the
# connector crate share the dependency versions declared below.
[workspace]
resolver = "2"
# Crates built together by `cargo build --workspace`.
members = [
"socktop",
"socktop_agent",
"socktop_connector"
]
# Single source of truth for versions; member crates opt in with
# `<dep>.workspace = true` in their own Cargo.toml.
[workspace.dependencies]
# async + streams
tokio = { version = "1", features = ["full"] }
futures-util = "0.3"
anyhow = "1.0"
# websocket
tokio-tungstenite = { version = "0.24", features = ["__rustls-tls", "connect"] }
url = "2.5"
# JSON + error handling
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
# system stats (align across crates)
sysinfo = "0.37"
# CLI UI
ratatui = "0.28"
crossterm = "0.27"
# web server (remote-agent)
axum = { version = "0.7", features = ["ws"] }
# protobuf
prost = "0.13"
dirs-next = "2"
# compression
flate2 = "1.0"
# TLS
rustls = { version = "0.23", features = ["ring"] }
rustls-pemfile = "2.1"
# NOTE(review): `panic = "abort"` disables unwinding, so `catch_unwind` will
# not work in release builds — confirm no crate in the workspace relies on it.
[profile.release]
# Favor smaller, simpler binaries with good runtime perf
lto = "thin"
codegen-units = 1
panic = "abort"
opt-level = 3
strip = "symbols"

109
GETTING_STARTED_NOW.md Normal file
View File

@ -0,0 +1,109 @@
# 🚀 Get Your APT Repository Live in 5 Minutes
## You're Here Because...
You want to publish socktop packages via APT, but GitHub Pages won't let you select `apt-repo/` folder. Here's why and how to fix it:
**The Issue:** GitHub Pages only serves from `/` (root) or `/docs`, not custom folders like `/apt-repo`.
**The Solution:** Use a `gh-pages` branch where `apt-repo` contents go in the root.
## Quick Setup (5 Steps)
### 1. Create apt-repo locally (if you haven't)
```bash
./scripts/setup-apt-repo.sh
```
This creates `apt-repo/` with your packages and signs them.
### 2. Create gh-pages branch
```bash
git checkout --orphan gh-pages
git rm -rf .
```
### 3. Copy apt-repo to root
```bash
cp -r apt-repo/* .
rm -rf apt-repo
ls
# You should see: dists/ pool/ KEY.gpg index.html README.md
```
### 4. Push to GitHub
```bash
git add .
git commit -m "Initialize APT repository"
git push -u origin gh-pages
git checkout main
```
### 5. Enable GitHub Pages
1. Go to: **Settings → Pages**
2. Source: branch **gh-pages**, folder **/ (root)**
3. Click **Save**
**Done!** ✅ Your repo will be live at `https://your-username.github.io/socktop/` in 1-2 minutes.
## Test It
```bash
curl -I https://your-username.github.io/socktop/KEY.gpg
# Should return: HTTP/2 200
```
## Install It (On Any Debian/Ubuntu System)
```bash
curl -fsSL https://your-username.github.io/socktop/KEY.gpg | \
sudo gpg --dearmor -o /usr/share/keyrings/socktop-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/socktop-archive-keyring.gpg] https://your-username.github.io/socktop stable main" | \
sudo tee /etc/apt/sources.list.d/socktop.list
sudo apt update
sudo apt install socktop socktop-agent
```
## What's Next?
### Now (Optional):
- Customize `index.html` on gh-pages for a nice landing page
- Add installation instructions to your main README
### Later:
- Set up GitHub Actions automation (see `QUICK_START_APT_REPO.md`)
- Add more architectures (ARM64, ARMv7)
## Understanding the Setup
```
main branch: gh-pages branch:
├── src/ ├── dists/
├── Cargo.toml ├── pool/
├── scripts/ ├── KEY.gpg
└── apt-repo/ (local) └── index.html ← GitHub Pages serves this
Work here ↑ Published here ↑
```
- **main**: Your development work
- **gh-pages**: What users see/download
- **apt-repo/**: Local folder (ignored in git, see `.gitignore`)
## Need More Help?
- **Quick start**: `QUICK_START_APT_REPO.md`
- **Detailed setup**: `SETUP_GITHUB_PAGES.md`
- **Why gh-pages?**: `WHY_GHPAGES_BRANCH.md`
- **Full guide**: `docs/APT_REPOSITORY.md`
---
**You got this!** 🎉

42
KEY.gpg Normal file
View File

@ -0,0 +1,42 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQGNBGkih7QBDADgX6sYMx2Lp6qcZxeCCizcy4TFsxcRJfp5mfbMplVES0hQToIP
EMC11JqPwQdLliXKjUr8Z2kgM2oqvH+dkdgzUGrw6kTK8YHc+qs37iJAOVS9D72X
tTld282NrtFwzb74nS2GKPkpWI7aSKBpHtWFPX/1ONsc56qGqFd3wwikEvCz8MeJ
HwCD1JZ9F+2DyyXWsTJNgDwPloJSUbtyVuk2gd6PeTg7AQdx92Pk/mggmYbHtP8N
wy072ku1g8K/hplmwIOGpSx1JWvAQkDU/Bb/jSqrYg2wSHO7IQnYE8I3x/zglYBl
FYNh47TVQr0zPVSYR1MQkHU5YLBTDc5UgDvtcsYUiTtq4D/m8HWmKja0/UKGxvDJ
P5sUPcp4dk77RdoCtUe5HImYGS8lo5N3+t0lz8sd9rYmRiIO4f7FJaJqJeHbUJyn
iw/GCQh5D5/D571dICrEq/QhL+k5KhJljPGoVMGPFXJIc7q+CxvGp2oOo5fOlbOn
3kSrM93AJPwT8FMAEQEAAbRFSmFzb24gV2l0dHkgKHNvY2t0b3AgYXB0IHNpZ25p
bmcga2V5KSA8amFzb25wd2l0dHkrc29ja3RvcEBwcm90b24ubWU+iQHOBBMBCgA4
FiEEHnVWqAU5uDlLwoINESwaeYRl+/IFAmkih7QCGwMFCwkIBwIGFQoJCAsCBBYC
AwECHgECF4AACgkQESwaeYRl+/KV+gwAzfZVZEhO7MQV2EmNeKVK1GycFSm2oUAl
ZbwNIEHu6+tOzqXJb8o65BtGlbLSGavsMpgRCK2SL83DdLOkutG1ahQiJr+5GaXC
zbQgX+VWqGPZtQ+I6/rVoYZPMTCrqpAmFgvVpqv0xod7w8/wny8/XmhQ37KY2/0l
B38oNTvdA7C8jzSrI6kr3XqurvQRW7z+MnC+nCp9Ob9bYtY0kpd4U3NrVdb8m32U
d5LVFwD1OGvzLOSqyJ33IKjSJc4KLvW+aEsHXe+fHO9UEzH8Nbo5MmVvX3QIHiyq
jD4zN16AGsGYqCK4irtQCiD3wBOdsG/RVkgIcdlmAH3EGEp7Ux8+7v1PXYI+UrSs
XE7f1xFTJ2r5TMex6W3he073Em4qhQsrnMF5syTZsM6N+5UqXVOM1RuDVVXr7929
hC3G8pK/A2W5Lwpxl2yzock2CxhvUn7M/xm4VbcPlWTCUd/QzU8VtsgaGHcuhi5e
xHY1AU07STLB9RinjBVf2bmk4oDQcmB6uQGNBGkih7QBDACrjE+xSWP92n931/5t
+tXcujwFlIpSZdbSQFr0B0YyjPRUP4FSzEGu8vuM5ChUfWKhmN1dDr5C4qFo9NgQ
6oCN2HubajSGyXNwnOMlMb5ck79Ubmy9yDV9/ZLqpJJiozGap2/EnNoDhaANlmUg
rfqUHpIB8XC2IZ0Itt05tp/u78dJiB+R6ReZn/bVUafNV4jIqYZfLRzI3FTJ4xvK
FGs/ER+JajAdJQ8LPfazmDQSGw0huguxhopZwKQ/qWZMn1OHq/ZaPvCqbQt3irLw
dLPDC4pEaYGRyADYeyuarG0DVyUQ9XRc/NufKDvOAn33LpBPBpcvNQAsVhWTCYl7
ogQ+suVYVN8Tu7v4bUSHKwzXKvLN/ojJX/Fh7eTW4TPsgLHNHAEDUkSQozIe9vO6
o+vydDqRxuXJgdkR7lqP6PQDYrhRYZGJf57eKf6VtTKYFaMbiMWPU+vcHeB0/iDe
Pv81qro2LD2PG5WCzDpNETBceCTjykb9r0VHx4/JsiojKmsAEQEAAYkBtgQYAQoA
IBYhBB51VqgFObg5S8KCDREsGnmEZfvyBQJpIoe0AhsMAAoJEBEsGnmEZfvyNp8M
AIH+6+hGB3qADdnhNgb+3fN0511eK9Uk82lxgGARLcD8GN1UP0HlvEqkxCHy3PUe
tHcsuYVz7i8pmpEGdFx9zv7MelenUsJniUQ++OZKx6iUG/MYqz//NxY+5lyRmcu2
aYvUxhkgf9zgxXTkTyV2VV32mX//cHcwc+c/089QAPzCMaSrHdNK+ED9+k8uquJ1
lSL9Bm15z/EV42v9Q/4KTM5OBLHpNw0Rvn9C0iuZVwHXBrrA/HSGXpA54AqNUMpZ
kRPgLQcy5yVE2y1aXLXt2XdTn6YPzrAjNoazYYuCWHYIZU7dGkIswpsDirDLKHdD
onb3VShmSpemYjsuFiqhfi6qwCkeHsz/CpQAp70SZ+z9oB8H80PJVKPbPIP3zEf3
i7bcsqHA7stF+8sJclXgxBUBeDJ3O2jN/scBOcvNA6xoRp7+oJbnjDRuxBmh+fVg
TIuw2++vTF2Ml0EMv7ePTpr7b1DofuJRNYGkuAIMVXHjLTqMiTJUce3OUy003zMg
Dg==
=AaPQ
-----END PGP PUBLIC KEY BLOCK-----

21
LICENSE
View File

@ -1,21 +0,0 @@
MIT License
Copyright (c) 2025 Witty One Off
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

221
QUICK_START_APT_REPO.md Normal file
View File

@ -0,0 +1,221 @@
# Quick Start: Setting Up Your socktop APT Repository
This guide will get your APT repository up and running in **under 10 minutes**.
## Prerequisites
- [ ] Debian packages built (or use GitHub Actions to build them)
- [ ] GPG key for signing
- [ ] GitHub repository with Pages enabled
## Step 1: Create GPG Key (if needed)
```bash
# Generate a new key
gpg --full-generate-key
# Select:
# - RSA and RSA (default)
# - 4096 bits
# - Key does not expire (or 2 years)
# - Your name and email
# Get your key ID
gpg --list-secret-keys --keyid-format LONG
# Look for the part after "rsa4096/" - that's your KEY-ID
```
## Step 2: Initialize Repository Locally
```bash
cd socktop
# Create the repository structure
./scripts/init-apt-repo.sh
# Build packages (or download from GitHub Actions)
cargo install cargo-deb
cargo deb --package socktop
cargo deb --package socktop_agent
# Add packages to repository
./scripts/add-package-to-repo.sh target/debian/socktop_*.deb
./scripts/add-package-to-repo.sh target/debian/socktop-agent_*.deb
# Sign the repository (replace YOUR-KEY-ID with actual key ID)
./scripts/sign-apt-repo.sh apt-repo stable YOUR-KEY-ID
# Update URLs with your GitHub username
sed -i 's/YOUR-USERNAME/your-github-username/g' apt-repo/README.md apt-repo/index.html
```
## Step 3: Publish to GitHub Pages (gh-pages branch)
```bash
# Create gh-pages branch
git checkout --orphan gh-pages
git rm -rf .
# Copy apt-repo CONTENTS to root (not the folder itself)
cp -r apt-repo/* .
rm -rf apt-repo
# Commit and push
git add .
git commit -m "Initialize APT repository"
git push -u origin gh-pages
# Go back to main branch
git checkout main
```
Then in GitHub:
1. Go to **Settings → Pages**
2. Source: **Deploy from a branch**
3. Branch: **gh-pages**, folder **/ (root)** → **Save**
Wait 1-2 minutes, then visit: `https://your-username.github.io/socktop/`
## Step 4: Automate with GitHub Actions
Add these secrets to your repository (Settings → Secrets → Actions):
```bash
# Export your private key
gpg --armor --export-secret-key YOUR-KEY-ID
# Copy the ENTIRE output and save as secret: GPG_PRIVATE_KEY
```
Add these three secrets:
- **GPG_PRIVATE_KEY**: Your exported private key
- **GPG_KEY_ID**: Your key ID (e.g., `ABC123DEF456`)
- **GPG_PASSPHRASE**: Your key passphrase (leave empty if no passphrase)
The workflow in `.github/workflows/publish-apt-repo.yml` will now:
- Build packages for AMD64 and ARM64
- Update the APT repository
- Sign with your GPG key
- Push to gh-pages automatically
Trigger it by:
- Creating a version tag: `git tag v1.50.0 && git push --tags`
- Manual dispatch from GitHub Actions tab
## Step 5: Test It
On any Debian/Ubuntu system:
```bash
# Add your repository
curl -fsSL https://your-username.github.io/socktop/KEY.gpg | \
sudo gpg --dearmor -o /usr/share/keyrings/socktop-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/socktop-archive-keyring.gpg] https://your-username.github.io/socktop stable main" | \
sudo tee /etc/apt/sources.list.d/socktop.list
# Install
sudo apt update
sudo apt install socktop socktop-agent
# Verify
socktop --version
socktop_agent --version
```
## Maintenance
### Add a New Version
```bash
# Build new packages
cargo deb --package socktop
cargo deb --package socktop_agent
# Add to repository
./scripts/add-package-to-repo.sh target/debian/socktop_*.deb
./scripts/add-package-to-repo.sh target/debian/socktop-agent_*.deb
# Re-sign
./scripts/sign-apt-repo.sh apt-repo stable YOUR-KEY-ID
# Publish
cd docs/apt # or wherever your apt-repo is
git add .
git commit -m "Release v1.51.0"
git push origin main
```
### Or Just Tag and Let GitHub Actions Do It
```bash
# Update version in Cargo.toml
# Commit changes
git add .
git commit -m "Bump version to 1.51.0"
# Tag and push
git tag v1.51.0
git push origin main --tags
# GitHub Actions will:
# - Build packages for AMD64 and ARM64
# - Update gh-pages branch automatically
# - Sign and publish!
```
## Troubleshooting
### "Repository not signed" error
Make sure you signed it:
```bash
./scripts/sign-apt-repo.sh apt-repo stable YOUR-KEY-ID
ls apt-repo/dists/stable/Release*
# Should show: Release, Release.gpg, InRelease
```
### "404 Not Found" on GitHub Pages
1. Check Settings → Pages is enabled
2. Wait 2-3 minutes for GitHub to deploy
3. Verify the URL structure matches your settings
### GitHub Actions not signing
Check that all three secrets are set correctly:
- Settings → Secrets and variables → Actions
- Make sure GPG_PRIVATE_KEY includes the BEGIN/END lines
- Test locally first
## What's Next?
✅ You now have a working APT repository!
**Share it:**
- Add installation instructions to your main README
- Tweet/blog about it
- Submit to awesome-rust lists
**Improve it:**
- Customize your GitHub Pages site (it's just HTML!)
- Add more architectures (ARMv7)
- Create multiple distributions (stable, testing)
- Set up download statistics
- Apply to Ubuntu PPA (Launchpad)
- Eventually submit to official Debian repos
## Full Documentation
For detailed information, see:
- `docs/APT_REPOSITORY.md` - Complete APT repository guide
- `docs/DEBIAN_PACKAGING.md` - Debian packaging details
- `DEBIAN_PACKAGING_SUMMARY.md` - Quick summary
## Questions?
Open an issue on GitHub or check the full documentation.
---
**Happy packaging! 📦**

564
README.md
View File

@ -1,556 +1,38 @@
# socktop # socktop APT Repository
socktop is a remote system monitor with a rich TUI, inspired by top/btop, talking to a lightweight agent over WebSockets. This repository contains Debian packages for socktop and socktop-agent.
- Linux agent: near-zero CPU when idle (request-driven, no always-on sampler) ## Adding this repository
- TUI: smooth graphs, sortable process table, scrollbars, readable colors
<img src="./docs/socktop_demo.apng" width="100%"> Add the repository to your system:
---
## Features
- Remote monitoring via WebSocket (JSON over WS)
- Optional WSS (TLS): agent auto-generates a self-signed cert on first run; client pins the cert via --tls-ca/-t
- TUI built with ratatui
- CPU
- Overall sparkline + per-core mini bars
  - Accurate per-process CPU% (Linux /proc deltas), normalized to 0–100%
- Memory/Swap gauges with human units
- Disks: per-device usage
- Network: per-interface throughput with sparklines and peak markers
- Temperatures: CPU (optional)
- Top processes (top 50)
- PID, name, CPU%, memory, and memory%
- Click-to-sort by CPU% or Mem (descending)
- Scrollbar and mouse/keyboard scrolling
- Total process count shown in the header
- Only top-level processes listed (threads hidden) — matches btop/top
- Optional GPU metrics (can be disabled)
- Optional auth token for the agent
---
## Prerequisites: Install Rust (rustup)
Rust is fast, safe, and cross-platform. Installing it will make your machine better. Consider yourself privileged.
Linux/macOS:
```bash
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
# load cargo for this shell
source "$HOME/.cargo/env"
# ensure stable is up to date
rustup update stable
rustc --version
cargo --version
# after install you may need to reload your shell, e.g.:
exec bash # or: exec zsh / exec fish
```
Windows (for the brave): install from https://rustup.rs with the MSVC toolchain. Yes, you'll need Visual Studio Build Tools. You chose Windows — enjoy the ride.
### Raspberry Pi / Ubuntu / PopOS (required)
Install GPU support with apt command below
```bash ```bash
sudo apt-get update # Add the GPG key
sudo apt-get install libdrm-dev libdrm-amdgpu1 curl -fsSL https://jasonwitty.github.io/socktop/KEY.gpg | sudo gpg --dearmor -o /usr/share/keyrings/socktop-archive-keyring.gpg
# Add the repository
echo "deb [signed-by=/usr/share/keyrings/socktop-archive-keyring.gpg] https://jasonwitty.github.io/socktop stable main" | sudo tee /etc/apt/sources.list.d/socktop.list
# Update and install
sudo apt update
sudo apt install socktop socktop-agent
``` ```
_Additional note for Raspberry Pi users. Please update your system to use the newest kernel available through apt; kernel version 6.6+ will use considerably less overall CPU to run the agent. For example, on an rpi4 with kernel < 6.6 the agent will consume .8 cpu, but on the same hardware on > 6.6 the agent will consume only .2 cpu. (these numbers indicate continuous polling at web socket endpoints, when not in use the usage is 0)_ ## Manual Installation
--- You can also download and install packages manually from the `pool/main/` directory.
## Architecture
Two components:
1) Agent (remote): small Rust WS server using sysinfo + /proc. It collects metrics only when the client requests them over the WebSocket (request-driven). No background sampling loop.
2) Client (local): TUI that connects to ws://HOST:PORT/ws (or wss://HOST:PORT/ws when TLS is enabled) and renders updates.
---
## Quick start
- Build both binaries:
```bash ```bash
git clone https://github.com/jasonwitty/socktop.git wget https://jasonwitty.github.io/socktop/pool/main/socktop_VERSION_ARCH.deb
cd socktop sudo dpkg -i socktop_VERSION_ARCH.deb
cargo build --release
``` ```
- Start the agent on the target machine (default port 3000): ## Supported Architectures
```bash - amd64 (x86_64)
./target/release/socktop_agent --port 3000 - arm64 (aarch64)
``` - armhf (32-bit ARM)
- Connect with the TUI from your local machine: ## Building from Source
```bash See the main repository at https://github.com/jasonwitty/socktop
./target/release/socktop ws://REMOTE_HOST:3000/ws
```
### Cross-compiling for Raspberry Pi
For Raspberry Pi and other ARM devices, you can cross-compile the agent from a more powerful machine:
- [Cross-compilation guide](./docs/cross-compiling.md) - Instructions for cross-compiling from Linux, macOS, or Windows hosts
### Quick demo (no agent setup)
Spin up a temporary local agent on port 3231 and connect automatically:
```bash
socktop --demo
```
Or just run `socktop` with no arguments and pick the builtin `demo` entry from the interactive profile list (if you have saved profiles, `demo` is appended). The demo agent:
- Runs locally (`ws://127.0.0.1:3231/ws`)
- Stops automatically (you'll see "Stopped demo agent on port 3231") when you quit the TUI or press Ctrl-C
---
## Install (from crates.io)
You don't need to clone this repo to use socktop. Install the published binaries with cargo:
```bash
# TUI (client)
cargo install socktop
# Agent (server)
cargo install socktop_agent
```
This drops socktop and socktop_agent into ~/.cargo/bin (add it to PATH).
Notes:
- After installing Rust via rustup, reload your shell (e.g., exec bash) so cargo is on PATH.
- Windows: you can also grab prebuilt EXEs from GitHub Actions artifacts if rustup scares you. It shouldn't. Be brave.
System-wide agent (Linux)
```bash
# If you installed with cargo, binaries are in ~/.cargo/bin
sudo install -o root -g root -m 0755 "$HOME/.cargo/bin/socktop_agent" /usr/local/bin/socktop_agent
# Install and enable the systemd service (example unit in docs/)
sudo install -o root -g root -m 0644 docs/socktop-agent.service /etc/systemd/system/socktop-agent.service
sudo systemctl daemon-reload
sudo systemctl enable --now socktop-agent
```
```bash
# Enable SSL
# Stop service
sudo systemctl stop socktop-agent
# Edit service to append SSL option and port
sudo micro /etc/systemd/system/socktop-agent.service
--
ExecStart=/usr/local/bin/socktop_agent --enableSSL --port 8443
--
# Reload
sudo systemctl daemon-reload
# Restart
sudo systemctl start socktop-agent
# check logs for certificate location
sudo journalctl -u socktop-agent -f
--
Aug 22 22:25:26 rpi-master socktop_agent[2913998]: socktop_agent: generated self-signed TLS certificate at /var/lib/socktop/.config/socktop_agent/tls/cert.pem
--
```
---
## Usage
Agent (server):
```bash
socktop_agent --port 3000
# or env: SOCKTOP_PORT=3000 socktop_agent
# optional auth: SOCKTOP_TOKEN=changeme socktop_agent
# enable TLS (self-signed cert, default port 8443; you can also use -p):
socktop_agent --enableSSL --port 8443
```
Client (TUI):
```bash
socktop ws://HOST:3000/ws
# with token:
socktop "ws://HOST:3000/ws?token=changeme"
# TLS with pinned server certificate (recommended over the internet):
socktop --tls-ca /path/to/cert.pem wss://HOST:8443/ws
# (By default hostname/SAN verification is skipped for ease on home networks. To enforce it add --verify-hostname)
socktop --verify-hostname --tls-ca /path/to/cert.pem wss://HOST:8443/ws
# shorthand:
socktop -t /path/to/cert.pem wss://HOST:8443/ws
# Note: providing --tls-ca/-t automatically upgrades ws:// to wss:// if you forget
```
Intervals (client-driven):
- Fast metrics: ~500 ms
- Processes: ~2 s
- Disks: ~5 s
The agent stays idle unless queried. When queried, it collects just what's needed.
---
## Connection Profiles (Named)
You can save frequently used connection settings (URL + optional TLS CA path) under a short name and reuse them later.
Config file location:
- Linux (XDG): `$XDG_CONFIG_HOME/socktop/profiles.json`
- Fallback (when XDG not set): `~/.config/socktop/profiles.json`
### Creating a profile
First time you specify a new `--profile/-P` name together with a URL (and optional `--tls-ca`), it is saved automatically:
```bash
socktop --profile prod ws://prod-host:3000/ws
# With TLS pinning:
socktop --profile prod-tls --tls-ca /path/to/cert.pem wss://prod-host:8443/ws
```
You can also set custom intervals (milliseconds):
```bash
socktop --profile prod --metrics-interval-ms 750 --processes-interval-ms 3000 ws://prod-host:3000/ws
```
If a profile already exists you will be prompted before overwriting:
```
$ socktop --profile prod ws://new-host:3000/ws
Overwrite existing profile 'prod'? [y/N]: y
```
To overwrite without an interactive prompt pass `--save`:
```bash
socktop --profile prod --save ws://new-host:3000/ws
```
### Using a saved profile
Just pass the profile name (no URL needed):
```bash
socktop --profile prod
socktop -P prod-tls # short flag
```
The stored URL (and TLS CA path, if any) plus any saved intervals will be used. TLS auto-upgrade still applies if a CA path is stored alongside a ws:// URL.
### Interactive selection (no args)
If you run `socktop` with no arguments and at least one profile exists, you will be shown a numbered list to pick from:
```
$ socktop
Select profile:
1. prod
2. prod-tls
Enter number (or blank to abort): 2
```
Choosing a number starts the TUI with that profile. A builtin `demo` option is always appended; selecting it launches a local agent on port 3231 (no TLS) and connects to `ws://127.0.0.1:3231/ws`. Pressing Enter on blank aborts without connecting.
### JSON format
An example `profiles.json` (pretty-printed):
```json
{
"profiles": {
"prod": { "url": "ws://prod-host:3000/ws" },
"prod-tls": {
"url": "wss://prod-host:8443/ws",
"tls_ca": "/home/user/certs/prod-cert.pem",
"metrics_interval_ms": 500,
"processes_interval_ms": 2000
}
},
"version": 0
}
```
Notes:
- The `tls_ca` path is stored as given; if you move or rotate the certificate update the profile by re-running with `--profile NAME --save`.
- Deleting a profile: edit the JSON file and remove the entry (TUI does not yet have an in-app delete command).
- Profiles are client-side convenience only; they do not affect the agent.
- Intervals: `metrics_interval_ms` controls the fast metrics poll (default 500 ms). `processes_interval_ms` controls process list polling (default 2000 ms). Values below 100 ms (metrics) or 200 ms (processes) are clamped.
---
## Updating
Update the agent (systemd):
```bash
# on the server running the agent
cargo install socktop_agent --force
sudo systemctl stop socktop-agent
sudo install -o root -g root -m 0755 "$HOME/.cargo/bin/socktop_agent" /usr/local/bin/socktop_agent
# if you changed the unit file:
# sudo install -o root -g root -m 0644 docs/socktop-agent.service /etc/systemd/system/socktop-agent.service
# sudo systemctl daemon-reload
sudo systemctl start socktop-agent
sudo systemctl status socktop-agent --no-pager
# logs:
# journalctl -u socktop-agent -f
```
Update the TUI (client):
```bash
cargo install socktop --force
socktop ws://HOST:3000/ws
```
Tip: If only the binary changed, restart is enough. If the unit file changed, run sudo systemctl daemon-reload.
---
## Configuration (agent)
- Port:
- Flag: --port 8080 or -p 8080
- Positional: socktop_agent 8080
- Env: SOCKTOP_PORT=8080
- TLS (self-signed):
- Enable: --enableSSL
- Default TLS port: 8443 (override with --port/-p)
- Certificate/Key location (created on first TLS run):
- Linux (XDG): $XDG_CONFIG_HOME/socktop_agent/tls/{cert.pem,key.pem} (defaults to ~/.config)
- The agent prints these paths on creation.
- You can set XDG_CONFIG_HOME before first run to control where certs are written.
- Additional SANs: set `SOCKTOP_AGENT_EXTRA_SANS` (comma-separated) before first TLS start to include extra IPs/DNS names in the cert. Example:
```bash
SOCKTOP_AGENT_EXTRA_SANS="192.168.1.101,myhost.internal" socktop_agent --enableSSL
```
This prevents client errors like `NotValidForName` when connecting via an IP not present in the default cert SAN list.
- Expiry / rotation: the generated cert is valid for ~397 days from creation. If the agent fails to start with an "ExpiredCertificate" error (or your client reports expiry), simply delete the existing cert and key:
```bash
rm ~/.config/socktop_agent/tls/cert.pem ~/.config/socktop_agent/tls/key.pem
# (adjust path if XDG_CONFIG_HOME is set or different user)
systemctl restart socktop-agent # if running under systemd
```
On next TLS start the agent will generate a fresh pair. Only distribute the new cert.pem to clients (never the key).
- Auth token (optional): SOCKTOP_TOKEN=changeme
- Disable GPU metrics: SOCKTOP_AGENT_GPU=0
- Disable CPU temperature: SOCKTOP_AGENT_TEMP=0
---
## Keyboard & Mouse
- Quit: q or Esc
- Processes pane:
- Click “CPU %” to sort by CPU descending
- Click “Mem” to sort by memory descending
- Mouse wheel: scroll
- Drag scrollbar: scroll
- Arrow/PageUp/PageDown/Home/End: scroll
---
## Example agent JSON
```json
{
"cpu_total": 12.4,
"cpu_per_core": [11.2, 15.7],
"mem_total": 33554432,
"mem_used": 18321408,
"swap_total": 0,
"swap_used": 0,
"process_count": 127,
"hostname": "myserver",
"cpu_temp_c": 42.5,
"disks": [{"name":"nvme0n1p2","total":512000000000,"available":320000000000}],
"networks": [{"name":"eth0","received":12345678,"transmitted":87654321}],
"top_processes": [
{"pid":1234,"name":"nginx","cpu_usage":1.2,"mem_bytes":12345678}
],
"gpus": null
}
```
Notes:
- process_count is merged into the main metrics on the client when processes are polled.
- top_processes are the current top 50 (sorting in the TUI is client-side).
---
## Security
Set a token on the agent and pass it as a query param from the client:
Server:
```bash
SOCKTOP_TOKEN=changeme socktop_agent --port 3000
```
Client:
```bash
socktop "ws://HOST:3000/ws?token=changeme"
```
### TLS / WSS
For encrypted connections, enable TLS on the agent and pin the server certificate on the client.
Server (generates self-signed cert and key on first run):
```bash
socktop_agent --enableSSL --port 8443
```
Client (trust/pin the server cert; copy cert.pem from the agent):
```bash
socktop --tls-ca /path/to/agent/cert.pem wss://HOST:8443/ws
```
Notes:
- Do not copy the private key off the server; only the cert.pem is needed by clients.
- When --tls-ca/-t is supplied, the client auto-upgrades ws:// to wss:// to avoid protocol mismatch.
- Hostname (SAN) verification is DISABLED by default (the cert is still pinned). Use `--verify-hostname` to enable strict SAN checking.
- You can run multiple clients with different cert paths by passing --tls-ca per invocation.
---
## Using tmux to monitor multiple hosts
You can use tmux to show multiple socktop instances in a single terminal.
![socktop screenshot](./docs/tmux_4_rpis_v3.jpg)
monitoring 4 Raspberry Pis using Tmux
Prerequisites:
- Install tmux (Ubuntu/Debian: `sudo apt-get install tmux`)
Key bindings (defaults):
- Split left/right: Ctrl-b %
- Split top/bottom: Ctrl-b "
- Move between panes: Ctrl-b + Arrow keys
- Show pane numbers: Ctrl-b q
- Close a pane: Ctrl-b x
- Detach from session: Ctrl-b d
Two panes (left/right)
- This creates a session named "socktop", splits it horizontally, and starts two socktops.
```bash
tmux new-session -d -s socktop 'socktop ws://HOST1:3000/ws' \; \
split-window -h 'socktop ws://HOST2:3000/ws' \; \
select-layout even-horizontal \; \
attach
```
Four panes (top-left, top-right, bottom-left, bottom-right)
- This creates a 2x2 grid with one socktop per pane.
```bash
tmux new-session -d -s socktop 'socktop ws://HOST1:3000/ws' \; \
split-window -h 'socktop ws://HOST2:3000/ws' \; \
select-pane -t 0 \; split-window -v 'socktop ws://HOST3:3000/ws' \; \
select-pane -t 1 \; split-window -v 'socktop ws://HOST4:3000/ws' \; \
select-layout tiled \; \
attach
```
Tips:
- Replace HOST1..HOST4 (and ports) with your targets.
- Reattach later: `tmux attach -t socktop`
---
## Platform notes
- Linux: fully supported (agent and client).
- Raspberry Pi:
- 64-bit: aarch64-unknown-linux-gnu
- 32-bit: armv7-unknown-linux-gnueabihf
- Windows:
  - TUI + agent can build with stable Rust; bring your own MSVC. You're on Windows; you know the drill.
- CPU temperature may be unavailable.
- binary exe for both available in build artifacts under actions.
- macOS:
  - TUI works; agent is primarily targeted at Linux. The agent will run just fine on macOS for debugging, but I have not documented how to run it as a service, and I may not, given the "security" features for applications on macOS. We will see.
---
## Development
```bash
cargo fmt
cargo clippy --all-targets --all-features
cargo run -p socktop -- ws://127.0.0.1:3000/ws
# TLS (dev): first run will create certs under ~/.config/socktop_agent/tls/
cargo run -p socktop_agent -- --enableSSL --port 8443
```
### Auto-format on commit
A sample pre-commit hook that runs `cargo fmt --all` is provided in `.githooks/pre-commit`.
Enable it (one-time):
```bash
git config core.hooksPath .githooks
chmod +x .githooks/pre-commit
```
Every commit will then format Rust sources and restage them automatically.
---
## Roadmap
- [x] Agent authentication (token)
- [x] Hide per-thread entries; only show processes
- [x] Sort top processes in the TUI
- [x] Configurable refresh intervals (client)
- [ ] Export metrics to file
- [x] TLS / WSS support (selfsigned server cert + client pinning)
- [x] Split processes/disks to separate WS calls with independent cadences (already logical on client; formalize API)
- [ ] Outage notifications and reconnect.
- [ ] Per process detailed statistics pane
- [ ] cleanup of Disks section, properly display physical disks / partitions, remove duplicate entries
---
## License
MIT — see LICENSE.
---
## Acknowledgements
- ratatui for the TUI
- sysinfo for system metrics
- tokio-tungstenite for WebSockets

351
SETUP_GITHUB_PAGES.md Normal file
View File

@ -0,0 +1,351 @@
# Setting Up GitHub Pages for socktop APT Repository
This guide walks you through the initial setup of your APT repository on GitHub Pages using the `gh-pages` branch.
## Prerequisites
- [ ] You've run `./scripts/setup-apt-repo.sh` or manually created `apt-repo/`
- [ ] `apt-repo/` contains signed packages and metadata
- [ ] You have a GitHub repository for socktop
## Step-by-Step Setup
### 1. Verify Your Local Repository
First, make sure everything is ready:
```bash
# Check that apt-repo exists and has content
ls -la apt-repo/
# You should see:
# - dists/stable/Release, Release.gpg, InRelease
# - pool/main/*.deb
# - KEY.gpg
# - index.html, README.md
```
### 2. Create and Switch to gh-pages Branch
```bash
# Create a new orphan branch (no history from main)
git checkout --orphan gh-pages
# Remove all files from staging
git rm -rf .
```
**Important:** This creates a completely separate branch. Don't worry - your main branch is safe!
### 3. Copy APT Repository to Root
```bash
# Copy CONTENTS of apt-repo to root of gh-pages
cp -r apt-repo/* .
# Remove the apt-repo directory itself
rm -rf apt-repo
# Verify the structure
ls -la
# You should see in the current directory:
# - dists/
# - pool/
# - KEY.gpg
# - index.html
# - README.md
```
**Why root?** GitHub Pages can only serve from:
- `/` (root) - what we're doing
- `/docs` directory
- NOT from custom directories like `/apt-repo`
### 4. Commit and Push
```bash
# Add all files
git add .
# Commit
git commit -m "Initialize APT repository for GitHub Pages"
# Push to gh-pages branch
git push -u origin gh-pages
```
### 5. Return to Main Branch
```bash
# Switch back to your main development branch
git checkout main
# Verify you're back on main
git branch
# Should show: * main
```
### 6. Enable GitHub Pages
1. Go to your repository on GitHub
2. Click **Settings** (top right)
3. Click **Pages** (left sidebar)
4. Under "Build and deployment":
- Source: **Deploy from a branch**
- Branch: **gh-pages**
- Folder: **/ (root)**
- Click **Save**
### 7. Wait for Deployment
GitHub will deploy your site. This usually takes 1-2 minutes.
You can watch the progress:
- Go to **Actions** tab
- Look for "pages build and deployment" workflow
### 8. Verify Your Repository is Live
Once deployed, your repository will be at:
```
https://YOUR-USERNAME.github.io/socktop/
```
Test it:
```bash
# Check the public key is accessible
curl -I https://YOUR-USERNAME.github.io/socktop/KEY.gpg
# Should return: HTTP/2 200
# Check the Release file
curl -I https://YOUR-USERNAME.github.io/socktop/dists/stable/Release
# Should return: HTTP/2 200
```
### 9. Test Installation (Optional but Recommended)
On a Debian/Ubuntu VM or system:
```bash
# Add GPG key
curl -fsSL https://YOUR-USERNAME.github.io/socktop/KEY.gpg | \
sudo gpg --dearmor -o /usr/share/keyrings/socktop-archive-keyring.gpg
# Add repository
echo "deb [signed-by=/usr/share/keyrings/socktop-archive-keyring.gpg] https://YOUR-USERNAME.github.io/socktop stable main" | \
sudo tee /etc/apt/sources.list.d/socktop.list
# Update package lists
sudo apt update
# You should see:
# Get:1 https://YOUR-USERNAME.github.io/socktop stable InRelease [xxx B]
# Install packages
sudo apt install socktop socktop-agent
# Verify
socktop --version
```
## Understanding the Two Branches
After setup, you'll have two branches:
### `main` branch (development)
```
main/
├── src/
├── Cargo.toml
├── scripts/
├── docs/
├── apt-repo/ ← Local build artifact (not published)
└── ...
```
**Purpose:** Source code, development, building packages
### `gh-pages` branch (published)
```
gh-pages/
├── dists/
├── pool/
├── KEY.gpg
├── index.html ← Customize this for a nice landing page!
└── README.md
```
**Purpose:** Published APT repository served by GitHub Pages
## Workflow Going Forward
### Manual Updates
When you release a new version:
```bash
# 1. On main branch, build new packages
git checkout main
cargo deb --package socktop
cargo deb --package socktop_agent
# 2. Update local apt-repo
./scripts/add-package-to-repo.sh target/debian/socktop_*.deb
./scripts/add-package-to-repo.sh target/debian/socktop-agent_*.deb
./scripts/sign-apt-repo.sh apt-repo stable YOUR-GPG-KEY-ID
# 3. Switch to gh-pages and update
git checkout gh-pages
cp -r apt-repo/* .
git add .
git commit -m "Release v1.51.0"
git push origin gh-pages
# 4. Return to main
git checkout main
```
### Automated Updates (Recommended)
Set up GitHub Actions to do this automatically:
1. Add GitHub Secrets (Settings → Secrets → Actions):
- `GPG_PRIVATE_KEY` - Your exported private key
- `GPG_KEY_ID` - Your GPG key ID
- `GPG_PASSPHRASE` - Your GPG passphrase (if any)
2. Tag and push:
```bash
git tag v1.51.0
git push origin main --tags
```
3. GitHub Actions will automatically:
- Build packages for AMD64 and ARM64
- Update apt-repo
- Sign with your GPG key
- Push to gh-pages
- Create GitHub Release
See `.github/workflows/publish-apt-repo.yml` for details.
## Customizing Your GitHub Pages Site
The `gh-pages` branch contains `index.html` which users see when they visit:
`https://YOUR-USERNAME.github.io/socktop/`
You can customize this! On the `gh-pages` branch:
```bash
git checkout gh-pages
# Edit index.html
nano index.html
# Add features, badges, screenshots, etc.
git add index.html
git commit -m "Improve landing page"
git push origin gh-pages
git checkout main
```
## Troubleshooting
### "404 Not Found" on GitHub Pages
**Check:**
- Settings → Pages shows "Your site is live at..."
- Wait 2-3 minutes after pushing
- Verify branch is `gh-pages` and folder is `/`
- Check Actions tab for deployment errors
### "Repository not found" when installing
**Check:**
- URL is correct: `https://USERNAME.github.io/REPO/` (no trailing /apt-repo)
- Files exist at the URLs:
```bash
curl -I https://USERNAME.github.io/REPO/dists/stable/InRelease
curl -I https://USERNAME.github.io/REPO/KEY.gpg
```
### "GPG error" when installing
**Check:**
- Repository is signed: `ls gh-pages/dists/stable/Release.gpg`
- Users imported the key: `curl https://USERNAME.github.io/REPO/KEY.gpg | gpg --import`
### Changes not appearing
**Check:**
- You committed and pushed to `gh-pages` (not `main`)
- Wait 1-2 minutes for GitHub to redeploy
- Clear browser cache if viewing index.html
- For apt: `sudo apt clean && sudo apt update`
## Success Checklist
After completing this guide, you should have:
- [ ] `gh-pages` branch created and pushed
- [ ] GitHub Pages enabled and deployed
- [ ] Site accessible at `https://USERNAME.github.io/socktop/`
- [ ] `KEY.gpg` downloadable
- [ ] `dists/stable/InRelease` accessible
- [ ] Packages in `pool/main/*.deb` downloadable
- [ ] Successfully tested installation on a test system
- [ ] Understand the workflow for future updates
## Next Steps
1. **Update your main README.md** with installation instructions
2. **Set up GitHub Actions** for automated releases
3. **Customize index.html** on gh-pages for a nice landing page
4. **Test on multiple architectures** (AMD64, ARM64)
5. **Share your repository** with users
## Quick Reference
**Switch branches:**
```bash
git checkout main # Development
git checkout gh-pages # Published site
```
**Update published site manually:**
```bash
git checkout main
# ... build packages, update apt-repo ...
git checkout gh-pages
cp -r apt-repo/* .
git add . && git commit -m "Update" && git push
git checkout main
```
**Your repository URL:**
```
https://YOUR-USERNAME.github.io/socktop/
```
**User installation command:**
```bash
curl -fsSL https://YOUR-USERNAME.github.io/socktop/KEY.gpg | \
sudo gpg --dearmor -o /usr/share/keyrings/socktop-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/socktop-archive-keyring.gpg] https://YOUR-USERNAME.github.io/socktop stable main" | \
sudo tee /etc/apt/sources.list.d/socktop.list
sudo apt update && sudo apt install socktop socktop-agent
```
---
**Need help?** See:
- `QUICK_START_APT_REPO.md` - Overall quick start
- `docs/APT_REPOSITORY.md` - Comprehensive guide
- `docs/APT_WORKFLOW.md` - Visual workflow diagrams

119
WHY_GHPAGES_BRANCH.md Normal file
View File

@ -0,0 +1,119 @@
# Why We Use the gh-pages Branch
## The Problem
GitHub Pages has a limitation - it can only serve static sites from:
1. **`/` (root)** of a branch
2. **`/docs`** directory of a branch
3. **NOT** from custom directories like `/apt-repo`
## Why Not `/docs`?
When you tried to enable GitHub Pages with `apt-repo/` checked into main, you couldn't select it because:
```
main/
├── src/
├── Cargo.toml
├── apt-repo/ ← GitHub Pages can't serve from here!
└── ...
```
You could move it to `/docs`:
```
main/
├── src/
├── Cargo.toml
├── docs/ ← GitHub Pages CAN serve from here
│ ├── dists/
│ ├── pool/
│ └── ...
└── ...
```
But this has downsides:
- ❌ Mixed source code and published content
- ❌ Large .deb files bloat the main branch
- ❌ Can't easily customize the site without affecting source
- ❌ Messy git history with binary files
## Why gh-pages Branch (Our Solution)
Using a separate `gh-pages` branch is cleaner:
```
main branch (source code):
├── src/
├── Cargo.toml
├── scripts/
└── docs/ ← Documentation source
gh-pages branch (published):
├── dists/
├── pool/
├── KEY.gpg
├── index.html ← Customizable landing page
└── README.md
```
### Benefits
**Clean separation**: Source code stays in `main`, published content in `gh-pages`
**No binary bloat**: .deb files don't clutter your main branch history
**Easy automation**: GitHub Actions can push to gh-pages without affecting main
**Customizable**: You can make a beautiful landing page on gh-pages
**Standard practice**: Most GitHub Pages projects use gh-pages branch
**Root URL**: Your repo is at `https://username.github.io/socktop/` (not `/apt-repo`)
### Workflow
```
Developer (main branch)
Build packages
Update apt-repo/ (local)
Push to gh-pages branch
GitHub Pages serves
Users: apt install socktop
```
## The Setup
**One-time setup:**
```bash
git checkout --orphan gh-pages
git rm -rf .
cp -r apt-repo/* .
rm -rf apt-repo
git add . && git commit -m "Initialize APT repository"
git push -u origin gh-pages
git checkout main
```
**Going forward:**
- Work on `main` branch for development
- `gh-pages` branch gets updated by GitHub Actions (or manually)
- Never need to switch branches manually after automation is set up!
## Comparison
| Approach | Location | URL | Pros | Cons |
|----------|----------|-----|------|------|
| **gh-pages branch** ✅ | gh-pages:/ | `username.github.io/socktop/` | Clean, automated, customizable | Two branches |
| `/docs` on main | main:/docs | `username.github.io/socktop/` | One branch | Mixed content, binary bloat |
| `/apt-repo` on main | main:/apt-repo | ❌ Not possible | - | GitHub Pages won't allow it |
## Conclusion
The `gh-pages` branch approach is:
- The **cleanest** solution
- The **most flexible** for customization
- The **easiest to automate**
- **Industry standard** for GitHub Pages
That's why we chose it! 🚀

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

43
dists/stable/InRelease Normal file
View File

@ -0,0 +1,43 @@
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA512
Origin: socktop
Label: socktop
Suite: stable
Codename: stable
Architectures: amd64 arm64 armhf riscv64
Components: main
Description: socktop APT repository
Date: Mon, 24 Nov 2025 20:01:32 +0000
MD5Sum:
0b1110782420dd7941940d8f99adda34 1627 main/binary-amd64/Packages
7c1447cf37137d7a72534cea3cb3872f 836 main/binary-amd64/Packages.gz
1e8f176ddd62df4bed3d11aa9673d5f1 1627 main/binary-arm64/Packages
165f010f2f2e85775e602bbfc32b9a67 832 main/binary-arm64/Packages.gz
0c17d836ddf76547285336b1a9948daf 1610 main/binary-armhf/Packages
6ee29ba21442c21c3a1ad89aaa1bcdf6 820 main/binary-armhf/Packages.gz
1fc7e24923a509ea722c2d0d5189700a 1623 main/binary-riscv64/Packages
6fc7db17b307382c4042cd7fab03010a 820 main/binary-riscv64/Packages.gz
SHA256:
eb77b0c29f6d909a0e26596ff5c897ad32c45fa0c95d14f435e68264e2f02024 1627 main/binary-amd64/Packages
e045e3303e653094ad828aa11adb0fd7194f2fe422b99cd0ee0a1c2100d86544 836 main/binary-amd64/Packages.gz
7054a62c0a4cdda4be5a264809c358a83f8323b388d43880d73557588c20fe15 1627 main/binary-arm64/Packages
d87f2cc66015a7d6f7b6755330af0003fa444d27229eb6127df00ab847b9cd53 832 main/binary-arm64/Packages.gz
d17d55bd46dc1955b09615b3bb7ce009f34e277b1b11e131b340afc77157e554 1610 main/binary-armhf/Packages
53e37cd254c6f96272f7b9f83e2ad86e2013be1a3e4061a35592c795102aa087 820 main/binary-armhf/Packages.gz
a43bb5de1b4b43fbd02e141db8c0134558b4750f9c2835f468cb731701103ba6 1623 main/binary-riscv64/Packages
1d0be4332d0794b3a25ea81774690030276fb791f47eaf07758041183079807d 820 main/binary-riscv64/Packages.gz
-----BEGIN PGP SIGNATURE-----
iQGzBAEBCgAdFiEEHnVWqAU5uDlLwoINESwaeYRl+/IFAmkkuZ4ACgkQESwaeYRl
+/LaawwAv9CIkkuqc7bvgyLNNVf2GEi0UV3I/DBg0YX47gKI2u4wp760sf53BYkk
wmYZkPcQFFAICVEY7j/WqvnRBkNb67sF7eS4p14IW9UB4GAMs5U0k6737VIwp47G
AUbuqZKv3kRk/x7XeZgmqnipXtSqlfVct2dx1+53yGqnwdSywpU9Ns64Iod4/lLQ
ipXkKOmb+SdGM90uv4lQ+BdleRykqb8cC761LKJEYZEByUal95woBW8cp9EycJXu
yqqAX21Bgx8YC7aF0Z61e9BYlUGXnxFwz4pn0hwvLli8X5dJEdUxWnuto+b/5HAk
ql7gicDHfxOKOUv2qQFsShsybJm4axWMBqMudDohZTtylfp0rgAQX8Tr6U1u303a
1kmU8v0Ph/XOfZT1gGnHfEn6RlOmDfaSRqcF2USyZTzrPgo9g8EKCiBuZm+Som9a
VjQmvdRHiJ0K84bfNpjFW4aJo0xE2EQR7xS/RXCA0SQ9YhFrxBxYr5TxWM0N0C6j
LCsp4O4A
=Jzma
-----END PGP SIGNATURE-----

26
dists/stable/Release Normal file
View File

@ -0,0 +1,26 @@
Origin: socktop
Label: socktop
Suite: stable
Codename: stable
Architectures: amd64 arm64 armhf riscv64
Components: main
Description: socktop APT repository
Date: Mon, 24 Nov 2025 20:01:32 +0000
MD5Sum:
0b1110782420dd7941940d8f99adda34 1627 main/binary-amd64/Packages
7c1447cf37137d7a72534cea3cb3872f 836 main/binary-amd64/Packages.gz
1e8f176ddd62df4bed3d11aa9673d5f1 1627 main/binary-arm64/Packages
165f010f2f2e85775e602bbfc32b9a67 832 main/binary-arm64/Packages.gz
0c17d836ddf76547285336b1a9948daf 1610 main/binary-armhf/Packages
6ee29ba21442c21c3a1ad89aaa1bcdf6 820 main/binary-armhf/Packages.gz
1fc7e24923a509ea722c2d0d5189700a 1623 main/binary-riscv64/Packages
6fc7db17b307382c4042cd7fab03010a 820 main/binary-riscv64/Packages.gz
SHA256:
eb77b0c29f6d909a0e26596ff5c897ad32c45fa0c95d14f435e68264e2f02024 1627 main/binary-amd64/Packages
e045e3303e653094ad828aa11adb0fd7194f2fe422b99cd0ee0a1c2100d86544 836 main/binary-amd64/Packages.gz
7054a62c0a4cdda4be5a264809c358a83f8323b388d43880d73557588c20fe15 1627 main/binary-arm64/Packages
d87f2cc66015a7d6f7b6755330af0003fa444d27229eb6127df00ab847b9cd53 832 main/binary-arm64/Packages.gz
d17d55bd46dc1955b09615b3bb7ce009f34e277b1b11e131b340afc77157e554 1610 main/binary-armhf/Packages
53e37cd254c6f96272f7b9f83e2ad86e2013be1a3e4061a35592c795102aa087 820 main/binary-armhf/Packages.gz
a43bb5de1b4b43fbd02e141db8c0134558b4750f9c2835f468cb731701103ba6 1623 main/binary-riscv64/Packages
1d0be4332d0794b3a25ea81774690030276fb791f47eaf07758041183079807d 820 main/binary-riscv64/Packages.gz

14
dists/stable/Release.gpg Normal file
View File

@ -0,0 +1,14 @@
-----BEGIN PGP SIGNATURE-----
iQGzBAABCgAdFiEEHnVWqAU5uDlLwoINESwaeYRl+/IFAmkkuZwACgkQESwaeYRl
+/LkCQv+M3ceDIfGIJYN5PoDJjE5ON2RuOg+GQscz44qcOwrXxj2E76LpAhkjbrC
RFQOHPp2harTDLAQ5b1PcxCy7DygTYgyFVXxn3bqf5NwzXFDHGzVMhFHLBqDr7e7
rkG6k2H6OMUV4SLhx6XQMp74fMP3e4qvKiZRP0LPn2ZiQcnh5CLKRDWwPhC8GWHM
Fh053Drg8bkWY/qG4070FVfQU/Os5w65pS9knDPe+1AFC9Rl7glNYtcMVPO6psvX
2UgvSCZ5McFZJt+eQceWDFIK6Zl0gJ5YEFVsIPug93x3EEdXXYL5UfosaYeSS1L8
g1ATdNA3otvPYvcwOVo/USjwwQ9OODb3tQLlp8NynOJ+v9oTju22RXLaa4iwt7d/
qk+bfmxfZTjEb0dz92SKGtIHwTSzDUAxb7kvpQLtQ3utGuH/46ozxJQ0FUoOLATr
pp2n7aCmgWupuRybB6U9tQNICydOGbY5lBrmzQC3/dR2IdayCuRhKqiGe5z10KCa
gu7LGncL
=1a8X
-----END PGP SIGNATURE-----

View File

@ -0,0 +1,40 @@
Package: socktop
Version: 1.50.0-1
Architecture: amd64
Maintainer: Jason Witty <jasonpwitty+socktop@proton.me>
Installed-Size: 3447
Depends: libc6 (>= 2.39)
Filename: pool/main/socktop_1.50.0-1_amd64.deb
Size: 1277472
MD5sum: 8af32694d8ea66feb97bc9896dd1034f
SHA1: acf596897449d1ec3a3eaee7c7fe25e716d91a8c
SHA256: 59ade1d2cc919fa672c2b8b50a05905defc1a220776949b78743d054eaa07994
Section: admin
Priority: optional
Homepage: https://github.com/jasonwitty/socktop
Description: Remote system monitor over WebSocket, TUI like top
socktop is a remote system monitor with a rich terminal user interface (TUI)
that connects to remote hosts running the socktop_agent over WebSocket. It
provides real-time monitoring of CPU, memory, processes, and more with an
interface similar to the traditional 'top' command.
Package: socktop-agent
Version: 1.50.2-1
Architecture: amd64
Maintainer: Jason Witty <jasonpwitty+socktop@proton.me>
Installed-Size: 6726
Depends: libc6 (>= 2.39), libdrm-amdgpu1 (>= 2.4.80)
Filename: pool/main/socktop-agent_1.50.2-1_amd64.deb
Size: 1869464
MD5sum: b708dbc7330e8c8f2d2d6812f978f3c9
SHA1: 2fce3376934586a630031f9a6dc4ed196c37200e
SHA256: 4a1d7e9794048b6dc0a74a428d1536903c9c3e3ed8b52d53539f1ecb4ebc3967
Section: admin
Priority: optional
Homepage: https://github.com/jasonwitty/socktop
Description: Socktop agent daemon. Serves host metrics over WebSocket.
socktop_agent is the daemon component that runs on remote hosts to collect and
serve system metrics over WebSocket. It gathers CPU, memory, disk, network,
GPU, and process information that can be monitored remotely by the socktop TUI
client.

Binary file not shown.

View File

@ -0,0 +1,5 @@
Archive: stable
Component: main
Origin: socktop
Label: socktop
Architecture: amd64

View File

@ -0,0 +1,40 @@
Package: socktop
Version: 1.50.0-1
Architecture: arm64
Maintainer: Jason Witty <jasonpwitty+socktop@proton.me>
Installed-Size: 2908
Depends: libc6 (>= 2.39)
Filename: pool/main/socktop_1.50.0-1_arm64.deb
Size: 1139764
MD5sum: 05fa53ec3555238b6454ffe44acba4a5
SHA1: c6f062f0d191e45d34e2d349864dfa552433bd0c
SHA256: 976de361b1f867e5c3fe1b8bba34862f6e92564980367a1b5a9ff024c7825273
Section: admin
Priority: optional
Homepage: https://github.com/jasonwitty/socktop
Description: Remote system monitor over WebSocket, TUI like top
socktop is a remote system monitor with a rich terminal user interface (TUI)
that connects to remote hosts running the socktop_agent over WebSocket. It
provides real-time monitoring of CPU, memory, processes, and more with an
interface similar to the traditional 'top' command.
Package: socktop-agent
Version: 1.50.2-1
Architecture: arm64
Maintainer: Jason Witty <jasonpwitty+socktop@proton.me>
Installed-Size: 5220
Depends: libc6 (>= 2.39), libdrm-amdgpu1 (>= 2.4.80)
Filename: pool/main/socktop-agent_1.50.2-1_arm64.deb
Size: 1645464
MD5sum: 517f46814ffc34ca3075ce0fc6020f1f
SHA1: a120d9b763b453c0e860fea8e3f121e4cabda329
SHA256: 72d562c50f4de437c5e8f46fb79cc0216b96f5bdc2b96958a51ae69aa156eead
Section: admin
Priority: optional
Homepage: https://github.com/jasonwitty/socktop
Description: Socktop agent daemon. Serves host metrics over WebSocket.
socktop_agent is the daemon component that runs on remote hosts to collect and
serve system metrics over WebSocket. It gathers CPU, memory, disk, network,
GPU, and process information that can be monitored remotely by the socktop TUI
client.

Binary file not shown.

View File

@ -0,0 +1,40 @@
Package: socktop
Version: 1.50.0-1
Architecture: armhf
Maintainer: Jason Witty <jasonpwitty+socktop@proton.me>
Installed-Size: 2706
Depends: libc6:armhf (>= 2.39)
Filename: pool/main/socktop_1.50.0-1_armhf.deb
Size: 986508
MD5sum: 5872abb834d52fefd05d3be848c0c466
SHA1: c2cd0f4bc1578541836f44c8595a90b07815448f
SHA256: c246bd1fbad3598129dd3101296791aa75d0d817d2a9b33ea18e21b95712bdeb
Section: admin
Priority: optional
Homepage: https://github.com/jasonwitty/socktop
Description: Remote system monitor over WebSocket, TUI like top
socktop is a remote system monitor with a rich terminal user interface (TUI)
that connects to remote hosts running the socktop_agent over WebSocket. It
provides real-time monitoring of CPU, memory, processes, and more with an
interface similar to the traditional 'top' command.
Package: socktop-agent
Version: 1.50.2-1
Architecture: armhf
Maintainer: Jason Witty <jasonpwitty+socktop@proton.me>
Installed-Size: 3919
Depends: libc6:armhf (>= 2.39)
Filename: pool/main/socktop-agent_1.50.2-1_armhf.deb
Size: 1494848
MD5sum: 90aae9922ffe58b9685c10e38cdb91f0
SHA1: f8aab5171952a9a5b1251a00e907ae6805286edd
SHA256: 6c1f813a899416243a8c6d11dd424832a8a7184e3da200ba72e3afe23cb5fcd6
Section: admin
Priority: optional
Homepage: https://github.com/jasonwitty/socktop
Description: Socktop agent daemon. Serves host metrics over WebSocket.
socktop_agent is the daemon component that runs on remote hosts to collect and
serve system metrics over WebSocket. It gathers CPU, memory, disk, network,
GPU, and process information that can be monitored remotely by the socktop TUI
client.

Binary file not shown.

View File

@ -0,0 +1,40 @@
Package: socktop
Version: 1.50.0-1
Architecture: riscv64
Maintainer: Jason Witty <jasonpwitty+socktop@proton.me>
Installed-Size: 2664
Depends: libc6:riscv64 (>= 2.39)
Filename: pool/main/socktop_1.50.0-1_riscv64.deb
Size: 1133776
MD5sum: 8d35cb4a2e61e4817bedd36362f3d21e
SHA1: 242c3818e20e4fa8ab2fd6bd9a897aab0246c01d
SHA256: c8138d798820fc0bcdcf12a945f35b6a45635cbec109548ce65cb4ab3db7690a
Section: admin
Priority: optional
Homepage: https://github.com/jasonwitty/socktop
Description: Remote system monitor over WebSocket, TUI like top
socktop is a remote system monitor with a rich terminal user interface (TUI)
that connects to remote hosts running the socktop_agent over WebSocket. It
provides real-time monitoring of CPU, memory, processes, and more with an
interface similar to the traditional 'top' command.
Package: socktop-agent
Version: 1.50.2-1
Architecture: riscv64
Maintainer: Jason Witty <jasonpwitty+socktop@proton.me>
Installed-Size: 4068
Depends: libc6:riscv64 (>= 2.39)
Filename: pool/main/socktop-agent_1.50.2-1_riscv64.deb
Size: 1698504
MD5sum: 76249497cbd2bb14e86b0a680a628d22
SHA1: 1b427d86313d2d906886f33f5f274b6515958245
SHA256: 52f17798f0a208a5067a266fad84e535c30137620273a218caac465fb969f48e
Section: admin
Priority: optional
Homepage: https://github.com/jasonwitty/socktop
Description: Socktop agent daemon. Serves host metrics over WebSocket.
socktop_agent is the daemon component that runs on remote hosts to collect and
serve system metrics over WebSocket. It gathers CPU, memory, disk, network,
GPU, and process information that can be monitored remotely by the socktop TUI
client.

Binary file not shown.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 775 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 879 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.2 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 152 KiB

View File

@ -1,204 +0,0 @@
# Cross-Compiling socktop_agent for Raspberry Pi
This guide explains how to cross-compile the socktop_agent on various host systems and deploy it to a Raspberry Pi. Cross-compiling is particularly useful for older or resource-constrained Pi models where native compilation might be slow.
## Cross-Compilation Host Setup
Choose your host operating system:
- [Debian/Ubuntu](#debianubuntu-based-systems)
- [Arch Linux](#arch-linux-based-systems)
- [macOS](#macos)
- [Windows](#windows)
## Debian/Ubuntu Based Systems
### Prerequisites
Install the cross-compilation toolchain for your target Raspberry Pi architecture:
```bash
# For 64-bit Raspberry Pi (aarch64)
sudo apt update
sudo apt install gcc-aarch64-linux-gnu libc6-dev-arm64-cross libdrm-dev:arm64
# For 32-bit Raspberry Pi (armv7)
sudo apt update
sudo apt install gcc-arm-linux-gnueabihf libc6-dev-armhf-cross libdrm-dev:armhf
```
### Setup Rust Cross-Compilation Targets
```bash
# For 64-bit Raspberry Pi
rustup target add aarch64-unknown-linux-gnu
# For 32-bit Raspberry Pi
rustup target add armv7-unknown-linux-gnueabihf
```
### Configure Cargo for Cross-Compilation
Create or edit `~/.cargo/config.toml`:
```toml
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"
[target.armv7-unknown-linux-gnueabihf]
linker = "arm-linux-gnueabihf-gcc"
```
## Arch Linux Based Systems
### Prerequisites
Install the cross-compilation toolchain using pacman and AUR:
```bash
# Install base dependencies
sudo pacman -S base-devel
# For 64-bit Raspberry Pi (aarch64)
sudo pacman -S aarch64-linux-gnu-gcc
# Install libdrm for aarch64 using an AUR helper (e.g., yay, paru)
yay -S aarch64-linux-gnu-libdrm
# For 32-bit Raspberry Pi (armv7)
sudo pacman -S arm-linux-gnueabihf-gcc
# Install libdrm for armv7 using an AUR helper
yay -S arm-linux-gnueabihf-libdrm
```
### Setup Rust Cross-Compilation Targets
```bash
# For 64-bit Raspberry Pi
rustup target add aarch64-unknown-linux-gnu
# For 32-bit Raspberry Pi
rustup target add armv7-unknown-linux-gnueabihf
```
### Configure Cargo for Cross-Compilation
Create or edit `~/.cargo/config.toml`:
```toml
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"
[target.armv7-unknown-linux-gnueabihf]
linker = "arm-linux-gnueabihf-gcc"
```
## macOS
The recommended approach for cross-compiling from macOS is to use Docker:
```bash
# Install Docker
brew install --cask docker
# Pull a cross-compilation Docker image
docker pull messense/rust-musl-cross:armv7-musleabihf # For 32-bit Pi
docker pull messense/rust-musl-cross:aarch64-musl # For 64-bit Pi
```
### Using Docker for Cross-Compilation
```bash
# Navigate to your socktop project directory
cd path/to/socktop
# For 64-bit Raspberry Pi
docker run --rm -it -v "$(pwd)":/home/rust/src messense/rust-musl-cross:aarch64-musl cargo build --release --target aarch64-unknown-linux-musl -p socktop_agent
# For 32-bit Raspberry Pi
docker run --rm -it -v "$(pwd)":/home/rust/src messense/rust-musl-cross:armv7-musleabihf cargo build --release --target armv7-unknown-linux-musleabihf -p socktop_agent
```
The compiled binaries will be available in your local target directory.
## Windows
The recommended approach for Windows is to use Windows Subsystem for Linux (WSL2):
1. Install WSL2 with a Debian/Ubuntu distribution by following the [official Microsoft documentation](https://docs.microsoft.com/en-us/windows/wsl/install).
2. Once WSL2 is set up with a Debian/Ubuntu distribution, open your WSL terminal and follow the [Debian/Ubuntu instructions](#debianubuntu-based-systems) above.
## Cross-Compile the Agent
After setting up your environment, build the socktop_agent for your target Raspberry Pi:
```bash
# For 64-bit Raspberry Pi
cargo build --release --target aarch64-unknown-linux-gnu -p socktop_agent
# For 32-bit Raspberry Pi
cargo build --release --target armv7-unknown-linux-gnueabihf -p socktop_agent
```
## Transfer the Binary to Your Raspberry Pi
Use SCP to transfer the compiled binary to your Raspberry Pi:
```bash
# For 64-bit Raspberry Pi
scp target/aarch64-unknown-linux-gnu/release/socktop_agent pi@raspberry-pi-ip:~/
# For 32-bit Raspberry Pi
scp target/armv7-unknown-linux-gnueabihf/release/socktop_agent pi@raspberry-pi-ip:~/
```
Replace `raspberry-pi-ip` with your Raspberry Pi's IP address and `pi` with your username.
## Install Dependencies on the Raspberry Pi
SSH into your Raspberry Pi and install the required dependencies:
```bash
ssh pi@raspberry-pi-ip
# For Raspberry Pi OS (Debian-based)
sudo apt update
sudo apt install libdrm-dev libdrm-amdgpu1
# For Arch Linux ARM
sudo pacman -Syu
sudo pacman -S libdrm
```
## Make the Binary Executable and Install
```bash
chmod +x ~/socktop_agent
# Optional: Install system-wide
sudo install -o root -g root -m 0755 ~/socktop_agent /usr/local/bin/socktop_agent
# Optional: Set up as a systemd service
sudo install -o root -g root -m 0644 ~/socktop-agent.service /etc/systemd/system/socktop-agent.service
sudo systemctl daemon-reload
sudo systemctl enable --now socktop-agent
```
## Troubleshooting
If you encounter issues with the cross-compiled binary:
1. **Incorrect Architecture**: Ensure you've chosen the correct target for your Raspberry Pi model:
- For Raspberry Pi 2: use `armv7-unknown-linux-gnueabihf`
- For Raspberry Pi 3/4/5 in 64-bit mode: use `aarch64-unknown-linux-gnu`
- For Raspberry Pi 3/4/5 in 32-bit mode: use `armv7-unknown-linux-gnueabihf`
2. **Dependency Issues**: Check for missing libraries:
```bash
ldd ~/socktop_agent
```
3. **Run with Backtrace**: Get detailed error information:
```bash
RUST_BACKTRACE=1 ~/socktop_agent
```

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.1 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 616 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.4 MiB

View File

@ -1,18 +0,0 @@
[Unit]
Description=Socktop agent
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
ExecStart=/usr/local/bin/socktop_agent --port 3000
Environment=RUST_LOG=info
# Optional auth:
# Environment=SOCKTOP_TOKEN=changeme
Restart=on-failure
User=socktop
Group=socktop
NoNewPrivileges=true
[Install]
WantedBy=multi-user.target

Binary file not shown.

Before

Width:  |  Height:  |  Size: 151 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 47 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.6 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.3 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.3 MiB

364
index.html Normal file
View File

@ -0,0 +1,364 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>socktop APT Repository</title>
<style>
/* Catppuccin Frappe Color Palette */
:root {
--ctp-base: #303446;
--ctp-mantle: #292c3c;
--ctp-crust: #232634;
--ctp-text: #c6d0f5;
--ctp-subtext1: #b5bfe2;
--ctp-subtext0: #a5adce;
--ctp-overlay2: #949cbb;
--ctp-overlay1: #838ba7;
--ctp-overlay0: #737994;
--ctp-surface2: #626880;
--ctp-surface1: #51576d;
--ctp-surface0: #414559;
--ctp-lavender: #babbf1;
--ctp-blue: #8caaee;
--ctp-sapphire: #85c1dc;
--ctp-sky: #99d1db;
--ctp-teal: #81c8be;
--ctp-green: #a6d189;
--ctp-yellow: #e5c890;
--ctp-peach: #ef9f76;
--ctp-maroon: #ea999c;
--ctp-red: #e78284;
--ctp-mauve: #ca9ee6;
--ctp-pink: #f4b8e4;
--ctp-flamingo: #eebebe;
--ctp-rosewater: #f2d5cf;
}
* {
box-sizing: border-box;
margin: 0;
padding: 0;
}
body {
font-family:
-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto,
"Helvetica Neue", Arial, sans-serif;
background-color: var(--ctp-base);
color: var(--ctp-text);
line-height: 1.6;
padding: 20px;
}
.container {
max-width: 900px;
margin: 0 auto;
background-color: var(--ctp-mantle);
border-radius: 12px;
padding: 40px;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.3);
}
h1 {
color: var(--ctp-blue);
font-size: 2.5em;
margin-bottom: 10px;
border-bottom: 3px solid var(--ctp-surface0);
padding-bottom: 15px;
}
h2 {
color: var(--ctp-mauve);
font-size: 1.8em;
margin-top: 35px;
margin-bottom: 15px;
}
h3 {
color: var(--ctp-sapphire);
font-size: 1.3em;
margin-top: 25px;
margin-bottom: 10px;
}
p {
margin-bottom: 15px;
color: var(--ctp-subtext0);
}
.subtitle {
color: var(--ctp-subtext1);
font-size: 1.2em;
margin-bottom: 30px;
}
code {
background-color: var(--ctp-surface0);
color: var(--ctp-green);
padding: 3px 8px;
border-radius: 5px;
font-family: "Courier New", Courier, monospace;
font-size: 0.95em;
}
pre {
background-color: var(--ctp-crust);
border: 1px solid var(--ctp-surface0);
border-radius: 8px;
padding: 20px;
overflow-x: auto;
margin: 15px 0;
position: relative;
padding-top: 18px; /* leave space for top-right button */
}
pre code {
background: transparent;
color: var(--ctp-text);
display: block;
white-space: pre;
font-family: "Courier New", Courier, monospace;
font-size: 0.95em;
}
/* Copy button styles */
.copy-btn {
position: absolute;
top: 8px;
right: 8px;
background: var(--ctp-surface1);
color: var(--ctp-text);
border: 1px solid var(--ctp-surface2);
padding: 6px 8px;
border-radius: 6px;
cursor: pointer;
font-size: 0.9rem;
display: inline-flex;
align-items: center;
gap: 6px;
transition:
background 0.12s ease,
transform 0.08s ease;
}
.copy-btn:hover {
background: var(--ctp-surface2);
transform: translateY(-1px);
}
.copy-btn:active {
transform: translateY(0);
}
.copy-btn.copied {
background: var(--ctp-green);
color: var(--ctp-crust);
border-color: transparent;
}
.badge {
display: inline-block;
background-color: var(--ctp-surface1);
color: var(--ctp-text);
padding: 5px 12px;
border-radius: 6px;
font-size: 0.9em;
margin: 5px 5px 5px 0;
border: 1px solid var(--ctp-surface2);
}
.badge.arch {
background-color: var(--ctp-surface0);
color: var(--ctp-lavender);
}
.note {
background-color: var(--ctp-surface0);
border-left: 4px solid var(--ctp-yellow);
padding: 15px;
margin: 20px 0;
border-radius: 5px;
}
.note strong {
color: var(--ctp-yellow);
}
.footer {
margin-top: 50px;
padding-top: 20px;
border-top: 2px solid var(--ctp-surface0);
text-align: center;
color: var(--ctp-overlay1);
font-size: 0.9em;
}
.command-comment {
color: var(--ctp-overlay1);
display: block;
margin-bottom: 6px;
}
.highlight-blue {
color: var(--ctp-blue);
}
.highlight-green {
color: var(--ctp-green);
}
.highlight-yellow {
color: var(--ctp-yellow);
}
.highlight-mauve {
color: var(--ctp-mauve);
}
.highlight-peach {
color: var(--ctp-peach);
}
@media (max-width: 768px) {
.container {
padding: 25px;
}
h1 {
font-size: 2em;
}
h2 {
font-size: 1.5em;
}
}
</style>
</head>
<body>
<div class="container">
<h1>socktop APT Repository</h1>
<p class="subtitle">
System monitor with remote agent support for Linux systems
</p>
<h2>📦 Quick Installation</h2>
<p>Add this repository to your Debian/Ubuntu system:</p>
<h3>Step 1: Add GPG Key</h3>
<pre><code># Add the repository's GPG signing key
curl -fsSL https://jasonwitty.github.io/socktop/KEY.gpg | \
sudo gpg --dearmor -o /usr/share/keyrings/socktop-archive-keyring.gpg</code></pre>
<h3>Step 2: Add Repository</h3>
<pre><code># Add the APT repository to your sources
echo "deb [signed-by=/usr/share/keyrings/socktop-archive-keyring.gpg] https://jasonwitty.github.io/socktop stable main" | \
sudo tee /etc/apt/sources.list.d/socktop.list</code></pre>
<h3>Step 3: Install</h3>
<pre><code># Update package lists and install
sudo apt update
sudo apt install socktop socktop-agent</code></pre>
<h2>📋 What's Included</h2>
<ul>
<li>
<strong class="highlight-blue">socktop</strong> - Terminal
UI client for monitoring systems
</li>
<li>
<strong class="highlight-mauve">socktop-agent</strong> -
Background agent that reports system metrics
</li>
</ul>
<div class="note">
<strong>Note:</strong> The agent package automatically installs
and configures a systemd service. Enable it with:
<code>sudo systemctl enable --now socktop-agent</code>
</div>
<h2>🏗️ Supported Architectures</h2>
<div>
<span class="badge arch">amd64</span>
<span class="badge arch">arm64</span>
<span class="badge arch">armhf</span>
<span class="badge arch">riscv64</span>
</div>
<h2>🔧 Usage</h2>
<p>After installation:</p>
<pre><code># Start the TUI client
socktop
# Connect to a remote agent
socktop ws://hostname:3000
# Start the agent (if not using systemd)
socktop_agent</code></pre>
<h2>🔗 Links</h2>
<ul>
<li>
<a href="https://github.com/jasonwitty/socktop"
>Source Code on GitHub</a
>
</li>
<li>
<a href="https://github.com/jasonwitty/socktop/issues"
>Report Issues</a
>
</li>
<li><a href="README.md">Repository Documentation</a></li>
</ul>
<div class="footer">
<p>Hosted on GitHub Pages | Packages signed with GPG</p>
<p>
Theme:
<a href="https://github.com/catppuccin/catppuccin"
>Catppuccin Frappe</a
>
</p>
</div>
</div>
<script>
// Attach copy buttons to all <pre> blocks and enable copy-to-clipboard.
document.addEventListener("DOMContentLoaded", function () {
    document.querySelectorAll("pre").forEach((pre) => {
        // Build the per-block copy button.
        // Emoji label keeps this dependency-free; replace with SVG if desired.
        const btn = document.createElement("button");
        btn.type = "button";
        btn.className = "copy-btn";
        btn.setAttribute("aria-label", "Copy code to clipboard");
        btn.innerText = "📋";
        pre.appendChild(btn);

        btn.addEventListener("click", async (e) => {
            e.stopPropagation();
            // Copy only the code text, not the button label.
            const code = pre.querySelector("code");
            const text = code ? code.innerText : pre.innerText;
            try {
                if (navigator.clipboard) {
                    await navigator.clipboard.writeText(text);
                } else {
                    // Fallback for browsers/contexts without the async
                    // Clipboard API: select-and-copy via a hidden textarea.
                    const textarea = document.createElement("textarea");
                    textarea.value = text;
                    textarea.style.position = "fixed";
                    textarea.style.left = "-9999px";
                    document.body.appendChild(textarea);
                    textarea.select();
                    document.execCommand("copy");
                    document.body.removeChild(textarea);
                }
                // Brief visual feedback, then restore the default label.
                btn.classList.add("copied");
                btn.innerText = "✓ Copied";
                setTimeout(() => {
                    btn.classList.remove("copied");
                    btn.innerText = "📋";
                }, 1800);
            } catch (err) {
                btn.innerText = "✖";
                setTimeout(() => (btn.innerText = "📋"), 1500);
                console.error("Copy failed", err);
            }
        });
    });
});
</script>
</body>
</html>

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -1,15 +0,0 @@
syntax = "proto3";
package socktop;
// All running processes. Sorting is done client-side.
message Processes {
uint64 process_count = 1; // total processes in the system
repeated Process rows = 2; // all processes
}
message Process {
uint32 pid = 1;
string name = 2;
float cpu_usage = 3; // 0..100
uint64 mem_bytes = 4; // RSS bytes
}

View File

@ -1,3 +0,0 @@
[toolchain]
channel = "stable"
components = ["clippy", "rustfmt"]

View File

@ -1,47 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Cross-check Windows build from Linux using the GNU (MinGW) toolchain.
# - Ensures target `x86_64-pc-windows-gnu` is installed
# - Verifies MinGW cross-compiler is available (x86_64-w64-mingw32-gcc)
# - Runs cargo clippy with warnings-as-errors for the Windows target
# - Builds release binaries for the Windows target
echo "[socktop] Windows cross-check: clippy + build (GNU target)"
# Helper: true if the named command exists on PATH.
have() { command -v "$1" >/dev/null 2>&1; }
if ! have rustup; then
echo "error: rustup not found. Install Rust via rustup first (see README)." >&2
exit 1
fi
# Install the Windows GNU target if it is not already present.
if ! rustup target list --installed | grep -q '^x86_64-pc-windows-gnu$'; then
echo "+ rustup target add x86_64-pc-windows-gnu"
rustup target add x86_64-pc-windows-gnu
fi
# The GNU target links with MinGW; suggest the right package per distro.
if ! have x86_64-w64-mingw32-gcc; then
echo "error: Missing MinGW cross-compiler (x86_64-w64-mingw32-gcc)." >&2
if have pacman; then
echo "Arch Linux: sudo pacman -S --needed mingw-w64-gcc" >&2
elif have apt-get; then
echo "Debian/Ubuntu: sudo apt-get install -y mingw-w64" >&2
elif have dnf; then
echo "Fedora: sudo dnf install -y mingw64-gcc" >&2
else
echo "Install the mingw-w64 toolchain for your distro, then re-run." >&2
fi
exit 1
fi
# Shared cargo arguments: whole workspace, all targets/features, Windows triple.
CARGO_FLAGS=(--workspace --all-targets --all-features --target x86_64-pc-windows-gnu)
echo "+ cargo clippy ${CARGO_FLAGS[*]} -- -D warnings"
cargo clippy "${CARGO_FLAGS[@]}" -- -D warnings
echo "+ cargo build --release ${CARGO_FLAGS[*]}"
cargo build --release "${CARGO_FLAGS[@]}"
echo "✅ Windows clippy and build completed successfully."

View File

@ -1,43 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Publish job: "publish new socktop agent version"
# Usage: ./scripts/publish_socktop_agent.sh <new_version>
# Pipeline: fmt -> test -> clippy -> build -> bump version -> commit/push -> cargo publish.
if [[ ${1:-} == "" ]]; then
echo "Usage: $0 <new_version>" >&2
exit 1
fi
NEW_VERSION="$1"
# Resolve repo root relative to this script so it works from any CWD.
ROOT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")/.." && pwd)
CRATE_DIR="$ROOT_DIR/socktop_agent"
echo "==> Formatting socktop_agent"
(cd "$ROOT_DIR" && cargo fmt -p socktop_agent)
echo "==> Running tests for socktop_agent"
(cd "$ROOT_DIR" && cargo test -p socktop_agent)
echo "==> Running clippy (warnings as errors) for socktop_agent"
(cd "$ROOT_DIR" && cargo clippy -p socktop_agent -- -D warnings)
echo "==> Building release for socktop_agent"
(cd "$ROOT_DIR" && cargo build -p socktop_agent --release)
echo "==> Bumping version to $NEW_VERSION in socktop_agent/Cargo.toml"
# NOTE(review): this substitutes EVERY line matching `^version = "X.Y.Z"` in the
# manifest — fine while only [package] has a top-level version line; verify if
# the manifest ever gains another such line.
sed -i.bak -E "s/^version = \"[0-9]+\.[0-9]+\.[0-9]+\"/version = \"$NEW_VERSION\"/" "$CRATE_DIR/Cargo.toml"
rm -f "$CRATE_DIR/Cargo.toml.bak"
echo "==> Committing version bump"
(cd "$ROOT_DIR" && git add -A && git commit -m "socktop_agent: bump version to $NEW_VERSION")
# Push to whatever branch is currently checked out.
CURRENT_BRANCH=$(cd "$ROOT_DIR" && git rev-parse --abbrev-ref HEAD)
echo "==> Pushing to origin $CURRENT_BRANCH"
(cd "$ROOT_DIR" && git push origin "$CURRENT_BRANCH")
# NOTE(review): the fmt/test/clippy/build steps above ran against the OLD
# version; cargo publish re-verifies and rebuilds at the new version.
echo "==> Publishing socktop_agent $NEW_VERSION to crates.io"
(cd "$ROOT_DIR" && cargo publish -p socktop_agent)
echo "==> Done: socktop_agent $NEW_VERSION published"

View File

@ -1,27 +0,0 @@
[package]
name = "socktop"
version = "1.50.0"
authors = ["Jason Witty <jasonpwitty+socktop@proton.me>"]
description = "Remote system monitor over WebSocket, TUI like top"
edition = "2024"
license = "MIT"
readme = "README.md"
[dependencies]
# socktop connector for agent communication
socktop_connector = "1.50.0"
tokio = { workspace = true }
futures-util = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
url = { workspace = true }
ratatui = { workspace = true }
crossterm = { workspace = true }
anyhow = { workspace = true }
dirs-next = { workspace = true }
sysinfo = { workspace = true }
[dev-dependencies]
assert_cmd = "2.0"
tempfile = "3"

View File

@ -1,26 +0,0 @@
# socktop (client)
Minimal TUI client for the socktop remote monitoring agent.
Features:
- Connects to a socktop_agent over WebSocket / secure WebSocket
- Displays CPU, memory, swap, disks, network, processes, (optional) GPU metrics
- Self-signed TLS cert pinning via --tls-ca
- Profile management with saved intervals
- Low CPU usage (request-driven updates)
Quick start:
```
cargo install socktop
socktop ws://HOST:3000/ws
```
With TLS (copy agent cert first):
```
socktop --tls-ca cert.pem wss://HOST:8443/ws
```
Demo mode (automatically spawns a local agent after a first-run prompt):
```
socktop --demo
```
Full documentation, screenshots, and advanced usage:
https://github.com/jasonwitty/socktop

View File

@ -1,15 +0,0 @@
syntax = "proto3";
package socktop;
// All running processes. Sorting is done client-side.
message Processes {
uint64 process_count = 1; // total processes in the system
repeated Process rows = 2; // all processes
}
message Process {
uint32 pid = 1;
string name = 2;
float cpu_usage = 3; // 0..100
uint64 mem_bytes = 4; // RSS bytes
}

File diff suppressed because it is too large Load Diff

View File

@ -1,42 +0,0 @@
//! Small utilities to manage bounded history buffers for charts.
use std::collections::VecDeque;
/// Push `v` onto the back of `dq`, evicting from the front so the deque
/// never holds more than `cap` elements.
///
/// A `cap` of 0 means "keep nothing": the value is dropped. If the deque
/// already exceeds `cap` (e.g. the cap was lowered between calls), the
/// overflow is drained first. The previous `len() == cap` check grew the
/// deque without bound for `cap == 0` and never shrank an oversized one.
pub fn push_capped<T>(dq: &mut VecDeque<T>, v: T, cap: usize) {
    if cap == 0 {
        return;
    }
    // Evict oldest samples until there is room for exactly one more.
    while dq.len() >= cap {
        dq.pop_front();
    }
    dq.push_back(v);
}
/// Fixed-capacity sample history, one ring buffer per CPU core.
pub struct PerCoreHistory {
    pub deques: Vec<VecDeque<u16>>,
    cap: usize,
}

impl PerCoreHistory {
    /// Create an empty history; per-core buffers are allocated on first use.
    pub fn new(cap: usize) -> Self {
        Self {
            deques: Vec::new(),
            cap,
        }
    }

    /// Guarantee exactly `n` per-core buffers. A change in core count
    /// (CPU topology change) discards all previously stored samples.
    pub fn ensure_cores(&mut self, n: usize) {
        if self.deques.len() != n {
            self.deques = std::iter::repeat_with(|| VecDeque::with_capacity(self.cap))
                .take(n)
                .collect();
        }
    }

    /// Record one sample per core; each value is clamped to 0..=100 and
    /// rounded before being stored, evicting the oldest entry at capacity.
    pub fn push_samples(&mut self, samples: &[f32]) {
        self.ensure_cores(samples.len());
        for (dq, sample) in self.deques.iter_mut().zip(samples) {
            let val = sample.clamp(0.0, 100.0).round() as u16;
            if dq.len() == self.cap {
                dq.pop_front();
            }
            dq.push_back(val);
        }
    }
}

View File

@ -1,6 +0,0 @@
//! Library surface for integration tests and reuse.
pub mod types;
// Re-export connector functionality
pub use socktop_connector::{SocktopConnector, connect_to_socktop_agent};

View File

@ -1,433 +0,0 @@
//! Entry point for the socktop TUI. Parses args and runs the App.
mod app;
mod history;
mod profiles;
mod retry;
mod types;
mod ui; // pure retry timing logic
use app::App;
use profiles::{ProfileEntry, ProfileRequest, ResolveProfile, load_profiles, save_profiles};
use std::env;
use std::io::{self, Write};
/// Command-line options accepted by the socktop client binary,
/// produced by `parse_args`.
pub(crate) struct ParsedArgs {
    /// Positional agent URL, e.g. `ws://HOST:PORT/ws`.
    url: Option<String>,
    /// `--tls-ca`/`-t`: path to a PEM certificate to pin for TLS.
    tls_ca: Option<String>,
    /// `--profile`/`-P`: named connection profile to load or create.
    profile: Option<String>,
    /// `--save`: overwrite an existing profile without prompting.
    save: bool,
    /// `--demo`: run demo mode with a locally spawned agent.
    demo: bool,
    dry_run: bool, // hidden test helper: skip connecting
    /// `--metrics-interval-ms N`: metrics polling interval override.
    metrics_interval_ms: Option<u64>,
    /// `--processes-interval-ms N`: process polling interval override.
    processes_interval_ms: Option<u64>,
    /// `--verify-hostname`: opt in to TLS hostname (SAN) verification;
    /// off by default so pinned self-signed certs work on home networks.
    verify_hostname: bool,
}
/// Parse raw CLI arguments (including `argv[0]`) into a `ParsedArgs`.
///
/// Returns `Err` with a usage string on `-h`/`--help` or when more than
/// one positional argument is supplied; all other unknown-looking values
/// are treated as the (single) positional URL.
pub(crate) fn parse_args<I: IntoIterator<Item = String>>(args: I) -> Result<ParsedArgs, String> {
    let mut iter = args.into_iter();
    // argv[0]; used only to render the usage messages.
    let prog = iter.next().unwrap_or_else(|| "socktop".into());

    let mut parsed = ParsedArgs {
        url: None,
        tls_ca: None,
        profile: None,
        save: false,
        demo: false,
        dry_run: false,
        metrics_interval_ms: None,
        processes_interval_ms: None,
        verify_hostname: false,
    };

    loop {
        let Some(arg) = iter.next() else { break };
        match arg.as_str() {
            "-h" | "--help" => {
                return Err(format!(
                    "Usage: {prog} [--tls-ca CERT_PEM|-t CERT_PEM] [--verify-hostname] [--profile NAME|-P NAME] [--save] [--demo] [--metrics-interval-ms N] [--processes-interval-ms N] [ws://HOST:PORT/ws]\n"
                ));
            }
            // Value-taking flags consume the next argument (None if absent).
            "--tls-ca" | "-t" => parsed.tls_ca = iter.next(),
            "--profile" | "-P" => parsed.profile = iter.next(),
            // Opt-in hostname (SAN) verification; the default skips it for
            // easier home-network usage (the provided cert is still pinned).
            "--verify-hostname" => parsed.verify_hostname = true,
            "--save" => parsed.save = true,
            "--demo" => parsed.demo = true,
            // Intentionally undocumented test helper.
            "--dry-run" => parsed.dry_run = true,
            "--metrics-interval-ms" => {
                parsed.metrics_interval_ms = iter.next().and_then(|v| v.parse().ok())
            }
            "--processes-interval-ms" => {
                parsed.processes_interval_ms = iter.next().and_then(|v| v.parse().ok())
            }
            other => {
                // `--flag=value` spellings, then the single positional URL.
                if let Some(v) = other.strip_prefix("--tls-ca=") {
                    if !v.is_empty() {
                        parsed.tls_ca = Some(v.to_string());
                    }
                } else if let Some(v) = other.strip_prefix("--profile=") {
                    if !v.is_empty() {
                        parsed.profile = Some(v.to_string());
                    }
                } else if let Some(v) = other.strip_prefix("--metrics-interval-ms=") {
                    parsed.metrics_interval_ms = v.parse().ok();
                } else if let Some(v) = other.strip_prefix("--processes-interval-ms=") {
                    parsed.processes_interval_ms = v.parse().ok();
                } else if parsed.url.is_none() {
                    parsed.url = Some(other.to_string());
                } else {
                    return Err(format!(
                        "Unexpected argument. Usage: {prog} [--tls-ca CERT_PEM|-t CERT_PEM] [--verify-hostname] [--profile NAME|-P NAME] [--save] [--demo] [ws://HOST:PORT/ws]"
                    ));
                }
            }
        }
    }

    Ok(parsed)
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Parse CLI args first; usage errors are printed and treated as a normal
    // (zero-status) exit because this is an interactive tool.
    let parsed = match parse_args(env::args()) {
        Ok(v) => v,
        Err(msg) => {
            eprintln!("{msg}");
            return Ok(());
        }
    };
    //support version flag (print and exit)
    // NOTE(review): this check runs after parse_args, so `socktop URL -V`
    // fails in parse_args ("Unexpected argument") before the version prints —
    // confirm that ordering is intended.
    if env::args().any(|a| a == "--version" || a == "-V") {
        println!("socktop {}", env!("CARGO_PKG_VERSION"));
        return Ok(());
    }
    // Demo mode: either the --demo flag or the reserved profile name "demo".
    if parsed.demo || matches!(parsed.profile.as_deref(), Some("demo")) {
        return run_demo_mode(parsed.tls_ca.as_deref()).await;
    }
    let profiles_file = load_profiles();
    let req = ProfileRequest {
        profile_name: parsed.profile.clone(),
        url: parsed.url.clone(),
        tls_ca: parsed.tls_ca.clone(),
    };
    let resolved = req.resolve(&profiles_file);
    let mut profiles_mut = profiles_file.clone();
    // Resolve the effective connection settings (URL, TLS CA, poll intervals),
    // persisting profile changes along the way where appropriate.
    let (url, tls_ca, metrics_interval_ms, processes_interval_ms): (
        String,
        Option<String>,
        Option<u64>,
        Option<u64>,
    ) = match resolved {
        // URL given explicitly; optionally create/update a named profile.
        ResolveProfile::Direct(u, t) => {
            if let Some(name) = parsed.profile.as_ref() {
                let existing = profiles_mut.profiles.get(name);
                match existing {
                    // New profile: gather intervals (prompting if needed) and save.
                    None => {
                        let (mi, pi) = gather_intervals(
                            parsed.metrics_interval_ms,
                            parsed.processes_interval_ms,
                        )?;
                        profiles_mut.profiles.insert(
                            name.clone(),
                            ProfileEntry {
                                url: u.clone(),
                                tls_ca: t.clone(),
                                metrics_interval_ms: mi,
                                processes_interval_ms: pi,
                            },
                        );
                        // Best-effort save; failing to persist is not fatal.
                        let _ = save_profiles(&profiles_mut);
                        (u, t, mi, pi)
                    }
                    // Existing profile: only overwrite when URL/CA differ, and
                    // then only with --save or interactive confirmation.
                    Some(entry) => {
                        let changed = entry.url != u || entry.tls_ca != t;
                        if changed {
                            let overwrite = if parsed.save {
                                true
                            } else {
                                prompt_yes_no(&format!(
                                    "Overwrite existing profile '{name}'? [y/N]: "
                                ))
                            };
                            if overwrite {
                                let (mi, pi) = gather_intervals(
                                    parsed.metrics_interval_ms,
                                    parsed.processes_interval_ms,
                                )?;
                                profiles_mut.profiles.insert(
                                    name.clone(),
                                    ProfileEntry {
                                        url: u.clone(),
                                        tls_ca: t.clone(),
                                        metrics_interval_ms: mi,
                                        processes_interval_ms: pi,
                                    },
                                );
                                let _ = save_profiles(&profiles_mut);
                                (u, t, mi, pi)
                            } else {
                                (u, t, entry.metrics_interval_ms, entry.processes_interval_ms)
                            }
                        } else {
                            (u, t, entry.metrics_interval_ms, entry.processes_interval_ms)
                        }
                    }
                }
            } else {
                // No profile name: use the CLI-provided values as-is.
                (
                    u,
                    t,
                    parsed.metrics_interval_ms,
                    parsed.processes_interval_ms,
                )
            }
        }
        // Named profile found on disk: use its stored settings.
        ResolveProfile::Loaded(u, t) => {
            let entry = profiles_mut
                .profiles
                .get(parsed.profile.as_ref().unwrap())
                .unwrap();
            (u, t, entry.metrics_interval_ms, entry.processes_interval_ms)
        }
        // No args given: let the user pick a saved profile ("demo" is always offered).
        ResolveProfile::PromptSelect(mut names) => {
            if !names.iter().any(|n: &String| n == "demo") {
                names.push("demo".into());
            }
            eprintln!("Select profile:");
            for (i, n) in names.iter().enumerate() {
                eprintln!(" {}. {}", i + 1, n);
            }
            eprint!("Enter number (or blank to abort): ");
            let _ = io::stderr().flush();
            let mut line = String::new();
            // Any invalid, blank, or out-of-range selection aborts quietly
            // with a success exit code.
            if io::stdin().read_line(&mut line).is_ok() {
                if let Ok(idx) = line.trim().parse::<usize>() {
                    if (1..=names.len()).contains(&idx) {
                        let name = &names[idx - 1];
                        if name == "demo" {
                            return run_demo_mode(parsed.tls_ca.as_deref()).await;
                        }
                        if let Some(entry) = profiles_mut.profiles.get(name) {
                            (
                                entry.url.clone(),
                                entry.tls_ca.clone(),
                                entry.metrics_interval_ms,
                                entry.processes_interval_ms,
                            )
                        } else {
                            return Ok(());
                        }
                    } else {
                        return Ok(());
                    }
                } else {
                    return Ok(());
                }
            } else {
                return Ok(());
            }
        }
        // Profile name given but unknown: interactively create and save it.
        ResolveProfile::PromptCreate(name) => {
            eprintln!("Profile '{name}' does not exist yet.");
            let url = prompt_string("Enter URL (ws://HOST:PORT/ws or wss://...): ")?;
            if url.trim().is_empty() {
                return Ok(());
            }
            let ca = prompt_string("Enter TLS CA path (or leave blank): ")?;
            let ca_opt = if ca.trim().is_empty() {
                None
            } else {
                Some(ca.trim().to_string())
            };
            let (mi, pi) =
                gather_intervals(parsed.metrics_interval_ms, parsed.processes_interval_ms)?;
            profiles_mut.profiles.insert(
                name.clone(),
                ProfileEntry {
                    url: url.trim().to_string(),
                    tls_ca: ca_opt.clone(),
                    metrics_interval_ms: mi,
                    processes_interval_ms: pi,
                },
            );
            let _ = save_profiles(&profiles_mut);
            (url.trim().to_string(), ca_opt, mi, pi)
        }
        ResolveProfile::None => {
            //eprintln!("No URL provided and no profiles to select.");
            //first run, no args, no profiles: show welcome message and offer demo mode
            if profiles_mut.profiles.is_empty() && parsed.url.is_none() {
                eprintln!("Welcome to socktop!");
                eprintln!("It looks like this is your first time running the application.");
                eprintln!(
                    "You can connect to a socktop_agent instance to monitor system metrics and processes."
                );
                eprintln!("If you don't have an agent running, you can try the demo mode.");
                if prompt_yes_no("Would you like to start the demo mode now? [Y/n]: ") {
                    return run_demo_mode(parsed.tls_ca.as_deref()).await;
                } else {
                    eprintln!("Aborting. You can run 'socktop --help' for usage information.");
                    return Ok(());
                }
            }
            return Err("No URL provided and no profiles to select.".into());
        }
    };
    // Header indicators: TLS inferred from the scheme, token from the query string.
    let is_tls = url.starts_with("wss://");
    let has_token = url.contains("token=");
    let mut app = App::new()
        .with_intervals(metrics_interval_ms, processes_interval_ms)
        .with_status(is_tls, has_token);
    // --dry-run (undocumented): validate configuration, then exit without connecting.
    if parsed.dry_run {
        return Ok(());
    }
    app.run(&url, tls_ca.as_deref(), parsed.verify_hostname)
        .await
}
/// Ask a yes/no question on stderr; only "y"/"yes" (case-insensitive) count
/// as yes. Any read failure is treated as "no".
fn prompt_yes_no(prompt: &str) -> bool {
    eprint!("{prompt}");
    let _ = io::stderr().flush();
    let mut answer = String::new();
    match io::stdin().read_line(&mut answer) {
        Ok(_) => {
            let normalized = answer.trim().to_ascii_lowercase();
            normalized == "y" || normalized == "yes"
        }
        Err(_) => false,
    }
}
/// Print `prompt` on stderr and read one raw line from stdin (including the
/// trailing newline; callers trim).
fn prompt_string(prompt: &str) -> io::Result<String> {
    eprint!("{prompt}");
    let _ = io::stderr().flush();
    let mut buf = String::new();
    io::stdin().read_line(&mut buf).map(|_| buf)
}
fn gather_intervals(
arg_metrics: Option<u64>,
arg_procs: Option<u64>,
) -> Result<(Option<u64>, Option<u64>), Box<dyn std::error::Error>> {
let default_metrics = 500u64;
let default_procs = 2000u64;
let metrics = match arg_metrics {
Some(v) => Some(v),
None => {
let inp = prompt_string(&format!(
"Metrics interval ms (default {default_metrics}, Enter for default): "
))?;
let t = inp.trim();
if t.is_empty() {
Some(default_metrics)
} else {
Some(t.parse()?)
}
}
};
let procs = match arg_procs {
Some(v) => Some(v),
None => {
let inp = prompt_string(&format!(
"Processes interval ms (default {default_procs}, Enter for default): "
))?;
let t = inp.trim();
if t.is_empty() {
Some(default_procs)
} else {
Some(t.parse()?)
}
}
};
Ok((metrics, procs))
}
// Demo mode implementation
/// Spawn a local demo agent and attach the TUI to it over plain ws://.
/// The agent is killed when the guard is dropped (app exit or Ctrl-C).
async fn run_demo_mode(_tls_ca: Option<&str>) -> Result<(), Box<dyn std::error::Error>> {
    let port = 3231;
    let url = format!("ws://127.0.0.1:{port}/ws");
    let guard = spawn_demo_agent(port)?;
    let mut app = App::new();
    // Demo mode connects to localhost, so hostname verification stays off.
    tokio::select! {
        result = app.run(&url, None, false) => {
            drop(guard);
            result
        }
        _ = tokio::signal::ctrl_c() => {
            drop(guard);
            Ok(())
        }
    }
}
/// Keeps the spawned demo agent alive; kills it on drop.
struct DemoGuard {
    // Port the agent was started on (used only for the shutdown message).
    port: u16,
    // Shared slot holding the child process; `Option` so Drop can take() it.
    child: std::sync::Arc<std::sync::Mutex<Option<std::process::Child>>>,
}
impl Drop for DemoGuard {
    /// Terminate the demo agent when the guard goes out of scope.
    fn drop(&mut self) {
        // Use `if let Ok` rather than unwrap(): panicking inside Drop while
        // already unwinding (e.g. a poisoned mutex) would abort the process.
        if let Ok(mut slot) = self.child.lock() {
            if let Some(mut ch) = slot.take() {
                let _ = ch.kill();
                // Reap the killed child so it does not linger as a zombie on Unix.
                let _ = ch.wait();
            }
        }
        eprintln!("Stopped demo agent on port {}", self.port);
    }
}
/// Start a local `socktop_agent` on `port` for demo mode.
///
/// TLS is disabled via `SOCKTOP_ENABLE_SSL=0`; GPU/temperature collection is
/// deliberately left enabled. Returns a `DemoGuard` that kills the agent on
/// drop. Errors if the agent binary cannot be spawned.
fn spawn_demo_agent(port: u16) -> Result<DemoGuard, Box<dyn std::error::Error>> {
    let candidate = find_agent_executable();
    let mut cmd = std::process::Command::new(candidate);
    cmd.arg("--port").arg(port.to_string());
    cmd.env("SOCKTOP_ENABLE_SSL", "0");
    //JW: do not disable GPU and TEMP in demo mode
    //cmd.env("SOCKTOP_AGENT_GPU", "0");
    //cmd.env("SOCKTOP_AGENT_TEMP", "0");
    let child = cmd.spawn()?;
    // Give the agent a moment to bind its listener before the client connects.
    // NOTE(review): this is a blocking sleep and the caller (run_demo_mode) is
    // async — consider tokio::time::sleep; confirm 300ms is sufficient.
    std::thread::sleep(std::time::Duration::from_millis(300));
    Ok(DemoGuard {
        port,
        child: std::sync::Arc::new(std::sync::Mutex::new(Some(child))),
    })
}
/// Locate the agent binary: prefer a `socktop_agent` (`.exe` on Windows)
/// sitting next to the current executable, otherwise fall back to the bare
/// name and let PATH resolution find it.
fn find_agent_executable() -> std::path::PathBuf {
    #[cfg(windows)]
    let agent_name = "socktop_agent.exe";
    #[cfg(not(windows))]
    let agent_name = "socktop_agent";
    let sibling = std::env::current_exe()
        .ok()
        .and_then(|exe| exe.parent().map(|dir| dir.join(agent_name)))
        .filter(|candidate| candidate.exists());
    sibling.unwrap_or_else(|| std::path::PathBuf::from("socktop_agent"))
}

View File

@ -1,103 +0,0 @@
//! Connection profiles: load/save simple JSON mapping of profile name -> { url, tls_ca }
//! Stored under XDG config dir: $XDG_CONFIG_HOME/socktop/profiles.json (fallback ~/.config/socktop/profiles.json)
use serde::{Deserialize, Serialize};
use std::{collections::BTreeMap, fs, path::PathBuf};
/// A single saved connection profile. All optional fields are omitted from
/// the on-disk JSON when unset.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ProfileEntry {
    // Agent endpoint, e.g. "ws://HOST:PORT/ws" or "wss://...".
    pub url: String,
    // Path to a PEM CA certificate for wss:// connections.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tls_ca: Option<String>,
    // Polling intervals in milliseconds; None means "use the app defaults".
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metrics_interval_ms: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub processes_interval_ms: Option<u64>,
}
/// On-disk profiles document: a name → entry map plus a schema version.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ProfilesFile {
    // BTreeMap keeps profile names sorted for stable JSON output and listing.
    #[serde(default)]
    pub profiles: BTreeMap<String, ProfileEntry>,
    // Schema version; defaults to 0 for files written before versioning existed.
    #[serde(default)]
    pub version: u32,
}
/// Directory holding socktop configuration: `$XDG_CONFIG_HOME/socktop` when
/// that variable is set, otherwise the platform config dir (falling back to
/// the current directory as a last resort), with `socktop` appended.
pub fn config_dir() -> PathBuf {
    let base = match std::env::var_os("XDG_CONFIG_HOME") {
        Some(xdg) => PathBuf::from(xdg),
        None => dirs_next::config_dir().unwrap_or_else(|| PathBuf::from(".")),
    };
    base.join("socktop")
}
/// Full path of the profiles JSON file inside [`config_dir`].
pub fn profiles_path() -> PathBuf {
    config_dir().join("profiles.json")
}
/// Load the profiles file, returning an empty default when the file is
/// missing, unreadable, or contains invalid JSON (never errors).
pub fn load_profiles() -> ProfilesFile {
    fs::read_to_string(profiles_path())
        .ok()
        .and_then(|contents| serde_json::from_str(&contents).ok())
        .unwrap_or_default()
}
/// Persist the profiles file, creating the config directory if needed.
///
/// Serialization failures are surfaced as an `io::Error` of kind
/// `InvalidData` instead of panicking (the previous `expect` could take the
/// whole TUI down even though every caller treats saving as best-effort).
pub fn save_profiles(p: &ProfilesFile) -> std::io::Result<()> {
    let path = profiles_path();
    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent)?;
    }
    let data = serde_json::to_vec_pretty(p)
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
    fs::write(path, data)
}
/// Outcome of resolving CLI inputs against the saved profiles file; tells the
/// caller whether to connect directly, use a loaded profile, prompt, or give up.
pub enum ResolveProfile {
    /// Use the provided runtime inputs (not persisted). (url, tls_ca)
    Direct(String, Option<String>),
    /// Loaded from existing profile entry (url, tls_ca)
    Loaded(String, Option<String>),
    /// Should prompt user to select among profile names
    PromptSelect(Vec<String>),
    /// Should prompt user to create a new profile (name)
    PromptCreate(String),
    /// No profile could be resolved (e.g., missing arguments)
    None,
}
/// Runtime inputs used to resolve a connection; any combination may be absent.
pub struct ProfileRequest {
    pub profile_name: Option<String>,
    pub url: Option<String>,
    pub tls_ca: Option<String>,
}
impl ProfileRequest {
    /// Decide how to obtain connection settings from this request and the
    /// saved profiles on disk.
    pub fn resolve(self, pf: &ProfilesFile) -> ResolveProfile {
        match (self.url, self.profile_name) {
            // Only a profile name: load it if it exists, otherwise offer to
            // create it under that name.
            (None, Some(name)) => match pf.profiles.get(&name) {
                Some(entry) => {
                    ResolveProfile::Loaded(entry.url.clone(), entry.tls_ca.clone())
                }
                None => ResolveProfile::PromptCreate(name),
            },
            // A URL was supplied: use it directly (the caller may persist it).
            (Some(u), _) => ResolveProfile::Direct(u, self.tls_ca),
            // Nothing supplied: offer a selection if any profiles exist.
            (None, None) => {
                if pf.profiles.is_empty() {
                    ResolveProfile::None
                } else {
                    ResolveProfile::PromptSelect(pf.profiles.keys().cloned().collect())
                }
            }
        }
    }
}

View File

@ -1,114 +0,0 @@
//! Pure retry timing logic (decoupled from App state / UI) for testability.
use std::time::{Duration, Instant};
/// Result of computing retry timing.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RetryTiming {
    pub should_retry_now: bool,
    /// Seconds until next retry (Some(0) means ready now); None means inactive/no countdown.
    pub seconds_until_retry: Option<u64>,
}

/// Compute the auto-retry countdown for a dropped connection.
///
/// A countdown only exists while the connection is down *and* the connection
/// error modal is visible. It is measured from the most recent automatic
/// retry, falling back to the moment the disconnect was first observed.
///
/// Inputs:
/// - `disconnected`: connection state is Disconnected.
/// - `modal_active`: the connection-error modal is on screen.
/// - `original_disconnect_time`: when the disconnect was first noticed.
/// - `last_auto_retry`: when the last automatic retry was performed.
/// - `now`: current time, injected for deterministic tests.
/// - `interval`: configured retry interval.
pub(crate) fn compute_retry_timing(
    disconnected: bool,
    modal_active: bool,
    original_disconnect_time: Option<Instant>,
    last_auto_retry: Option<Instant>,
    now: Instant,
    interval: Duration,
) -> RetryTiming {
    let inactive = RetryTiming {
        should_retry_now: false,
        seconds_until_retry: None,
    };
    // No countdown unless we are disconnected with the modal showing.
    if !(disconnected && modal_active) {
        return inactive;
    }
    // Prefer the last automatic retry as the countdown baseline.
    let Some(baseline) = last_auto_retry.or(original_disconnect_time) else {
        return inactive;
    };
    let elapsed = now.saturating_duration_since(baseline);
    if elapsed >= interval {
        RetryTiming {
            should_retry_now: true,
            seconds_until_retry: Some(0),
        }
    } else {
        RetryTiming {
            should_retry_now: false,
            seconds_until_retry: Some((interval - elapsed).as_secs()),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn inactive_when_not_disconnected() {
        let now = Instant::now();
        // Connected state must yield no retry and no countdown.
        let rt = compute_retry_timing(false, true, Some(now), None, now, Duration::from_secs(30));
        assert!(!rt.should_retry_now);
        assert_eq!(rt.seconds_until_retry, None);
    }
    #[test]
    fn countdown_progress_and_ready() {
        let base = Instant::now();
        // 10s into a 30s interval: 20s remaining, not ready yet.
        let rt1 = compute_retry_timing(
            true,
            true,
            Some(base),
            None,
            base + Duration::from_secs(10),
            Duration::from_secs(30),
        );
        assert!(!rt1.should_retry_now);
        assert_eq!(rt1.seconds_until_retry, Some(20));
        // Exactly at the interval boundary: retry fires with 0s remaining.
        let rt2 = compute_retry_timing(
            true,
            true,
            Some(base),
            None,
            base + Duration::from_secs(30),
            Duration::from_secs(30),
        );
        assert!(rt2.should_retry_now);
        assert_eq!(rt2.seconds_until_retry, Some(0));
    }
    #[test]
    fn uses_last_auto_retry_as_baseline() {
        let base: Instant = Instant::now();
        let last = base + Duration::from_secs(30); // one prior retry
        // 10s after last retry => 20s remaining
        let rt = compute_retry_timing(
            true,
            true,
            Some(base),
            Some(last),
            last + Duration::from_secs(10),
            Duration::from_secs(30),
        );
        assert!(!rt.should_retry_now);
        assert_eq!(rt.seconds_until_retry, Some(20));
    }
}

View File

@ -1,4 +0,0 @@
//! Types that mirror the agent's JSON schema.
// Re-export commonly used types from socktop_connector
pub use socktop_connector::Metrics;

File diff suppressed because it is too large Load Diff

View File

@ -1,434 +0,0 @@
//! CPU average sparkline + per-core mini bars.
use crate::ui::theme::{SB_ARROW, SB_THUMB, SB_TRACK};
use crossterm::event::{KeyCode, KeyEvent, MouseButton, MouseEvent, MouseEventKind};
use ratatui::style::Modifier;
use ratatui::style::{Color, Style};
use ratatui::{
layout::{Constraint, Direction, Layout, Rect},
text::{Line, Span},
widgets::{Block, Borders, Paragraph, Sparkline},
};
use crate::history::PerCoreHistory;
use crate::types::Metrics;
/// State for dragging the scrollbar thumb
#[derive(Clone, Copy, Debug, Default)]
pub struct PerCoreScrollDrag {
    pub active: bool, // true while a left-button drag is in progress
    pub start_y: u16,     // mouse row where drag started
    pub start_top: usize, // thumb top (in track rows) at drag start
}
/// Returns the content area for per-core CPU bars, excluding borders and reserving space for scrollbar.
pub fn per_core_content_area(area: Rect) -> Rect {
    // One cell of border on every side (height -2, width -2), plus a gutter
    // column and the scrollbar column on the right (width -2 more): the two
    // saturating subtractions fold into a single -4 on the width.
    Rect {
        x: area.x + 1,
        y: area.y + 1,
        width: area.width.saturating_sub(4),
        height: area.height.saturating_sub(2),
    }
}
/// Handles key events for per-core CPU bars.
///
/// Left/Right step one row, PageUp/PageDown step one page, Home jumps to the
/// top and End to the bottom (via usize::MAX, which draw() clamps).
pub fn per_core_handle_key(scroll_offset: &mut usize, key: KeyEvent, page_size: usize) {
    let page = page_size.max(1);
    let next = match key.code {
        KeyCode::Left => scroll_offset.saturating_sub(1),
        KeyCode::Right => scroll_offset.saturating_add(1),
        KeyCode::PageUp => scroll_offset.saturating_sub(page),
        KeyCode::PageDown => scroll_offset.saturating_add(page),
        KeyCode::Home => 0,
        KeyCode::End => usize::MAX, // draw() clamps to max
        _ => *scroll_offset,
    };
    *scroll_offset = next;
}
/// Handles mouse wheel over the content.
///
/// Vertical wheel scrolls by one row; horizontal wheel pages. Events outside
/// `content_area` are ignored.
pub fn per_core_handle_mouse(
    scroll_offset: &mut usize,
    mouse: MouseEvent,
    content_area: Rect,
    page_size: usize,
) {
    let within_x =
        (content_area.x..content_area.x + content_area.width).contains(&mouse.column);
    let within_y =
        (content_area.y..content_area.y + content_area.height).contains(&mouse.row);
    if !(within_x && within_y) {
        return;
    }
    let page = page_size.max(1);
    *scroll_offset = match mouse.kind {
        MouseEventKind::ScrollUp => scroll_offset.saturating_sub(1),
        MouseEventKind::ScrollDown => scroll_offset.saturating_add(1),
        // Optional paging via horizontal wheel
        MouseEventKind::ScrollLeft => scroll_offset.saturating_sub(page),
        MouseEventKind::ScrollRight => scroll_offset.saturating_add(page),
        _ => *scroll_offset,
    };
}
/// Handles mouse interaction with the scrollbar itself (click arrows/page/drag).
///
/// `per_core_area` is the full bordered panel rect and `total_rows` the number
/// of per-core rows being scrolled. Clicks on the top/bottom arrow cells step
/// one row, clicks on the track above/below the thumb page by one viewport,
/// and a left-button drag on the thumb maps its row position back to a scroll
/// offset. `drag` carries drag state between events and is cleared on mouse-up.
pub fn per_core_handle_scrollbar_mouse(
    scroll_offset: &mut usize,
    drag: &mut Option<PerCoreScrollDrag>,
    mouse: MouseEvent,
    per_core_area: Rect,
    total_rows: usize,
) {
    // Geometry
    // Recompute the same inner/content/scrollbar rects that draw_per_core_bars
    // uses, so hit-testing matches what is on screen.
    let inner = Rect {
        x: per_core_area.x + 1,
        y: per_core_area.y + 1,
        width: per_core_area.width.saturating_sub(2),
        height: per_core_area.height.saturating_sub(2),
    };
    if inner.height < 3 || inner.width < 1 {
        return;
    }
    let content = Rect {
        x: inner.x,
        y: inner.y,
        width: inner.width.saturating_sub(2),
        height: inner.height,
    };
    let scroll_area = Rect {
        x: inner.x + inner.width.saturating_sub(1),
        y: inner.y,
        width: 1,
        height: inner.height,
    };
    let viewport_rows = content.height as usize;
    let total = total_rows.max(1);
    let view = viewport_rows.clamp(1, total);
    let max_off = total.saturating_sub(view);
    let mut offset = (*scroll_offset).min(max_off);
    // Track and current thumb
    // Track excludes the two arrow cells at the top and bottom.
    let track = (scroll_area.height - 2) as usize;
    if track == 0 {
        return;
    }
    let thumb_len = (track * view).div_ceil(total).max(1).min(track);
    // Rounded linear mapping from a scroll offset to the thumb's top row.
    let top_for_offset = |off: usize| -> usize {
        if max_off == 0 {
            0
        } else {
            ((track - thumb_len) * off + max_off / 2) / max_off
        }
    };
    let thumb_top = top_for_offset(offset);
    let inside_scrollbar = mouse.column == scroll_area.x
        && mouse.row >= scroll_area.y
        && mouse.row < scroll_area.y + scroll_area.height;
    // Helper to page
    let page_up = || offset.saturating_sub(view.max(1));
    let page_down = || offset.saturating_add(view.max(1));
    match mouse.kind {
        MouseEventKind::Down(MouseButton::Left) if inside_scrollbar => {
            // Where within the track?
            let row = mouse.row;
            if row == scroll_area.y {
                // Top arrow
                offset = offset.saturating_sub(1);
            } else if row + 1 == scroll_area.y + scroll_area.height {
                // Bottom arrow
                offset = offset.saturating_add(1);
            } else {
                // Inside track
                let rel = (row - (scroll_area.y + 1)) as usize;
                let thumb_end = thumb_top + thumb_len;
                if rel < thumb_top {
                    // Page up
                    offset = page_up();
                } else if rel >= thumb_end {
                    // Page down
                    offset = page_down();
                } else {
                    // Start dragging
                    *drag = Some(PerCoreScrollDrag {
                        active: true,
                        start_y: row,
                        start_top: thumb_top,
                    });
                }
            }
        }
        MouseEventKind::Drag(MouseButton::Left) => {
            if let Some(mut d) = drag.take()
                && d.active
            {
                // Move the thumb by the mouse delta, clamped to the track.
                let dy = (mouse.row as i32) - (d.start_y as i32);
                let new_top = (d.start_top as i32 + dy)
                    .clamp(0, (track.saturating_sub(thumb_len)) as i32)
                    as usize;
                // Inverse mapping top -> offset
                if track > thumb_len {
                    let denom = track - thumb_len;
                    offset = if max_off == 0 {
                        0
                    } else {
                        (new_top * max_off + denom / 2) / denom
                    };
                } else {
                    offset = 0;
                }
                // Keep dragging
                // Re-anchor so the next Drag event is relative to this one.
                d.start_top = new_top;
                d.start_y = mouse.row;
                *drag = Some(d);
            }
        }
        MouseEventKind::Up(MouseButton::Left) => {
            // End drag
            *drag = None;
        }
        // Also allow wheel scrolling when cursor is over the scrollbar
        MouseEventKind::ScrollUp if inside_scrollbar => {
            offset = offset.saturating_sub(1);
        }
        MouseEventKind::ScrollDown if inside_scrollbar => {
            offset = offset.saturating_add(1);
        }
        _ => {}
    }
    // Clamp and write back
    if offset > max_off {
        offset = max_off;
    }
    *scroll_offset = offset;
}
/// Clamp scroll offset to the valid range given content and viewport.
pub fn per_core_clamp(scroll_offset: &mut usize, total_rows: usize, viewport_rows: usize) {
    // The largest useful offset shows the final full viewport of rows.
    *scroll_offset = (*scroll_offset).min(total_rows.saturating_sub(viewport_rows));
}
/// Draws the CPU average sparkline graph.
///
/// The block title shows the instantaneous CPU load and the average over the
/// buffered history; a temperature readout is overlaid in the top-right
/// corner when the agent reports one.
pub fn draw_cpu_avg_graph(
    f: &mut ratatui::Frame<'_>,
    area: Rect,
    hist: &std::collections::VecDeque<u64>,
    m: Option<&Metrics>,
) {
    // Calculate average CPU over the monitoring period
    let avg_cpu = if !hist.is_empty() {
        let sum: u64 = hist.iter().sum();
        sum as f64 / hist.len() as f64
    } else {
        0.0
    };
    let title = if let Some(mm) = m {
        format!("CPU (now: {:>5.1}% | avg: {:>5.1}%)", mm.cpu_total, avg_cpu)
    } else {
        "CPU avg".into()
    };
    // Build the top-right info (CPU temp and polling intervals)
    let top_right_info = if let Some(mm) = m {
        mm.cpu_temp_c
            .map(|t| {
                // Icon thresholds: cool below 50°C, warning up to 85°C, hot above.
                let icon = if t < 50.0 {
                    "😎"
                } else if t < 85.0 {
                    "⚠️"
                } else {
                    "🔥"
                };
                format!("CPU Temp: {t:.1}°C {icon}")
            })
            .unwrap_or_else(|| "CPU Temp: N/A".into())
    } else {
        String::new()
    };
    // Only the most recent points that fit inside the borders are plotted.
    let max_points = area.width.saturating_sub(2) as usize;
    let start = hist.len().saturating_sub(max_points);
    let data: Vec<u64> = hist.iter().skip(start).cloned().collect();
    // Render the sparkline with title on left
    let spark = Sparkline::default()
        .block(Block::default().borders(Borders::ALL).title(title))
        .data(&data)
        .max(100)
        .style(Style::default().fg(Color::Cyan));
    f.render_widget(spark, area);
    // Render the top-right info as text overlay in the top-right corner
    if !top_right_info.is_empty() {
        // NOTE(review): len() is the UTF-8 byte length; the emoji and "°"
        // make it larger than the rendered column width, so the overlay is
        // positioned further left than strictly needed — confirm acceptable.
        let info_area = Rect {
            x: area.x + area.width.saturating_sub(top_right_info.len() as u16 + 2),
            y: area.y,
            width: top_right_info.len() as u16 + 1,
            height: 1,
        };
        let info_line = Line::from(Span::raw(top_right_info));
        f.render_widget(Paragraph::new(info_line), info_area);
    }
}
/// Draws the per-core CPU bars with sparklines and trends.
///
/// One row per visible core: a colored sparkline of recent load plus a
/// right-aligned percentage label, followed by a custom one-column scrollbar.
/// `scroll_offset` is clamped here against the viewport, so callers may pass
/// usize::MAX to mean "scroll to the end".
pub fn draw_per_core_bars(
    f: &mut ratatui::Frame<'_>,
    area: Rect,
    m: Option<&Metrics>,
    per_core_hist: &PerCoreHistory,
    scroll_offset: usize,
) {
    f.render_widget(
        Block::default().borders(Borders::ALL).title("Per-core"),
        area,
    );
    let Some(mm) = m else {
        return;
    };
    // Compute inner rect and content area
    let inner = Rect {
        x: area.x + 1,
        y: area.y + 1,
        width: area.width.saturating_sub(2),
        height: area.height.saturating_sub(2),
    };
    if inner.height == 0 || inner.width <= 2 {
        return;
    }
    // Content leaves two columns on the right (gutter + scrollbar).
    let content = Rect {
        x: inner.x,
        y: inner.y,
        width: inner.width.saturating_sub(2),
        height: inner.height,
    };
    let total_rows = mm.cpu_per_core.len();
    let viewport_rows = content.height as usize;
    let max_offset = total_rows.saturating_sub(viewport_rows);
    let offset = scroll_offset.min(max_offset);
    let show_n = total_rows.saturating_sub(offset).min(viewport_rows);
    let constraints: Vec<Constraint> = (0..show_n).map(|_| Constraint::Length(1)).collect();
    let vchunks = Layout::default()
        .direction(Direction::Vertical)
        .constraints(constraints)
        .split(content);
    for i in 0..show_n {
        let idx = offset + i;
        let rect = vchunks[i];
        // Each row: flexible sparkline on the left, fixed 12-col label on the right.
        let hchunks = Layout::default()
            .direction(Direction::Horizontal)
            .constraints([Constraint::Min(6), Constraint::Length(12)])
            .split(rect);
        let curr = mm.cpu_per_core[idx].clamp(0.0, 100.0);
        // Compare against the sample ~20 ticks back to derive a trend, with a
        // 0.2% dead band to suppress flicker.
        let older = per_core_hist
            .deques
            .get(idx)
            .and_then(|d| d.iter().rev().nth(20).copied())
            .map(|v| v as f32)
            .unwrap_or(curr);
        let trend = if curr > older + 0.2 {
            ""
        } else if curr + 0.2 < older {
            ""
        } else {
            ""
        };
        // Load coloring: green < 25%, yellow < 60%, red otherwise.
        let fg = match curr {
            x if x < 25.0 => Color::Green,
            x if x < 60.0 => Color::Yellow,
            _ => Color::Red,
        };
        let hist: Vec<u64> = per_core_hist
            .deques
            .get(idx)
            .map(|d| {
                let max_points = hchunks[0].width as usize;
                let start = d.len().saturating_sub(max_points);
                d.iter().skip(start).map(|&v| v as u64).collect()
            })
            .unwrap_or_default();
        let spark = Sparkline::default()
            .data(&hist)
            .max(100)
            .style(Style::default().fg(fg));
        f.render_widget(spark, hchunks[0]);
        let label = format!("cpu{idx:<2}{trend}{curr:>5.1}%");
        let line = Line::from(Span::styled(
            label,
            Style::default().fg(fg).add_modifier(Modifier::BOLD),
        ));
        f.render_widget(Paragraph::new(line).right_aligned(), hchunks[1]);
    }
    // Custom 1-col scrollbar with arrows, track, and exact mapping
    let scroll_area = Rect {
        x: inner.x + inner.width.saturating_sub(1),
        y: inner.y,
        width: 1,
        height: inner.height,
    };
    if scroll_area.height >= 3 {
        let track = (scroll_area.height - 2) as usize;
        let total = total_rows.max(1);
        let view = viewport_rows.clamp(1, total);
        let max_off = total.saturating_sub(view);
        // Thumb length proportional to the visible fraction, at least 1 row.
        let thumb_len = (track * view).div_ceil(total).max(1).min(track);
        let thumb_top = if max_off == 0 {
            0
        } else {
            ((track - thumb_len) * offset + max_off / 2) / max_off
        };
        // Build lines: top arrow, track (with thumb), bottom arrow
        let mut lines: Vec<Line> = Vec::with_capacity(scroll_area.height as usize);
        lines.push(Line::from(Span::styled("", Style::default().fg(SB_ARROW))));
        for i in 0..track {
            if i >= thumb_top && i < thumb_top + thumb_len {
                lines.push(Line::from(Span::styled("", Style::default().fg(SB_THUMB))));
            } else {
                lines.push(Line::from(Span::styled("", Style::default().fg(SB_TRACK))));
            }
        }
        lines.push(Line::from(Span::styled("", Style::default().fg(SB_ARROW))));
        f.render_widget(Paragraph::new(lines), scroll_area);
    }
}

View File

@ -1,119 +0,0 @@
//! Disk cards with per-device gauge and title line.
use crate::types::Metrics;
use crate::ui::util::{disk_icon, human, truncate_middle};
use ratatui::{
layout::{Constraint, Direction, Layout, Rect},
style::Style,
widgets::{Block, Borders, Gauge},
};
/// Draws one bordered card per unique disk: a title line with icon, usage
/// figures and optional temperature, plus a colored usage gauge. Partitions
/// are prefixed with "└─" and the whole card is indented under their parent.
pub fn draw_disks(f: &mut ratatui::Frame<'_>, area: Rect, m: Option<&Metrics>) {
    f.render_widget(Block::default().borders(Borders::ALL).title("Disks"), area);
    let Some(mm) = m else {
        return;
    };
    let inner = Rect {
        x: area.x + 1,
        y: area.y + 1,
        width: area.width.saturating_sub(2),
        height: area.height.saturating_sub(2),
    };
    // Each card needs 3 rows; bail out when not even one fits.
    if inner.height < 3 {
        return;
    }
    // Filter duplicates by keeping first occurrence of each unique name
    let mut seen_names = std::collections::HashSet::new();
    let unique_disks: Vec<_> = mm
        .disks
        .iter()
        .filter(|d| seen_names.insert(d.name.clone()))
        .collect();
    let per_disk_h = 3u16;
    // Cards that don't fit vertically are simply not rendered.
    let max_cards = (inner.height / per_disk_h).min(unique_disks.len() as u16) as usize;
    let constraints: Vec<Constraint> = (0..max_cards)
        .map(|_| Constraint::Length(per_disk_h))
        .collect();
    let rows = Layout::default()
        .direction(Direction::Vertical)
        .constraints(constraints)
        .split(inner);
    for (i, slot) in rows.iter().enumerate() {
        let d = unique_disks[i];
        let used = d.total.saturating_sub(d.available);
        let ratio = if d.total > 0 {
            used as f64 / d.total as f64
        } else {
            0.0
        };
        let pct = (ratio * 100.0).round() as u16;
        // Usage coloring: green < 70%, yellow < 90%, red otherwise.
        let color = if pct < 70 {
            ratatui::style::Color::Green
        } else if pct < 90 {
            ratatui::style::Color::Yellow
        } else {
            ratatui::style::Color::Red
        };
        // Add indentation for partitions
        let indent = if d.is_partition { "└─" } else { "" };
        // Add temperature if available
        let temp_str = d
            .temperature
            .map(|t| format!(" {}°C", t.round() as i32))
            .unwrap_or_default();
        // Long device names are shortened in the middle to fit the card width.
        let title = format!(
            "{}{}{}{} {} / {} ({}%)",
            indent,
            disk_icon(&d.name),
            truncate_middle(&d.name, (slot.width.saturating_sub(6)) as usize / 2),
            temp_str,
            human(used),
            human(d.total),
            pct
        );
        // Indent the entire card (block) for partitions to align with └─ prefix (4 chars)
        let card_indent = if d.is_partition { 4 } else { 0 };
        let card_rect = Rect {
            x: slot.x + card_indent,
            y: slot.y,
            width: slot.width.saturating_sub(card_indent),
            height: slot.height,
        };
        let card = Block::default().borders(Borders::ALL).title(title);
        f.render_widget(card, card_rect);
        let inner_card = Rect {
            x: card_rect.x + 1,
            y: card_rect.y + 1,
            width: card_rect.width.saturating_sub(2),
            height: card_rect.height.saturating_sub(2),
        };
        if inner_card.height == 0 {
            continue;
        }
        // Single-row gauge vertically centered inside the card.
        let gauge_rect = Rect {
            x: inner_card.x,
            y: inner_card.y + inner_card.height / 2,
            width: inner_card.width,
            height: 1,
        };
        let g = Gauge::default()
            .percent(pct)
            .gauge_style(Style::default().fg(color));
        f.render_widget(g, gauge_rect);
    }
}

View File

@ -1,123 +0,0 @@
use ratatui::{
layout::{Constraint, Direction, Layout, Rect},
style::{Color, Style},
text::Span,
widgets::{Block, Borders, Gauge, Paragraph},
};
use crate::types::Metrics;
/// Format a byte count with a single-letter binary-unit suffix:
/// "1.5G" / "12.0M" / "3.2K", or "512B" below one KiB.
fn fmt_bytes(b: u64) -> String {
    const KB: f64 = 1024.0;
    const MB: f64 = KB * 1024.0;
    const GB: f64 = MB * 1024.0;
    match b as f64 {
        v if v >= GB => format!("{:.1}G", v / GB),
        v if v >= MB => format!("{:.1}M", v / MB),
        v if v >= KB => format!("{:.1}K", v / KB),
        _ => format!("{b}B"),
    }
}
/// Draws the GPU panel: for each reported GPU, three rows — name, a
/// utilization gauge, and a VRAM gauge with a "used/total (pct%)" label.
/// Shows "No GPUs" when the agent reports none.
pub fn draw_gpu(f: &mut ratatui::Frame<'_>, area: Rect, m: Option<&Metrics>) {
    let mut area = area;
    let block = Block::default().borders(Borders::ALL).title("GPU");
    f.render_widget(block, area);
    // Guard: need some space inside the block
    if area.height <= 2 || area.width <= 2 {
        return;
    }
    // Inner padding consistent with the rest of the app
    area.y += 1;
    area.height = area.height.saturating_sub(2);
    area.x += 1;
    area.width = area.width.saturating_sub(2);
    let Some(metrics) = m else {
        return;
    };
    let Some(gpus) = metrics.gpus.as_ref() else {
        f.render_widget(Paragraph::new("No GPUs"), area);
        return;
    };
    if gpus.is_empty() {
        f.render_widget(Paragraph::new("No GPUs"), area);
        return;
    }
    // Show 3 rows per GPU: name, util bar, vram bar.
    if area.height < 3 {
        return;
    }
    let per_gpu_rows: u16 = 3;
    // GPUs that don't fit vertically are not rendered.
    let max_gpus = (area.height / per_gpu_rows) as usize;
    let count = gpus.len().min(max_gpus);
    let constraints = vec![Constraint::Length(1); count * per_gpu_rows as usize];
    let rows = Layout::default()
        .direction(Direction::Vertical)
        .constraints(constraints)
        .split(area);
    // Per bar horizontal layout: [gauge] [value]
    let split_bar = |r: Rect| {
        Layout::default()
            .direction(Direction::Horizontal)
            .constraints([
                Constraint::Min(8),     // gauge column
                Constraint::Length(24), // value column
            ])
            .split(r)
    };
    for i in 0..count {
        let g = &gpus[i];
        // Row 1: GPU name
        let name_text = g.name.as_deref().unwrap_or("GPU");
        let name_p = Paragraph::new(Span::raw(name_text)).style(Style::default().fg(Color::Gray));
        f.render_widget(name_p, rows[i * 3]);
        // Row 2: Utilization bar + right label
        let util_cols = split_bar(rows[i * 3 + 1]);
        let util = g.utilization.unwrap_or(0.0).clamp(0.0, 100.0) as u16;
        let util_gauge = Gauge::default()
            .gauge_style(Style::default().fg(Color::Green))
            .label(Span::raw(""))
            .ratio(util as f64 / 100.0);
        f.render_widget(util_gauge, util_cols[0]);
        f.render_widget(
            Paragraph::new(Span::raw(format!("util: {util}%")))
                .style(Style::default().fg(Color::Gray)),
            util_cols[1],
        );
        // Row 3: VRAM bar + right label
        let mem_cols = split_bar(rows[i * 3 + 2]);
        let used = g.mem_used.unwrap_or(0);
        let total = g.mem_total.unwrap_or(1);
        // Gauge::ratio panics when the ratio falls outside 0.0..=1.0, so an
        // agent reporting mem_total == Some(0) (division by zero -> inf/NaN)
        // or used > total would crash the TUI. Guard and clamp instead.
        let mem_ratio = if total > 0 {
            (used as f64 / total as f64).clamp(0.0, 1.0)
        } else {
            0.0
        };
        let mem_pct = (mem_ratio * 100.0).round() as u16;
        let mem_gauge = Gauge::default()
            .gauge_style(Style::default().fg(Color::LightMagenta))
            .label(Span::raw(""))
            .ratio(mem_ratio);
        f.render_widget(mem_gauge, mem_cols[0]);
        let used_s = fmt_bytes(used);
        let total_s = fmt_bytes(total);
        f.render_widget(
            Paragraph::new(Span::raw(format!("vram: {used_s}/{total_s} ({mem_pct}%)")))
                .style(Style::default().fg(Color::Gray)),
            mem_cols[1],
        );
    }
}

View File

@ -1,55 +0,0 @@
//! Top header with hostname and CPU temperature indicator.
use crate::types::Metrics;
use ratatui::{
layout::Rect,
text::{Line, Span},
widgets::{Block, Borders, Paragraph},
};
use std::time::Duration;
/// Renders the top header bar: hostname (or "connecting..."), TLS and token
/// indicators, key hints, and the active polling intervals right-aligned.
pub fn draw_header(
    f: &mut ratatui::Frame<'_>,
    area: Rect,
    m: Option<&Metrics>,
    is_tls: bool,
    has_token: bool,
    metrics_interval: Duration,
    procs_interval: Duration,
) {
    let base = if let Some(mm) = m {
        format!("socktop — host: {}", mm.hostname)
    } else {
        "socktop — connecting...".into()
    };
    // TLS indicator: lock vs lock with cross (using ✗). Keep explicit label for clarity.
    let tls_txt = if is_tls { "🔒 TLS" } else { "🔒✗ TLS" };
    // Token indicator
    let tok_txt = if has_token { "🔑 token" } else { "" };
    let mut parts = vec![base, tls_txt.into()];
    if !tok_txt.is_empty() {
        parts.push(tok_txt.into());
    }
    parts.push("(a: about, h: help, q: quit)".into());
    let title = parts.join(" | ");
    // Render the block with left-aligned title
    f.render_widget(Block::default().title(title).borders(Borders::BOTTOM), area);
    // Render polling intervals on the right side
    let mi = metrics_interval.as_millis();
    let pi = procs_interval.as_millis();
    let intervals = format!("{mi}ms metrics | {pi}ms procs");
    let intervals_width = intervals.len() as u16;
    // Only draw the overlay when it fits without colliding with the title.
    if area.width > intervals_width + 2 {
        let right_area = Rect {
            x: area.x + area.width.saturating_sub(intervals_width + 1),
            y: area.y,
            width: intervals_width,
            height: 1,
        };
        let intervals_line = Line::from(Span::raw(intervals));
        f.render_widget(Paragraph::new(intervals_line), right_area);
    }
}

View File

@ -1,29 +0,0 @@
//! Memory gauge.
use crate::types::Metrics;
use crate::ui::util::human;
use ratatui::{
layout::Rect,
style::{Color, Style},
widgets::{Block, Borders, Gauge},
};
/// Draws the memory gauge: percentage fill with a "used / total" label.
/// Before the first metrics arrive it renders an empty 0% gauge.
pub fn draw_mem(f: &mut ratatui::Frame<'_>, area: Rect, m: Option<&Metrics>) {
    let (used, total) = match m {
        Some(mm) => (mm.mem_used, mm.mem_total),
        None => (0, 0),
    };
    // Guard against a zero total to avoid dividing by zero.
    let pct = if total > 0 {
        (used as f64 / total as f64 * 100.0) as u16
    } else {
        0
    };
    let gauge = Gauge::default()
        .block(Block::default().borders(Borders::ALL).title("Memory"))
        .gauge_style(Style::default().fg(Color::Magenta))
        .percent(pct)
        .label(format!("{} / {}", human(used), human(total)));
    f.render_widget(gauge, area);
}

View File

@ -1,17 +0,0 @@
//! UI module root: exposes drawing functions for individual panels.
pub mod cpu;
pub mod disks;
pub mod gpu;
pub mod header;
pub mod mem;
pub mod modal;
pub mod modal_connection;
pub mod modal_format;
pub mod modal_process;
pub mod modal_types;
pub mod net;
pub mod processes;
pub mod swap;
pub mod theme;
pub mod util;

View File

@ -1,634 +0,0 @@
//! Modal window system for socktop TUI application
use super::theme::MODAL_DIM_BG;
use crossterm::event::KeyCode;
use ratatui::{
Frame,
layout::{Alignment, Constraint, Direction, Layout, Rect},
style::{Color, Modifier, Style},
text::Line,
widgets::{Block, Borders, Clear, Paragraph, Wrap},
};
// Re-export types from modal_types
pub use super::modal_types::{
ModalAction, ModalButton, ModalType, ProcessHistoryData, ProcessModalData,
};
/// Stack-based modal window manager. The last element of `stack` is the
/// modal currently shown; scroll offsets/maxima below are per-modal UI
/// state that the render code updates and the key handler clamps against.
#[derive(Debug)]
pub struct ModalManager {
    // LIFO stack of open modals; top of stack is the visible one.
    stack: Vec<ModalType>,
    // Button that currently has keyboard focus in the top modal.
    pub(super) active_button: ModalButton,
    // Scroll position of the thread list in the process-details modal.
    pub thread_scroll_offset: usize,
    // Scroll position of the journal pane in the process-details modal.
    pub journal_scroll_offset: usize,
    // Upper bound for `thread_scroll_offset` (set by the renderer).
    pub thread_scroll_max: usize,
    // Upper bound for `journal_scroll_offset` (set by the renderer).
    pub journal_scroll_max: usize,
    // Scroll position of the help modal's content.
    pub help_scroll_offset: usize,
}
// Core modal behavior: stack management, keyboard routing, and rendering
// of the generic modal types (the connection-error and process-details
// renderers live in sibling modules).
impl ModalManager {
    /// Create a manager with no open modals and all scroll state at zero.
    pub fn new() -> Self {
        Self {
            stack: Vec::new(),
            active_button: ModalButton::Retry,
            thread_scroll_offset: 0,
            journal_scroll_offset: 0,
            thread_scroll_max: 0,
            journal_scroll_max: 0,
            help_scroll_offset: 0,
        }
    }
    /// True when at least one modal is open (input should be routed here).
    pub fn is_active(&self) -> bool {
        !self.stack.is_empty()
    }
    /// The modal currently on top of the stack, if any.
    pub fn current_modal(&self) -> Option<&ModalType> {
        self.stack.last()
    }
    /// Open `modal` on top of the stack and select its default button,
    /// resetting the per-modal scroll state where applicable.
    pub fn push_modal(&mut self, modal: ModalType) {
        self.stack.push(modal);
        self.active_button = match self.stack.last() {
            Some(ModalType::ConnectionError { .. }) => ModalButton::Retry,
            Some(ModalType::ProcessDetails { .. }) => {
                // Reset scroll state for new process details
                self.thread_scroll_offset = 0;
                self.journal_scroll_offset = 0;
                self.thread_scroll_max = 0;
                self.journal_scroll_max = 0;
                ModalButton::Ok
            }
            Some(ModalType::About) => ModalButton::Ok,
            Some(ModalType::Help) => {
                // Reset scroll state for help modal
                self.help_scroll_offset = 0;
                ModalButton::Ok
            }
            Some(ModalType::Confirmation { .. }) => ModalButton::Confirm,
            Some(ModalType::Info { .. }) => ModalButton::Ok,
            None => ModalButton::Ok,
        };
    }
    /// Close the top modal, restoring the default button for the modal
    /// underneath (if any). Returns the modal that was closed.
    pub fn pop_modal(&mut self) -> Option<ModalType> {
        let m = self.stack.pop();
        if let Some(next) = self.stack.last() {
            self.active_button = match next {
                ModalType::ConnectionError { .. } => ModalButton::Retry,
                ModalType::ProcessDetails { .. } => ModalButton::Ok,
                ModalType::About => ModalButton::Ok,
                ModalType::Help => ModalButton::Ok,
                ModalType::Confirmation { .. } => ModalButton::Confirm,
                ModalType::Info { .. } => ModalButton::Ok,
            };
        }
        m
    }
    /// Update the auto-retry countdown of a ConnectionError modal, if one
    /// is currently on top of the stack; otherwise a no-op.
    pub fn update_connection_error_countdown(&mut self, new_countdown: Option<u64>) {
        if let Some(ModalType::ConnectionError {
            auto_retry_countdown,
            ..
        }) = self.stack.last_mut()
        {
            *auto_retry_countdown = new_countdown;
        }
    }
    /// Route a key press to the active modal. Returns `ModalAction::None`
    /// when no modal is open or the key is not handled here, so the caller
    /// can fall through to the main-window key handling.
    pub fn handle_key(&mut self, key: KeyCode) -> ModalAction {
        if !self.is_active() {
            return ModalAction::None;
        }
        match key {
            // Esc always closes the top modal.
            KeyCode::Esc => {
                self.pop_modal();
                ModalAction::Cancel
            }
            KeyCode::Enter => self.handle_enter(),
            // Tab/arrows move button focus.
            KeyCode::Tab | KeyCode::Right => {
                self.next_button();
                ModalAction::None
            }
            KeyCode::BackTab | KeyCode::Left => {
                self.prev_button();
                ModalAction::None
            }
            // r/q are shortcuts only on the connection-error modal.
            KeyCode::Char('r') | KeyCode::Char('R') => {
                if matches!(self.stack.last(), Some(ModalType::ConnectionError { .. })) {
                    ModalAction::RetryConnection
                } else {
                    ModalAction::None
                }
            }
            KeyCode::Char('q') | KeyCode::Char('Q') => {
                if matches!(self.stack.last(), Some(ModalType::ConnectionError { .. })) {
                    ModalAction::ExitApp
                } else {
                    ModalAction::None
                }
            }
            KeyCode::Char('x') | KeyCode::Char('X') => {
                if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
                    // Close all ProcessDetails modals at once (handles parent navigation chain)
                    while matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
                        self.pop_modal();
                    }
                    ModalAction::Dismiss
                } else {
                    ModalAction::None
                }
            }
            // j/k scroll the thread list by one line (clamped to max).
            KeyCode::Char('j') | KeyCode::Char('J') => {
                if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
                    self.thread_scroll_offset = self
                        .thread_scroll_offset
                        .saturating_add(1)
                        .min(self.thread_scroll_max);
                    ModalAction::Handled
                } else {
                    ModalAction::None
                }
            }
            KeyCode::Char('k') | KeyCode::Char('K') => {
                if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
                    self.thread_scroll_offset = self.thread_scroll_offset.saturating_sub(1);
                    ModalAction::Handled
                } else {
                    ModalAction::None
                }
            }
            // d/u scroll the thread list by ten lines.
            KeyCode::Char('d') | KeyCode::Char('D') => {
                if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
                    self.thread_scroll_offset = self
                        .thread_scroll_offset
                        .saturating_add(10)
                        .min(self.thread_scroll_max);
                    ModalAction::Handled
                } else {
                    ModalAction::None
                }
            }
            KeyCode::Char('u') | KeyCode::Char('U') => {
                if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
                    self.thread_scroll_offset = self.thread_scroll_offset.saturating_sub(10);
                    ModalAction::Handled
                } else {
                    ModalAction::None
                }
            }
            // [ / ] scroll the journal pane.
            KeyCode::Char('[') => {
                if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
                    self.journal_scroll_offset = self.journal_scroll_offset.saturating_sub(1);
                    ModalAction::Handled
                } else {
                    ModalAction::None
                }
            }
            KeyCode::Char(']') => {
                if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
                    self.journal_scroll_offset = self
                        .journal_scroll_offset
                        .saturating_add(1)
                        .min(self.journal_scroll_max);
                    ModalAction::Handled
                } else {
                    ModalAction::None
                }
            }
            KeyCode::Char('p') | KeyCode::Char('P') => {
                // Switch to parent process if it exists
                if let Some(ModalType::ProcessDetails { pid }) = self.stack.last() {
                    // We need to get the parent PID from the process details
                    // For now, return a special action that the app can handle
                    // The app has access to the process details and can extract parent_pid
                    ModalAction::SwitchToParentProcess(*pid)
                } else {
                    ModalAction::None
                }
            }
            // Up/Down scroll the help modal content.
            KeyCode::Up => {
                if matches!(self.stack.last(), Some(ModalType::Help)) {
                    self.help_scroll_offset = self.help_scroll_offset.saturating_sub(1);
                    ModalAction::Handled
                } else {
                    ModalAction::None
                }
            }
            KeyCode::Down => {
                if matches!(self.stack.last(), Some(ModalType::Help)) {
                    self.help_scroll_offset = self.help_scroll_offset.saturating_add(1);
                    ModalAction::Handled
                } else {
                    ModalAction::None
                }
            }
            _ => ModalAction::None,
        }
    }
    /// Activate the currently focused button for the top modal.
    fn handle_enter(&mut self) -> ModalAction {
        match (&self.stack.last(), &self.active_button) {
            (Some(ModalType::ConnectionError { .. }), ModalButton::Retry) => {
                ModalAction::RetryConnection
            }
            (Some(ModalType::ConnectionError { .. }), ModalButton::Exit) => ModalAction::ExitApp,
            (Some(ModalType::ProcessDetails { .. }), ModalButton::Ok) => {
                self.pop_modal();
                ModalAction::Dismiss
            }
            (Some(ModalType::About), ModalButton::Ok) => {
                self.pop_modal();
                ModalAction::Dismiss
            }
            (Some(ModalType::Help), ModalButton::Ok) => {
                self.pop_modal();
                ModalAction::Dismiss
            }
            (Some(ModalType::Confirmation { .. }), ModalButton::Confirm) => ModalAction::Confirm,
            (Some(ModalType::Confirmation { .. }), ModalButton::Cancel) => ModalAction::Cancel,
            (Some(ModalType::Info { .. }), ModalButton::Ok) => {
                self.pop_modal();
                ModalAction::Dismiss
            }
            _ => ModalAction::None,
        }
    }
    /// Move focus to the next button; only two-button modals cycle, all
    /// others keep their single button focused.
    fn next_button(&mut self) {
        self.active_button = match (&self.stack.last(), &self.active_button) {
            (Some(ModalType::ConnectionError { .. }), ModalButton::Retry) => ModalButton::Exit,
            (Some(ModalType::ConnectionError { .. }), ModalButton::Exit) => ModalButton::Retry,
            (Some(ModalType::Confirmation { .. }), ModalButton::Confirm) => ModalButton::Cancel,
            (Some(ModalType::Confirmation { .. }), ModalButton::Cancel) => ModalButton::Confirm,
            _ => self.active_button.clone(),
        };
    }
    /// Every button group has exactly two entries, so "previous" is the
    /// same rotation as "next".
    fn prev_button(&mut self) {
        self.next_button();
    }
    /// Render the top modal (if any) over a dimmed background.
    pub fn render(&mut self, f: &mut Frame, data: ProcessModalData) {
        if let Some(m) = self.stack.last().cloned() {
            self.render_background_dim(f);
            self.render_modal_content(f, &m, data);
        }
    }
    /// Clear the whole frame and paint it with the dim modal backdrop.
    fn render_background_dim(&self, f: &mut Frame) {
        let area = f.area();
        f.render_widget(Clear, area);
        f.render_widget(
            Block::default()
                .style(Style::default().bg(MODAL_DIM_BG).fg(MODAL_DIM_BG))
                .borders(Borders::NONE),
            area,
        );
    }
    /// Size the modal for its type and dispatch to the specific renderer.
    fn render_modal_content(&mut self, f: &mut Frame, modal: &ModalType, data: ProcessModalData) {
        let area = f.area();
        // Different sizes for different modal types
        let modal_area = match modal {
            ModalType::ProcessDetails { .. } => {
                // Process details modal uses almost full screen (95% width, 90% height)
                self.centered_rect(95, 90, area)
            }
            ModalType::About => {
                // About modal uses medium size
                self.centered_rect(90, 90, area)
            }
            ModalType::Help => {
                // Help modal uses medium size
                self.centered_rect(70, 80, area)
            }
            _ => {
                // Other modals use smaller size
                self.centered_rect(70, 50, area)
            }
        };
        f.render_widget(Clear, modal_area);
        match modal {
            ModalType::ConnectionError {
                message,
                disconnected_at,
                retry_count,
                auto_retry_countdown,
            } => self.render_connection_error(
                f,
                modal_area,
                message,
                *disconnected_at,
                *retry_count,
                *auto_retry_countdown,
            ),
            ModalType::ProcessDetails { pid } => {
                self.render_process_details(f, modal_area, *pid, data)
            }
            ModalType::About => self.render_about(f, modal_area),
            ModalType::Help => self.render_help(f, modal_area),
            ModalType::Confirmation {
                title,
                message,
                confirm_text,
                cancel_text,
            } => self.render_confirmation(f, modal_area, title, message, confirm_text, cancel_text),
            ModalType::Info { title, message } => self.render_info(f, modal_area, title, message),
        }
    }
    /// Render a yes/no confirmation dialog with the focused button
    /// highlighted (Confirm in green, Cancel in red).
    fn render_confirmation(
        &self,
        f: &mut Frame,
        area: Rect,
        title: &str,
        message: &str,
        confirm_text: &str,
        cancel_text: &str,
    ) {
        // Message on top, one row of buttons at the bottom.
        let chunks = Layout::default()
            .direction(Direction::Vertical)
            .constraints([Constraint::Min(1), Constraint::Length(3)])
            .split(area);
        let block = Block::default()
            .title(format!(" {title} "))
            .borders(Borders::ALL)
            .style(Style::default().bg(Color::Black));
        f.render_widget(block, area);
        f.render_widget(
            Paragraph::new(message)
                .style(Style::default().fg(Color::White))
                .alignment(Alignment::Center)
                .wrap(Wrap { trim: true }),
            chunks[0],
        );
        let buttons = Layout::default()
            .direction(Direction::Horizontal)
            .constraints([Constraint::Percentage(50), Constraint::Percentage(50)])
            .split(chunks[1]);
        let confirm_style = if self.active_button == ModalButton::Confirm {
            Style::default()
                .bg(Color::Green)
                .fg(Color::Black)
                .add_modifier(Modifier::BOLD)
        } else {
            Style::default().fg(Color::Green)
        };
        let cancel_style = if self.active_button == ModalButton::Cancel {
            Style::default()
                .bg(Color::Red)
                .fg(Color::Black)
                .add_modifier(Modifier::BOLD)
        } else {
            Style::default().fg(Color::Red)
        };
        f.render_widget(
            Paragraph::new(confirm_text)
                .style(confirm_style)
                .alignment(Alignment::Center),
            buttons[0],
        );
        f.render_widget(
            Paragraph::new(cancel_text)
                .style(cancel_style)
                .alignment(Alignment::Center),
            buttons[1],
        );
    }
    /// Render a simple informational dialog with a single OK button.
    fn render_info(&self, f: &mut Frame, area: Rect, title: &str, message: &str) {
        let chunks = Layout::default()
            .direction(Direction::Vertical)
            .constraints([Constraint::Min(1), Constraint::Length(3)])
            .split(area);
        let block = Block::default()
            .title(format!(" {title} "))
            .borders(Borders::ALL)
            .style(Style::default().bg(Color::Black));
        f.render_widget(block, area);
        f.render_widget(
            Paragraph::new(message)
                .style(Style::default().fg(Color::White))
                .alignment(Alignment::Center)
                .wrap(Wrap { trim: true }),
            chunks[0],
        );
        let ok_style = if self.active_button == ModalButton::Ok {
            Style::default()
                .bg(Color::Blue)
                .fg(Color::White)
                .add_modifier(Modifier::BOLD)
        } else {
            Style::default().fg(Color::Blue)
        };
        f.render_widget(
            Paragraph::new("[ Enter ] OK")
                .style(ok_style)
                .alignment(Alignment::Center),
            chunks[1],
        );
    }
    /// Render the About modal: ASCII art, version, links, license.
    fn render_about(&self, f: &mut Frame, area: Rect) {
        //get ASCII art from a constant stored in theme.rs
        use super::theme::ASCII_ART;
        let version = env!("CARGO_PKG_VERSION");
        let about_text = format!(
            "{}\n\
            Version {}\n\
            \n\
            A terminal first remote monitoring tool\n\
            \n\
            Website: https://socktop.io\n\
            GitHub: https://github.com/jasonwitty/socktop\n\
            \n\
            License: MIT License\n\
            \n\
            Created by Jason Witty\n\
            jasonpwitty+socktop@proton.me",
            ASCII_ART, version
        );
        // Render the border block
        let block = Block::default()
            .title(" About socktop ")
            .borders(Borders::ALL)
            .style(Style::default().bg(Color::Black).fg(Color::DarkGray));
        f.render_widget(block, area);
        // Calculate inner area manually to avoid any parent styling
        let inner_area = Rect {
            x: area.x + 1,
            y: area.y + 1,
            width: area.width.saturating_sub(2),
            height: area.height.saturating_sub(2), // Leave room for button at bottom
        };
        // Render content area with explicit black background
        f.render_widget(
            Paragraph::new(about_text)
                .style(Style::default().fg(Color::Cyan).bg(Color::Black))
                .alignment(Alignment::Center)
                .wrap(Wrap { trim: false }),
            inner_area,
        );
        // Button area
        let button_area = Rect {
            x: area.x + 1,
            y: area.y + area.height.saturating_sub(2),
            width: area.width.saturating_sub(2),
            height: 1,
        };
        let ok_style = if self.active_button == ModalButton::Ok {
            Style::default()
                .bg(Color::Blue)
                .fg(Color::White)
                .add_modifier(Modifier::BOLD)
        } else {
            Style::default().fg(Color::Blue).bg(Color::Black)
        };
        f.render_widget(
            Paragraph::new("[ Enter ] Close")
                .style(ok_style)
                .alignment(Alignment::Center),
            button_area,
        );
    }
    /// Render the scrollable hotkey-help modal with an optional scrollbar.
    fn render_help(&self, f: &mut Frame, area: Rect) {
        // Static cheat-sheet text; scrolled via `help_scroll_offset`.
        let help_lines = vec![
            "GLOBAL",
            " q/Q/Esc ........ Quit │ a/A ....... About │ h/H ....... Help",
            "",
            "PROCESS LIST",
            " / .............. Start/edit fuzzy search",
            " c/C ............ Clear search filter",
            " ↑/↓ ............ Select/navigate processes",
            " Enter .......... Open Process Details",
            " x/X ............ Clear selection",
            " Click header ... Sort by column (CPU/Mem)",
            " Click row ...... Select process",
            "",
            "SEARCH MODE (after pressing /)",
            " Type ........... Enter search query (fuzzy match)",
            " ↑/↓ ............ Navigate results while typing",
            " Esc ............ Cancel search and clear filter",
            " Enter .......... Apply filter and select first result",
            "",
            "CPU PER-CORE",
            " ←/→ ............ Scroll cores │ PgUp/PgDn ... Page up/down",
            " Home/End ....... Jump to first/last core",
            "",
            "PROCESS DETAILS MODAL",
            " x/X ............ Close modal (all parent modals)",
            " p/P ............ Navigate to parent process",
            " j/k ............ Scroll threads ↓/↑ (1 line)",
            " d/u ............ Scroll threads ↓/↑ (10 lines)",
            " [ / ] .......... Scroll journal ↑/↓",
            " Esc/Enter ...... Close modal",
            "",
            "MODAL NAVIGATION",
            " Tab/→ .......... Next button │ Shift+Tab/← ... Previous button",
            " Enter .......... Confirm/OK │ Esc ............ Cancel/Close",
        ];
        // Render the border block
        let block = Block::default()
            .title(" Hotkey Help (use ↑/↓ to scroll) ")
            .borders(Borders::ALL)
            .style(Style::default().bg(Color::Black).fg(Color::DarkGray));
        f.render_widget(block, area);
        // Split into content area and button area
        let chunks = Layout::default()
            .direction(Direction::Vertical)
            .constraints([Constraint::Min(1), Constraint::Length(1)])
            .split(Rect {
                x: area.x + 1,
                y: area.y + 1,
                width: area.width.saturating_sub(2),
                height: area.height.saturating_sub(2),
            });
        let content_area = chunks[0];
        let button_area = chunks[1];
        // Calculate visible window
        let visible_height = content_area.height as usize;
        let total_lines = help_lines.len();
        let max_scroll = total_lines.saturating_sub(visible_height);
        let scroll_offset = self.help_scroll_offset.min(max_scroll);
        // Get visible lines
        let visible_lines: Vec<Line> = help_lines
            .iter()
            .skip(scroll_offset)
            .take(visible_height)
            .map(|s| Line::from(*s))
            .collect();
        // Render scrollable content
        f.render_widget(
            Paragraph::new(visible_lines)
                .style(Style::default().fg(Color::Cyan).bg(Color::Black))
                .alignment(Alignment::Left),
            content_area,
        );
        // Render scrollbar if needed
        if total_lines > visible_height {
            use ratatui::widgets::{Scrollbar, ScrollbarOrientation, ScrollbarState};
            let scrollbar_area = Rect {
                x: area.x + area.width.saturating_sub(2),
                y: area.y + 1,
                width: 1,
                height: area.height.saturating_sub(2),
            };
            let mut scrollbar_state = ScrollbarState::new(max_scroll).position(scroll_offset);
            let scrollbar = Scrollbar::new(ScrollbarOrientation::VerticalRight)
                .begin_symbol(Some(""))
                .end_symbol(Some(""))
                .style(Style::default().fg(Color::DarkGray));
            f.render_stateful_widget(scrollbar, scrollbar_area, &mut scrollbar_state);
        }
        // Button area
        let ok_style = if self.active_button == ModalButton::Ok {
            Style::default()
                .bg(Color::Blue)
                .fg(Color::White)
                .add_modifier(Modifier::BOLD)
        } else {
            Style::default().fg(Color::Blue).bg(Color::Black)
        };
        f.render_widget(
            Paragraph::new("[ Enter ] Close")
                .style(ok_style)
                .alignment(Alignment::Center),
            button_area,
        );
    }
    /// Compute a rect centered in `r` covering the given percentages of
    /// its width and height (used to size each modal).
    fn centered_rect(&self, percent_x: u16, percent_y: u16, r: Rect) -> Rect {
        let vert = Layout::default()
            .direction(Direction::Vertical)
            .constraints([
                Constraint::Percentage((100 - percent_y) / 2),
                Constraint::Percentage(percent_y),
                Constraint::Percentage((100 - percent_y) / 2),
            ])
            .split(r);
        Layout::default()
            .direction(Direction::Horizontal)
            .constraints([
                Constraint::Percentage((100 - percent_x) / 2),
                Constraint::Percentage(percent_x),
                Constraint::Percentage((100 - percent_x) / 2),
            ])
            .split(vert[1])[1]
    }
}

View File

@ -1,297 +0,0 @@
//! Connection error modal rendering
use std::time::Instant;
use super::modal_format::format_duration;
use super::theme::{
BTN_EXIT_BG_ACTIVE, BTN_EXIT_FG_ACTIVE, BTN_EXIT_FG_INACTIVE, BTN_EXIT_TEXT,
BTN_RETRY_BG_ACTIVE, BTN_RETRY_FG_ACTIVE, BTN_RETRY_FG_INACTIVE, BTN_RETRY_TEXT, ICON_CLUSTER,
ICON_COUNTDOWN_LABEL, ICON_MESSAGE, ICON_OFFLINE_LABEL, ICON_RETRY_LABEL, ICON_WARNING_TITLE,
LARGE_ERROR_ICON, MODAL_AGENT_FG, MODAL_BG, MODAL_BORDER_FG, MODAL_COUNTDOWN_LABEL_FG,
MODAL_FG, MODAL_HINT_FG, MODAL_ICON_PINK, MODAL_OFFLINE_LABEL_FG, MODAL_RETRY_LABEL_FG,
MODAL_TITLE_FG,
};
use ratatui::{
Frame,
layout::{Alignment, Constraint, Direction, Layout, Rect},
style::{Color, Modifier, Style},
text::{Line, Span, Text},
widgets::{Block, Borders, Paragraph, Wrap},
};
use super::modal::{ModalButton, ModalManager};
// Connection-error modal rendering. Split out from modal.rs; the key fix
// here is that all message truncation is now done on char boundaries —
// the previous byte-index slicing (`&s[..max_w - 3]`) panics whenever the
// cut lands inside a multi-byte UTF-8 character (the agent line even
// contains an emoji, and transport errors may carry arbitrary text).
impl ModalManager {
    /// Render the "connection lost" modal: warning banner, a cleaned-up
    /// error message, offline duration / retry counters, an optional
    /// auto-retry countdown, and the Retry / Exit buttons.
    pub(super) fn render_connection_error(
        &self,
        f: &mut Frame,
        area: Rect,
        message: &str,
        disconnected_at: Instant,
        retry_count: u32,
        auto_retry_countdown: Option<u64>,
    ) {
        /// Truncate `s` to at most `max_w` characters, appending "..." when
        /// text is cut. Operates on chars, never byte offsets, so non-ASCII
        /// input cannot cause a panicking slice.
        fn truncate_display(s: &str, max_w: usize) -> String {
            if s.chars().count() > max_w {
                let kept: String = s.chars().take(max_w.saturating_sub(3)).collect();
                format!("{kept}...")
            } else {
                s.to_string()
            }
        }
        let duration_text = format_duration(disconnected_at.elapsed());
        // Title row / body / button row.
        let chunks = Layout::default()
            .direction(Direction::Vertical)
            .constraints([
                Constraint::Length(3),
                Constraint::Min(4),
                Constraint::Length(4),
            ])
            .split(area);
        let block = Block::default()
            .title(ICON_WARNING_TITLE)
            .title_style(
                Style::default()
                    .fg(MODAL_TITLE_FG)
                    .add_modifier(Modifier::BOLD),
            )
            .borders(Borders::ALL)
            .border_style(Style::default().fg(MODAL_BORDER_FG))
            .style(Style::default().bg(MODAL_BG).fg(MODAL_FG));
        f.render_widget(block, area);
        let content_area = chunks[1];
        // Character budget for one message line inside the modal body.
        let max_w = content_area.width.saturating_sub(15) as usize;
        // Normalize noisy transport errors into a short, readable message.
        let clean_message = if message.to_lowercase().contains("hostname verification")
            || message.contains("socktop_connector")
        {
            "Connection failed - hostname verification disabled".to_string()
        } else if message.contains("Failed to fetch metrics:") {
            // Keep only the part after the first colon.
            if let Some(p) = message.find(':') {
                truncate_display(message[p + 1..].trim(), max_w)
            } else {
                "Connection error".to_string()
            }
        } else if message.starts_with("Retry failed:") {
            if let Some(p) = message.find(':') {
                truncate_display(message[p + 1..].trim(), max_w)
            } else {
                "Retry failed".to_string()
            }
        } else {
            truncate_display(message, max_w)
        };
        let agent_text = truncate_display("📡 Cannot connect to socktop agent", max_w);
        let message_text = truncate_display(&clean_message, max_w);
        let duration_display = truncate_display(&duration_text, max_w);
        let retry_display = truncate_display(&retry_count.to_string(), max_w);
        let countdown_text = auto_retry_countdown.map(|c| {
            if c == 0 {
                "Auto retry now...".to_string()
            } else {
                format!("{c}s")
            }
        });
        // Determine if we have enough space (height + width) to show large centered icon
        let icon_max_width = LARGE_ERROR_ICON
            .iter()
            .map(|l| l.trim().chars().count())
            .max()
            .unwrap_or(0) as u16;
        let large_allowed = content_area.height >= (LARGE_ERROR_ICON.len() as u16 + 8)
            && content_area.width >= icon_max_width + 6; // small margin for borders/padding
        let mut icon_lines: Vec<Line> = Vec::new();
        if large_allowed {
            for &raw in LARGE_ERROR_ICON.iter() {
                let trimmed = raw.trim();
                icon_lines.push(Line::from(
                    trimmed
                        .chars()
                        .map(|ch| {
                            if ch == '!' {
                                // Exclamation mark pops in bold white.
                                Span::styled(
                                    ch.to_string(),
                                    Style::default()
                                        .fg(Color::White)
                                        .add_modifier(Modifier::BOLD),
                                )
                            } else if ch == '/' || ch == '\\' || ch == '_' {
                                // keep outline in pink
                                Span::styled(
                                    ch.to_string(),
                                    Style::default()
                                        .fg(MODAL_ICON_PINK)
                                        .add_modifier(Modifier::BOLD),
                                )
                            } else if ch == ' ' {
                                Span::raw(" ")
                            } else {
                                Span::styled(ch.to_string(), Style::default().fg(MODAL_ICON_PINK))
                            }
                        })
                        .collect::<Vec<_>>(),
                ));
            }
            icon_lines.push(Line::from("")); // blank spacer line below icon
        }
        let mut info_lines: Vec<Line> = Vec::new();
        if !large_allowed {
            // Small fallback icon when the big one does not fit.
            info_lines.push(Line::from(vec![Span::styled(
                ICON_CLUSTER,
                Style::default().fg(MODAL_ICON_PINK),
            )]));
            info_lines.push(Line::from(""));
        }
        info_lines.push(Line::from(vec![Span::styled(
            &agent_text,
            Style::default().fg(MODAL_AGENT_FG),
        )]));
        info_lines.push(Line::from(""));
        info_lines.push(Line::from(vec![
            Span::styled(ICON_MESSAGE, Style::default().fg(MODAL_HINT_FG)),
            Span::styled(&message_text, Style::default().fg(MODAL_AGENT_FG)),
        ]));
        info_lines.push(Line::from(""));
        info_lines.push(Line::from(vec![
            Span::styled(
                ICON_OFFLINE_LABEL,
                Style::default().fg(MODAL_OFFLINE_LABEL_FG),
            ),
            Span::styled(
                &duration_display,
                Style::default()
                    .fg(Color::White)
                    .add_modifier(Modifier::BOLD),
            ),
        ]));
        info_lines.push(Line::from(vec![
            Span::styled(ICON_RETRY_LABEL, Style::default().fg(MODAL_RETRY_LABEL_FG)),
            Span::styled(
                &retry_display,
                Style::default()
                    .fg(Color::White)
                    .add_modifier(Modifier::BOLD),
            ),
        ]));
        if let Some(cd) = &countdown_text {
            info_lines.push(Line::from(vec![
                Span::styled(
                    ICON_COUNTDOWN_LABEL,
                    Style::default().fg(MODAL_COUNTDOWN_LABEL_FG),
                ),
                Span::styled(
                    cd,
                    Style::default()
                        .fg(Color::White)
                        .add_modifier(Modifier::BOLD),
                ),
            ]));
        }
        // Inset the content horizontally by two cells on each side.
        let constrained = Rect {
            x: content_area.x + 2,
            y: content_area.y,
            width: content_area.width.saturating_sub(4),
            height: content_area.height,
        };
        if large_allowed {
            let split = Layout::default()
                .direction(Direction::Vertical)
                .constraints([
                    Constraint::Length(icon_lines.len() as u16),
                    Constraint::Min(0),
                ])
                .split(constrained);
            // Center the icon block; each line already trimmed so per-line centering keeps shape
            f.render_widget(
                Paragraph::new(Text::from(icon_lines))
                    .alignment(Alignment::Center)
                    .wrap(Wrap { trim: false }),
                split[0],
            );
            f.render_widget(
                Paragraph::new(Text::from(info_lines))
                    .alignment(Alignment::Center)
                    .wrap(Wrap { trim: true }),
                split[1],
            );
        } else {
            f.render_widget(
                Paragraph::new(Text::from(info_lines))
                    .alignment(Alignment::Center)
                    .wrap(Wrap { trim: true }),
                constrained,
            );
        }
        // Drop the last row so the buttons sit just above the border.
        let button_area = Rect {
            x: chunks[2].x,
            y: chunks[2].y,
            width: chunks[2].width,
            height: chunks[2].height.saturating_sub(1),
        };
        self.render_connection_error_buttons(f, button_area);
    }
    /// Draw the Retry / Exit buttons, highlighting whichever currently
    /// holds keyboard focus.
    fn render_connection_error_buttons(&self, f: &mut Frame, area: Rect) {
        // 5-way split centers the two buttons with outer padding.
        let button_chunks = Layout::default()
            .direction(Direction::Horizontal)
            .constraints([
                Constraint::Percentage(30),
                Constraint::Percentage(15),
                Constraint::Percentage(10),
                Constraint::Percentage(15),
                Constraint::Percentage(30),
            ])
            .split(area);
        let retry_style = if self.active_button == ModalButton::Retry {
            Style::default()
                .bg(BTN_RETRY_BG_ACTIVE)
                .fg(BTN_RETRY_FG_ACTIVE)
                .add_modifier(Modifier::BOLD)
        } else {
            Style::default()
                .fg(BTN_RETRY_FG_INACTIVE)
                .add_modifier(Modifier::DIM)
        };
        let exit_style = if self.active_button == ModalButton::Exit {
            Style::default()
                .bg(BTN_EXIT_BG_ACTIVE)
                .fg(BTN_EXIT_FG_ACTIVE)
                .add_modifier(Modifier::BOLD)
        } else {
            Style::default()
                .fg(BTN_EXIT_FG_INACTIVE)
                .add_modifier(Modifier::DIM)
        };
        f.render_widget(
            Paragraph::new(Text::from(Line::from(vec![Span::styled(
                BTN_RETRY_TEXT,
                retry_style,
            )])))
            .alignment(Alignment::Center),
            button_chunks[1],
        );
        f.render_widget(
            Paragraph::new(Text::from(Line::from(vec![Span::styled(
                BTN_EXIT_TEXT,
                exit_style,
            )])))
            .alignment(Alignment::Center),
            button_chunks[3],
        );
    }
}

View File

@ -1,112 +0,0 @@
//! Formatting utilities for process details modal
use std::time::Duration;
/// Format an uptime given in seconds as a short human-readable string.
///
/// Uses the coarsest useful precision: once days are present the seconds
/// field is dropped ("1d 2h 3m"); otherwise seconds are always shown.
pub fn format_uptime(secs: u64) -> String {
    let (days, rem) = (secs / 86400, secs % 86400);
    let (hours, rem) = (rem / 3600, rem % 3600);
    let (minutes, seconds) = (rem / 60, rem % 60);
    match (days, hours, minutes) {
        (d, h, m) if d > 0 => format!("{d}d {h}h {m}m"),
        (_, h, m) if h > 0 => format!("{h}h {m}m {seconds}s"),
        (_, _, m) if m > 0 => format!("{m}m {seconds}s"),
        _ => format!("{seconds}s"),
    }
}
/// Format a `Duration` as "Hh Mm Ss", omitting leading zero-valued fields.
pub fn format_duration(duration: Duration) -> String {
    let secs = duration.as_secs();
    let h = secs / 3600;
    let m = secs / 60 % 60;
    let s = secs % 60;
    match (h, m) {
        (0, 0) => format!("{s}s"),
        (0, _) => format!("{m}m {s}s"),
        _ => format!("{h}h {m}m {s}s"),
    }
}
/// Normalize a per-process CPU reading (which can exceed 100% on
/// multi-core hosts) to a 0-100% scale by dividing by the thread count.
/// A thread count of 0 is treated as 1 to avoid division by zero.
pub fn normalize_cpu_usage(cpu_usage: f32, thread_count: u32) -> f32 {
    let divisor = f32::max(thread_count as f32, 1.0);
    f32::min(cpu_usage / divisor, 100.0)
}
/// Round a chart's observed maximum up to the next 10% step, keeping the
/// result within [10, 100] so the axis never collapses or overshoots.
pub fn calculate_dynamic_y_max(max_value: f64) -> f64 {
    let stepped = (max_value / 10.0).ceil() * 10.0;
    stepped.clamp(10.0, 100.0)
}
// Unit tests for the formatting helpers above: uptime/duration rendering,
// CPU normalization, and dynamic Y-axis rounding.
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_format_uptime_seconds() {
        assert_eq!(format_uptime(45), "45s");
    }
    #[test]
    fn test_format_uptime_minutes() {
        assert_eq!(format_uptime(125), "2m 5s");
    }
    #[test]
    fn test_format_uptime_hours() {
        assert_eq!(format_uptime(3665), "1h 1m 5s");
    }
    #[test]
    fn test_format_uptime_days() {
        // Day-level output drops the seconds field.
        assert_eq!(format_uptime(90061), "1d 1h 1m");
    }
    #[test]
    fn test_normalize_cpu_single_thread() {
        assert_eq!(normalize_cpu_usage(50.0, 1), 50.0);
    }
    #[test]
    fn test_normalize_cpu_multi_thread() {
        assert_eq!(normalize_cpu_usage(400.0, 4), 100.0);
    }
    #[test]
    fn test_normalize_cpu_zero_threads() {
        // Should default to 1 thread to avoid division by zero
        assert_eq!(normalize_cpu_usage(100.0, 0), 100.0);
    }
    #[test]
    fn test_normalize_cpu_caps_at_100() {
        assert_eq!(normalize_cpu_usage(150.0, 1), 100.0);
    }
    #[test]
    fn test_dynamic_y_max_rounds_up() {
        assert_eq!(calculate_dynamic_y_max(15.0), 20.0);
        assert_eq!(calculate_dynamic_y_max(25.0), 30.0);
        assert_eq!(calculate_dynamic_y_max(5.0), 10.0);
    }
    #[test]
    fn test_dynamic_y_max_minimum() {
        assert_eq!(calculate_dynamic_y_max(0.0), 10.0);
        assert_eq!(calculate_dynamic_y_max(3.0), 10.0);
    }
    #[test]
    fn test_dynamic_y_max_caps_at_100() {
        assert_eq!(calculate_dynamic_y_max(95.0), 100.0);
        assert_eq!(calculate_dynamic_y_max(100.0), 100.0);
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -1,77 +0,0 @@
//! Type definitions for modal system
use std::time::Instant;
/// History data for process metrics rendering.
/// Borrowed ring buffers of recent samples for one process; the renderer
/// reads them to draw sparklines/charts in the details modal.
pub struct ProcessHistoryData<'a> {
    // CPU usage samples.
    pub cpu: &'a std::collections::VecDeque<f32>,
    // Memory usage samples (bytes, per the `max_mem_bytes` field of
    // `ProcessModalData` — TODO confirm units against the collector).
    pub mem: &'a std::collections::VecDeque<u64>,
    // I/O read samples.
    pub io_read: &'a std::collections::VecDeque<u64>,
    // I/O write samples.
    pub io_write: &'a std::collections::VecDeque<u64>,
}
/// Process data for modal rendering.
/// Bundles everything the process-details modal needs for one frame.
pub struct ProcessModalData<'a> {
    // Latest detailed metrics for the selected process, if fetched.
    pub details: Option<&'a socktop_connector::ProcessMetricsResponse>,
    // Journal/log lines for the process, if available from the agent.
    pub journal: Option<&'a socktop_connector::JournalResponse>,
    // Recent sample history used for the charts.
    pub history: ProcessHistoryData<'a>,
    // Scale ceiling for the memory chart, in bytes.
    pub max_mem_bytes: u64,
    // True when the agent cannot provide per-process details.
    pub unsupported: bool,
}
/// Parameters for rendering scatter plot.
/// Crate-internal bundle passed to the per-thread CPU-time scatter plot.
pub(super) struct ScatterPlotParams<'a> {
    // Process whose threads are plotted.
    pub process: &'a socktop_connector::DetailedProcessInfo,
    // Main thread's user/system CPU time (milliseconds, per field names).
    pub main_user_ms: f64,
    pub main_system_ms: f64,
    // Axis maxima for the user/system dimensions.
    pub max_user: f64,
    pub max_system: f64,
}
/// Every modal window the TUI can display. The variant on top of the
/// `ModalManager` stack determines rendering and key handling.
#[derive(Debug, Clone)]
pub enum ModalType {
    /// Shown when the agent connection drops; carries retry bookkeeping.
    ConnectionError {
        message: String,
        disconnected_at: Instant,
        retry_count: u32,
        // Seconds until the next automatic retry, if auto-retry is armed.
        auto_retry_countdown: Option<u64>,
    },
    /// Detailed view of a single process, identified by PID.
    ProcessDetails {
        pid: u32,
    },
    /// Static "about" screen with version and project links.
    About,
    /// Scrollable hotkey cheat sheet.
    Help,
    /// Generic yes/no dialog (not wired up anywhere yet).
    #[allow(dead_code)]
    Confirmation {
        title: String,
        message: String,
        confirm_text: String,
        cancel_text: String,
    },
    /// Generic informational dialog (not wired up anywhere yet).
    #[allow(dead_code)]
    Info {
        title: String,
        message: String,
    },
}
/// Result of feeding a key event to the modal manager; tells the app
/// whether the key was consumed and what follow-up action to take.
#[derive(Debug, Clone, PartialEq)]
pub enum ModalAction {
    None,    // Modal didn't handle the key, pass to main window
    Handled, // Modal handled the key, don't pass to main window
    // Reconnect to the agent (connection-error modal).
    RetryConnection,
    // Quit the application entirely.
    ExitApp,
    Confirm,
    Cancel,
    // Top modal was closed without a specific outcome.
    Dismiss,
    SwitchToParentProcess(u32), // Switch to viewing parent process details
}
/// Buttons that can hold keyboard focus inside a modal; which subset is
/// meaningful depends on the active `ModalType`.
#[derive(Debug, Clone, PartialEq)]
pub enum ModalButton {
    Retry,
    Exit,
    Confirm,
    Cancel,
    Ok,
}

View File

@ -1,30 +0,0 @@
//! Network sparklines (download/upload).
use ratatui::{
layout::Rect,
style::{Color, Style},
widgets::{Block, Borders, Sparkline},
};
use std::collections::VecDeque;
/// Render a bordered sparkline for a network throughput history buffer,
/// keeping only the most recent samples that fit inside `area`.
pub fn draw_net_spark(
    f: &mut ratatui::Frame<'_>,
    area: Rect,
    title: &str,
    hist: &VecDeque<u64>,
    color: Color,
) {
    // Two columns are consumed by the border; the rest hold data points.
    let capacity = area.width.saturating_sub(2) as usize;
    // Take the newest `capacity` samples from the history buffer.
    let tail: Vec<u64> = hist
        .iter()
        .skip(hist.len().saturating_sub(capacity))
        .copied()
        .collect();
    let bordered = Block::default()
        .borders(Borders::ALL)
        .title(title.to_string());
    f.render_widget(
        Sparkline::default()
            .block(bordered)
            .data(&tail)
            .style(Style::default().fg(color)),
        area,
    );
}

View File

@ -1,638 +0,0 @@
//! Top processes table with per-cell coloring, zebra striping, sorting, and a scrollbar.
use crossterm::event::{MouseButton, MouseEvent, MouseEventKind};
use ratatui::style::Modifier;
use ratatui::{
layout::{Constraint, Direction, Layout, Rect},
style::{Color, Style},
text::{Line, Span},
widgets::{Block, Borders, Paragraph, Table},
};
use std::cmp::Ordering;
use crate::types::Metrics;
use crate::ui::cpu::{per_core_clamp, per_core_handle_scrollbar_mouse};
use crate::ui::theme::{
PROCESS_SELECTION_BG, PROCESS_SELECTION_FG, PROCESS_TOOLTIP_BG, PROCESS_TOOLTIP_FG, SB_ARROW,
SB_THUMB, SB_TRACK,
};
use crate::ui::util::human;
/// Case-insensitive subsequence match: every character of `needle` must
/// appear in `haystack` in the same order, though not necessarily
/// adjacently. An empty needle matches everything.
fn fuzzy_match(haystack: &str, needle: &str) -> bool {
    let hay = haystack.to_lowercase();
    // The iterator advances monotonically, so each needle char must be
    // found strictly after the previous match.
    let mut remaining = hay.chars();
    needle
        .to_lowercase()
        .chars()
        .all(|wanted| remaining.any(|c| c == wanted))
}
/// Return the indices of `metrics.top_processes` that pass the fuzzy name
/// filter, ordered by the requested sort column (always descending).
pub fn get_filtered_sorted_indices(
    metrics: &Metrics,
    search_query: &str,
    sort_by: ProcSortBy,
) -> Vec<usize> {
    // Keep every row when the query is empty, otherwise fuzzy-match names.
    let mut rows: Vec<usize> = (0..metrics.top_processes.len())
        .filter(|&i| {
            search_query.is_empty() || fuzzy_match(&metrics.top_processes[i].name, search_query)
        })
        .collect();
    // Order the surviving rows by the requested column, highest first.
    match sort_by {
        ProcSortBy::CpuDesc => rows.sort_by(|&a, &b| {
            metrics.top_processes[b]
                .cpu_usage
                .partial_cmp(&metrics.top_processes[a].cpu_usage)
                .unwrap_or(Ordering::Equal)
        }),
        ProcSortBy::MemDesc => {
            rows.sort_by_key(|&i| std::cmp::Reverse(metrics.top_processes[i].mem_bytes))
        }
    }
    rows
}
/// Parameters for drawing the top processes table.
pub struct ProcessDisplayParams<'a> {
    // Latest metrics snapshot; the table is skipped when None.
    pub metrics: Option<&'a Metrics>,
    // First visible row index (vertical scroll position).
    pub scroll_offset: usize,
    // Active sort column/order.
    pub sort_by: ProcSortBy,
    // PID of the currently selected process, if any.
    pub selected_process_pid: Option<u32>,
    // Row index of the selection within the filtered list, if any.
    pub selected_process_index: Option<usize>,
    // Current fuzzy-search text ("" means no filter).
    pub search_query: &'a str,
    // True while the user is typing in the search box.
    pub search_active: bool,
}
/// Sort order for the process table; both options sort descending.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum ProcSortBy {
    /// By CPU usage, highest first (the default).
    #[default]
    CpuDesc,
    /// By resident memory, highest first.
    MemDesc,
}
// Keep the original header widths here so drawing and hit-testing match.
// These constraints feed both the Table widget and the Layout used for
// header-click hit-testing in the mouse handlers — keep them in sync.
const COLS: [Constraint; 5] = [
    Constraint::Length(8),      // PID
    Constraint::Percentage(40), // Name
    Constraint::Length(8),      // CPU %
    Constraint::Length(12),     // Mem
    Constraint::Length(8),      // Mem %
];
/// Draw the "Top Processes" pane: outer bordered block, optional search box,
/// table header, the visible slice of filtered/sorted rows, a selection
/// tooltip, and a right-edge scrollbar. Pure rendering — scroll offset and
/// selection state arrive via `params` and are not mutated here.
pub fn draw_top_processes(f: &mut ratatui::Frame<'_>, area: Rect, params: ProcessDisplayParams) {
    // Draw outer block and title
    let Some(mm) = params.metrics else { return };
    // Prefer the agent-reported total count; fall back to the number of
    // processes actually included in this payload.
    let total = mm.process_count.unwrap_or(mm.top_processes.len());
    let block = Block::default()
        .borders(Borders::ALL)
        .title(format!("Top Processes ({total} total)"));
    f.render_widget(block, area);
    // Inner area (reserve space for search box if active)
    let inner = Rect {
        x: area.x + 1,
        y: area.y + 1,
        width: area.width.saturating_sub(2),
        height: area.height.saturating_sub(2),
    };
    // Draw search box if active
    let content_start_y = if params.search_active || !params.search_query.is_empty() {
        let search_area = Rect {
            x: inner.x,
            y: inner.y,
            width: inner.width,
            height: 3, // Height for border + content
        };
        // Trailing '_' acts as a poor-man's cursor while editing.
        let search_text = if params.search_active {
            format!("Search: {}_", params.search_query)
        } else {
            format!(
                "Filter: {} (press / to edit, c to clear)",
                params.search_query
            )
        };
        let search_block = Block::default()
            .borders(Borders::ALL)
            .border_style(Style::default().fg(Color::Yellow));
        let search_paragraph = Paragraph::new(search_text)
            .block(search_block)
            .style(Style::default().fg(Color::Yellow));
        f.render_widget(search_paragraph, search_area);
        inner.y + 3
    } else {
        inner.y
    };
    // Content area (reserve 2 columns for scrollbar)
    let inner = Rect {
        x: inner.x,
        y: content_start_y,
        width: inner.width,
        height: inner.height.saturating_sub(content_start_y - (area.y + 1)),
    };
    if inner.height < 1 || inner.width < 3 {
        return;
    }
    let content = Rect {
        x: inner.x,
        y: inner.y,
        width: inner.width.saturating_sub(2),
        height: inner.height,
    };
    // Get filtered and sorted indices
    let idxs = get_filtered_sorted_indices(mm, params.search_query, params.sort_by);
    // Scrolling
    let total_rows = idxs.len();
    let header_rows = 1usize;
    let viewport_rows = content.height.saturating_sub(header_rows as u16) as usize;
    let max_off = total_rows.saturating_sub(viewport_rows);
    let offset = params.scroll_offset.min(max_off);
    let show_n = total_rows.saturating_sub(offset).min(viewport_rows);
    // Build visible rows
    let total_mem_bytes = mm.mem_total.max(1); // guard against divide-by-zero
    // Highest CPU value in this payload; that row is rendered bold below.
    let peak_cpu = mm
        .top_processes
        .iter()
        .map(|p| p.cpu_usage)
        .fold(0.0_f32, f32::max);
    let rows_iter = idxs.iter().skip(offset).take(show_n).map(|&ix| {
        let p = &mm.top_processes[ix];
        let mem_pct = (p.mem_bytes as f64 / total_mem_bytes as f64) * 100.0;
        let cpu_val = p.cpu_usage;
        // Traffic-light coloring for CPU load.
        let cpu_fg = match cpu_val {
            x if x < 25.0 => Color::Green,
            x if x < 60.0 => Color::Yellow,
            _ => Color::Red,
        };
        // Cool-to-hot coloring for memory share.
        let mem_fg = match mem_pct {
            x if x < 5.0 => Color::Blue,
            x if x < 20.0 => Color::Magenta,
            _ => Color::Red,
        };
        let mut emphasis = if (cpu_val - peak_cpu).abs() < f32::EPSILON {
            Style::default().add_modifier(Modifier::BOLD)
        } else {
            Style::default()
        };
        // Check if this process is selected - prioritize PID matching
        let is_selected = if let Some(selected_pid) = params.selected_process_pid {
            selected_pid == p.pid
        } else if let Some(selected_idx) = params.selected_process_index {
            selected_idx == ix // ix is the absolute index in the sorted list
        } else {
            false
        };
        // Apply selection highlighting
        if is_selected {
            emphasis = emphasis
                .bg(PROCESS_SELECTION_BG)
                .fg(PROCESS_SELECTION_FG)
                .add_modifier(Modifier::BOLD);
        }
        let cpu_str = fmt_cpu_pct(cpu_val);
        ratatui::widgets::Row::new(vec![
            ratatui::widgets::Cell::from(p.pid.to_string())
                .style(Style::default().fg(Color::DarkGray)),
            ratatui::widgets::Cell::from(p.name.clone()),
            ratatui::widgets::Cell::from(cpu_str).style(Style::default().fg(cpu_fg)),
            ratatui::widgets::Cell::from(human(p.mem_bytes)),
            ratatui::widgets::Cell::from(format!("{mem_pct:.2}%"))
                .style(Style::default().fg(mem_fg)),
        ])
        .style(emphasis)
    });
    // Header with sort indicator
    let cpu_hdr = match params.sort_by {
        ProcSortBy::CpuDesc => "CPU % •",
        _ => "CPU %",
    };
    let mem_hdr = match params.sort_by {
        ProcSortBy::MemDesc => "Mem •",
        _ => "Mem",
    };
    let header = ratatui::widgets::Row::new(vec!["PID", "Name", cpu_hdr, mem_hdr, "Mem %"]).style(
        Style::default()
            .fg(Color::Cyan)
            .add_modifier(Modifier::BOLD),
    );
    // Render table inside content area (no borders here; outer block already drawn)
    let table = Table::new(rows_iter, COLS.to_vec())
        .header(header)
        .column_spacing(1);
    f.render_widget(table, content);
    // Draw tooltip if a process is selected
    if let Some(selected_pid) = params.selected_process_pid {
        // Find the selected process to get its name
        let process_info = if let Some(metrics) = params.metrics {
            metrics
                .top_processes
                .iter()
                .find(|p| p.pid == selected_pid)
                // NOTE(review): no separator between the PID and the process
                // name — a separator glyph may have been lost; confirm the
                // intended format.
                .map(|p| format!("PID {}{}", p.pid, p.name))
                .unwrap_or_else(|| format!("PID {selected_pid}"))
        } else {
            format!("PID {selected_pid}")
        };
        let tooltip_text = format!("{process_info} | Enter for details • X to unselect");
        // NOTE(review): len() counts bytes, not display cells; the "•"
        // characters make this an over-estimate (extra padding, not clipping).
        let tooltip_width = tooltip_text.len() as u16 + 2; // Add padding
        let tooltip_height = 3;
        // Position tooltip at bottom-right of the processes area
        if area.width > tooltip_width + 2 && area.height > tooltip_height + 1 {
            let tooltip_area = Rect {
                x: area.x + area.width.saturating_sub(tooltip_width + 1),
                y: area.y + area.height.saturating_sub(tooltip_height + 1),
                width: tooltip_width,
                height: tooltip_height,
            };
            let tooltip_block = Block::default().borders(Borders::ALL).style(
                Style::default()
                    .bg(PROCESS_TOOLTIP_BG)
                    .fg(PROCESS_TOOLTIP_FG),
            );
            let tooltip_paragraph = Paragraph::new(tooltip_text)
                .block(tooltip_block)
                .wrap(ratatui::widgets::Wrap { trim: true });
            f.render_widget(tooltip_paragraph, tooltip_area);
        }
    }
    // Draw scrollbar like CPU pane
    let scroll_area = Rect {
        x: inner.x + inner.width.saturating_sub(1),
        y: inner.y,
        width: 1,
        height: inner.height,
    };
    if scroll_area.height >= 3 {
        // Track excludes the two arrow cells at top and bottom.
        let track = (scroll_area.height - 2) as usize;
        let total = total_rows.max(1);
        let view = viewport_rows.clamp(1, total);
        let max_off = total.saturating_sub(view);
        // Thumb length proportional to the visible fraction, at least 1 cell.
        let thumb_len = (track * view).div_ceil(total).max(1).min(track);
        let thumb_top = if max_off == 0 {
            0
        } else {
            // Rounded-to-nearest thumb position along the track.
            ((track - thumb_len) * offset + max_off / 2) / max_off
        };
        // Build lines: top arrow, track (with thumb), bottom arrow
        // NOTE(review): the Span glyphs below appear empty here — the
        // arrow/track/thumb characters were likely lost in transit; confirm
        // against the CPU pane's scrollbar glyphs.
        let mut lines: Vec<Line> = Vec::with_capacity(scroll_area.height as usize);
        lines.push(Line::from(Span::styled("", Style::default().fg(SB_ARROW))));
        for i in 0..track {
            if i >= thumb_top && i < thumb_top + thumb_len {
                lines.push(Line::from(Span::styled("", Style::default().fg(SB_THUMB))));
            } else {
                lines.push(Line::from(Span::styled("", Style::default().fg(SB_TRACK))));
            }
        }
        lines.push(Line::from(Span::styled("", Style::default().fg(SB_ARROW))));
        f.render_widget(Paragraph::new(lines), scroll_area);
    }
}
/// Format a CPU percentage right-aligned in a 5-character field with one
/// decimal place, clamped to the 0–100 range (e.g. " 42.7", "100.0").
fn fmt_cpu_pct(v: f32) -> String {
    let clamped = v.clamp(0.0, 100.0);
    format!("{clamped:>5.1}")
}
/// Parameters for process key event handling: Up/Down selection navigation,
/// `x`/`X` to clear the selection, and Enter to open details.
pub struct ProcessKeyParams<'a> {
    /// Currently selected process PID (the authoritative selection key).
    pub selected_process_pid: &'a mut Option<u32>,
    /// Absolute index of the selection within `metrics.top_processes`.
    pub selected_process_index: &'a mut Option<usize>,
    /// The key event to handle.
    pub key: crossterm::event::KeyEvent,
    /// Latest metrics snapshot; `None` until the first payload arrives.
    pub metrics: Option<&'a Metrics>,
    /// Active sort order (determines navigation order).
    pub sort_by: ProcSortBy,
    /// Active fuzzy-filter query (determines which rows are navigable).
    pub search_query: &'a str,
}
/// LEGACY: Use processes_handle_key_with_selection for enhanced functionality
/// Handle keyboard scrolling (Up/Down/PageUp/PageDown/Home/End) by delegating
/// to the shared per-core scroll-key handler.
#[allow(dead_code)]
pub fn processes_handle_key(
    scroll_offset: &mut usize,
    key: crossterm::event::KeyEvent,
    page_size: usize,
) {
    crate::ui::cpu::per_core_handle_key(scroll_offset, key, page_size);
}
pub fn processes_handle_key_with_selection(params: ProcessKeyParams) -> bool {
use crossterm::event::KeyCode;
match params.key.code {
KeyCode::Up => {
// Navigate through filtered and sorted results
if let Some(m) = params.metrics {
let idxs = get_filtered_sorted_indices(m, params.search_query, params.sort_by);
if idxs.is_empty() {
// No filtered results, clear selection
*params.selected_process_index = None;
*params.selected_process_pid = None;
} else if params.selected_process_index.is_none()
|| params.selected_process_pid.is_none()
{
// No selection - select the first process in filtered/sorted order
let first_idx = idxs[0];
*params.selected_process_index = Some(first_idx);
*params.selected_process_pid = Some(m.top_processes[first_idx].pid);
} else if let Some(current_idx) = *params.selected_process_index {
// Find current position in filtered/sorted list
if let Some(pos) = idxs.iter().position(|&idx| idx == current_idx) {
if pos > 0 {
// Move up in filtered/sorted list
let new_idx = idxs[pos - 1];
*params.selected_process_index = Some(new_idx);
*params.selected_process_pid = Some(m.top_processes[new_idx].pid);
}
} else {
// Current selection not in filtered list, select first result
let first_idx = idxs[0];
*params.selected_process_index = Some(first_idx);
*params.selected_process_pid = Some(m.top_processes[first_idx].pid);
}
}
}
true // Handled
}
KeyCode::Down => {
// Navigate through filtered and sorted results
if let Some(m) = params.metrics {
let idxs = get_filtered_sorted_indices(m, params.search_query, params.sort_by);
if idxs.is_empty() {
// No filtered results, clear selection
*params.selected_process_index = None;
*params.selected_process_pid = None;
} else if params.selected_process_index.is_none()
|| params.selected_process_pid.is_none()
{
// No selection - select the first process in filtered/sorted order
let first_idx = idxs[0];
*params.selected_process_index = Some(first_idx);
*params.selected_process_pid = Some(m.top_processes[first_idx].pid);
} else if let Some(current_idx) = *params.selected_process_index {
// Find current position in filtered/sorted list
if let Some(pos) = idxs.iter().position(|&idx| idx == current_idx) {
if pos + 1 < idxs.len() {
// Move down in filtered/sorted list
let new_idx = idxs[pos + 1];
*params.selected_process_index = Some(new_idx);
*params.selected_process_pid = Some(m.top_processes[new_idx].pid);
}
} else {
// Current selection not in filtered list, select first result
let first_idx = idxs[0];
*params.selected_process_index = Some(first_idx);
*params.selected_process_pid = Some(m.top_processes[first_idx].pid);
}
}
}
true // Handled
}
KeyCode::Char('x') | KeyCode::Char('X') => {
// Unselect any selected process
if params.selected_process_pid.is_some() || params.selected_process_index.is_some() {
*params.selected_process_pid = None;
*params.selected_process_index = None;
true // Handled
} else {
false // No selection to clear
}
}
KeyCode::Enter => {
// Signal that Enter was pressed with a selection
params.selected_process_pid.is_some() // Return true if we have a selection to handle
}
_ => {
// No other keys handled - let scrollbar handle all navigation
false
}
}
}
/// Handle mouse for content scrolling and scrollbar dragging.
/// Returns Some(new_sort) when the "CPU %" or "Mem" header cell was clicked
/// with the left button.
/// LEGACY: Use processes_handle_mouse_with_selection for enhanced functionality
#[allow(dead_code)]
pub fn processes_handle_mouse(
    scroll_offset: &mut usize,
    drag: &mut Option<crate::ui::cpu::PerCoreScrollDrag>,
    mouse: MouseEvent,
    area: Rect,
    total_rows: usize,
) -> Option<ProcSortBy> {
    // Recompute the same inner/content geometry used by draw_top_processes.
    let inner = Rect {
        x: area.x + 1,
        y: area.y + 1,
        width: area.width.saturating_sub(2),
        height: area.height.saturating_sub(2),
    };
    if inner.height == 0 || inner.width <= 2 {
        return None;
    }
    // Content excludes the 2 columns reserved for the scrollbar.
    let content = Rect {
        width: inner.width.saturating_sub(2),
        ..inner
    };
    // Scrollbar interactions: arrow clicks, page clicks, thumb drags.
    per_core_handle_scrollbar_mouse(scroll_offset, drag, mouse, area, total_rows);
    // Wheel scrolling while the pointer is inside the content area.
    crate::ui::cpu::per_core_handle_mouse(scroll_offset, mouse, content, content.height as usize);
    // Header click changes the sort column.
    let header_area = Rect {
        height: 1,
        ..content
    };
    let in_header = mouse.row == header_area.y
        && (header_area.x..header_area.x + header_area.width).contains(&mouse.column);
    if in_header && matches!(mouse.kind, MouseEventKind::Down(MouseButton::Left)) {
        // Split the header with the exact column constraints used for drawing.
        let cols = Layout::default()
            .direction(Direction::Horizontal)
            .constraints(COLS.to_vec())
            .split(header_area);
        let hit = |r: &Rect| (r.x..r.x + r.width).contains(&mouse.column);
        if hit(&cols[2]) {
            return Some(ProcSortBy::CpuDesc);
        }
        if hit(&cols[3]) {
            return Some(ProcSortBy::MemDesc);
        }
    }
    // Keep the offset within the valid range for the current viewport.
    per_core_clamp(
        scroll_offset,
        total_rows,
        (content.height.saturating_sub(1)) as usize,
    );
    None
}
/// Parameters for process mouse event handling
pub struct ProcessMouseParams<'a> {
    /// Scroll offset into the filtered/sorted row list (mutated in place).
    pub scroll_offset: &'a mut usize,
    /// Selected process PID; updated on a left-click of a data row.
    pub selected_process_pid: &'a mut Option<u32>,
    /// Absolute index of the selection in `top_processes`; updated on click.
    pub selected_process_index: &'a mut Option<usize>,
    /// In-progress scrollbar drag state, if any.
    pub drag: &'a mut Option<crate::ui::cpu::PerCoreScrollDrag>,
    /// The mouse event to handle.
    pub mouse: MouseEvent,
    /// Outer widget area (including the border), as passed to draw.
    pub area: Rect,
    /// Total number of rows, used for scroll clamping.
    pub total_rows: usize,
    /// Latest metrics snapshot; `None` until the first payload arrives.
    pub metrics: Option<&'a Metrics>,
    /// Active sort order (must match what was drawn).
    pub sort_by: ProcSortBy,
    /// Active fuzzy-filter query (must match what was drawn).
    pub search_query: &'a str,
}
/// Enhanced mouse handler that also manages process selection
/// Returns Some(new_sort) if the header was clicked, or handles row selection
pub fn processes_handle_mouse_with_selection(params: ProcessMouseParams) -> Option<ProcSortBy> {
    // Inner and content areas (match draw_top_processes)
    let inner = Rect {
        x: params.area.x + 1,
        y: params.area.y + 1,
        width: params.area.width.saturating_sub(2),
        height: params.area.height.saturating_sub(2),
    };
    if inner.height == 0 || inner.width <= 2 {
        return None;
    }
    // Calculate content area - must match draw_top_processes exactly!
    // If search is active or query exists, content starts after search box (3 lines)
    // NOTE(review): draw_top_processes shifts content when `search_active ||
    // !query.is_empty()`, but this handler only knows the query string. While
    // the user is typing with a still-empty query, hit-testing is off by 3
    // rows — confirm, and consider adding a `search_active` field here.
    let search_active = !params.search_query.is_empty();
    let content_start_y = if search_active { inner.y + 3 } else { inner.y };
    let content = Rect {
        x: inner.x,
        y: content_start_y,
        width: inner.width.saturating_sub(2),
        height: inner
            .height
            .saturating_sub(if search_active { 3 } else { 0 }),
    };
    // Scrollbar interactions (click arrows/page/drag)
    per_core_handle_scrollbar_mouse(
        params.scroll_offset,
        params.drag,
        params.mouse,
        params.area,
        params.total_rows,
    );
    // Wheel scrolling when inside the content
    crate::ui::cpu::per_core_handle_mouse(
        params.scroll_offset,
        params.mouse,
        content,
        content.height as usize,
    );
    // Header click to change sort
    let header_area = Rect {
        x: content.x,
        y: content.y,
        width: content.width,
        height: 1,
    };
    let inside_header = params.mouse.row == header_area.y
        && params.mouse.column >= header_area.x
        && params.mouse.column < header_area.x + header_area.width;
    if inside_header && matches!(params.mouse.kind, MouseEventKind::Down(MouseButton::Left)) {
        // Split header into the same columns
        let cols = Layout::default()
            .direction(Direction::Horizontal)
            .constraints(COLS.to_vec())
            .split(header_area);
        if params.mouse.column >= cols[2].x && params.mouse.column < cols[2].x + cols[2].width {
            return Some(ProcSortBy::CpuDesc);
        }
        if params.mouse.column >= cols[3].x && params.mouse.column < cols[3].x + cols[3].width {
            return Some(ProcSortBy::MemDesc);
        }
    }
    // Row click for process selection
    let data_start_row = content.y + 1; // Skip header
    let data_area_height = content.height.saturating_sub(1); // Exclude header
    if matches!(params.mouse.kind, MouseEventKind::Down(MouseButton::Left))
        && params.mouse.row >= data_start_row
        && params.mouse.row < data_start_row + data_area_height
        && params.mouse.column >= content.x
        && params.mouse.column < content.x + content.width
    {
        let clicked_row = (params.mouse.row - data_start_row) as usize;
        // Find the actual process using the same filtering/sorting logic as the drawing code
        if let Some(m) = params.metrics {
            // Use the same filtered and sorted indices as display
            let idxs = get_filtered_sorted_indices(m, params.search_query, params.sort_by);
            // Calculate which process was actually clicked based on filtered/sorted order
            let visible_process_position = *params.scroll_offset + clicked_row;
            if visible_process_position < idxs.len() {
                let actual_process_index = idxs[visible_process_position];
                let clicked_process = &m.top_processes[actual_process_index];
                *params.selected_process_pid = Some(clicked_process.pid);
                *params.selected_process_index = Some(actual_process_index);
            }
        }
    }
    // Clamp to valid range
    per_core_clamp(
        params.scroll_offset,
        params.total_rows,
        (content.height.saturating_sub(1)) as usize,
    );
    None
}

View File

@ -1,29 +0,0 @@
//! Swap gauge.
use crate::types::Metrics;
use crate::ui::util::human;
use ratatui::{
layout::Rect,
style::{Color, Style},
widgets::{Block, Borders, Gauge},
};
/// Draw the swap usage gauge: a bordered gauge titled "Swap" showing
/// "used / total" with a percentage fill. When no metrics have arrived yet
/// (`m` is `None`), an empty 0% gauge is drawn.
pub fn draw_swap(f: &mut ratatui::Frame<'_>, area: Rect, m: Option<&Metrics>) {
    let (used, total, pct) = if let Some(mm) = m {
        let pct = if mm.swap_total > 0 {
            // Clamp to 100: Gauge::percent panics on values above 100, which
            // could otherwise occur if swap_used ever exceeded swap_total.
            ((mm.swap_used as f64 / mm.swap_total as f64 * 100.0) as u16).min(100)
        } else {
            0
        };
        (mm.swap_used, mm.swap_total, pct)
    } else {
        (0, 0, 0)
    };
    let g = Gauge::default()
        .block(Block::default().borders(Borders::ALL).title("Swap"))
        .gauge_style(Style::default().fg(Color::Yellow))
        .percent(pct)
        .label(format!("{} / {}", human(used), human(total)));
    f.render_widget(g, area);
}

View File

@ -1,88 +0,0 @@
//! Shared UI theme constants.
use ratatui::style::Color;
// Scrollbar colors (same look as before)
// NOTE(review): all three are the same gray — presumably intentional for a
// flat look, but confirm the thumb was not meant to differ from the track.
pub const SB_ARROW: Color = Color::Rgb(170, 170, 180);
pub const SB_TRACK: Color = Color::Rgb(170, 170, 180);
pub const SB_THUMB: Color = Color::Rgb(170, 170, 180);
// Modal palette
pub const MODAL_DIM_BG: Color = Color::Rgb(15, 15, 25);
pub const MODAL_BG: Color = Color::Rgb(26, 26, 46);
pub const MODAL_FG: Color = Color::Rgb(230, 230, 230);
pub const MODAL_TITLE_FG: Color = Color::Rgb(255, 102, 102); // soft red for title text
pub const MODAL_BORDER_FG: Color = Color::Rgb(204, 51, 51); // darker red border
pub const MODAL_ICON_PINK: Color = Color::Rgb(255, 182, 193); // light pink icons line
pub const MODAL_AGENT_FG: Color = Color::Rgb(220, 220, 255); // pale periwinkle
pub const MODAL_HINT_FG: Color = Color::Rgb(255, 215, 0); // gold for message icon
pub const MODAL_OFFLINE_LABEL_FG: Color = Color::Rgb(135, 206, 235); // sky blue label
pub const MODAL_RETRY_LABEL_FG: Color = Color::Rgb(255, 165, 0); // orange label
pub const MODAL_COUNTDOWN_LABEL_FG: Color = Color::Rgb(255, 192, 203); // pink label for countdown
// Buttons
pub const BTN_RETRY_BG_ACTIVE: Color = Color::Rgb(46, 204, 113); // modern green
pub const BTN_RETRY_FG_ACTIVE: Color = Color::Rgb(26, 26, 46);
pub const BTN_RETRY_FG_INACTIVE: Color = Color::Rgb(46, 204, 113);
pub const BTN_EXIT_BG_ACTIVE: Color = Color::Rgb(255, 255, 255); // white (previous comment said "modern red" — the value is white)
pub const BTN_EXIT_FG_ACTIVE: Color = Color::Rgb(26, 26, 46);
pub const BTN_EXIT_FG_INACTIVE: Color = Color::Rgb(255, 255, 255);
// Process selection colors
pub const PROCESS_SELECTION_BG: Color = Color::Rgb(147, 112, 219); // Medium slate blue (purple)
pub const PROCESS_SELECTION_FG: Color = Color::Rgb(255, 255, 255); // White text for contrast
pub const PROCESS_TOOLTIP_BG: Color = Color::Rgb(147, 112, 219); // Same purple as selection
pub const PROCESS_TOOLTIP_FG: Color = Color::Rgb(255, 255, 255); // White text for contrast
// Process details modal colors (matches main UI aesthetic - no custom colors, terminal defaults)
pub const PROCESS_DETAILS_ACCENT: Color = Color::Rgb(147, 112, 219); // Purple accent for highlights
// Emoji / icon strings (centralized so they can be themed/swapped later)
pub const ICON_WARNING_TITLE: &str = " 🔌 CONNECTION ERROR ";
pub const ICON_CLUSTER: &str = "⚠️";
pub const ICON_MESSAGE: &str = "💭 ";
pub const ICON_OFFLINE_LABEL: &str = "⏱️ Offline for: ";
pub const ICON_RETRY_LABEL: &str = "🔄 Retry attempts: ";
pub const ICON_COUNTDOWN_LABEL: &str = "⏰ Next auto retry: ";
pub const BTN_RETRY_TEXT: &str = " 🔄 Retry ";
pub const BTN_EXIT_TEXT: &str = " ❌ Exit ";
// warning icon
pub const LARGE_ERROR_ICON: &[&str] = &[
    " /\\ ",
    " / \\ ",
    " / !! \\ ",
    " / !!!! \\ ",
    " / !! \\ ",
    " / !!!! \\ ",
    " / !! \\ ",
    " /______________\\ ",
];
//about logo
// NOTE(review): ASCII_ART appears empty here — the logo content may have
// been lost; confirm against the original source.
pub const ASCII_ART: &str = r#"
"#;

View File

@ -1,51 +0,0 @@
//! Small UI helpers: human-readable sizes, truncation, icons.
/// Render a byte count in human-readable form using 1024-based units:
/// "512B", "1.5KB", "2.3MB", "4.0GB", "1.25TB". Bytes print with no
/// decimals, KB/MB/GB with one, TB with two.
pub fn human(b: u64) -> String {
    const K: f64 = 1024.0;
    let mut value = b as f64;
    if value < K {
        return format!("{value:.0}B");
    }
    // Walk up the unit ladder until the value fits below 1024.
    for unit in ["KB", "MB", "GB"] {
        value /= K;
        if value < K {
            return format!("{value:.1}{unit}");
        }
    }
    // Anything left is terabytes (or more) — shown with two decimals.
    format!("{:.2}TB", value / K)
}
/// Truncate `s` to at most `max` characters by replacing the middle with
/// "...", keeping the start and end (e.g. "abcdefghij" @ 7 -> "ab...ij").
/// Operates on characters, not bytes: the original byte-index slicing
/// panicked when a slice boundary fell inside a multi-byte UTF-8 character
/// (e.g. process or disk names with non-ASCII characters).
pub fn truncate_middle(s: &str, max: usize) -> String {
    let chars: Vec<char> = s.chars().collect();
    if chars.len() <= max {
        return s.to_string();
    }
    if max <= 3 {
        // Not enough room for any content around the ellipsis.
        return "...".into();
    }
    let keep = max - 3;
    let left = keep / 2;
    let right = keep - left; // right side gets the extra char when keep is odd
    let head: String = chars[..left].iter().collect();
    let tail: String = chars[chars.len() - right..].iter().collect();
    format!("{head}...{tail}")
}
/// Pick a display icon for a disk/volume from its name: drive-letter style
/// names (containing ':'), NVMe devices, SATA-style "sd*" devices, and
/// overlay filesystems each get their own glyph; anything else falls back
/// to a generic drive icon.
pub fn disk_icon(name: &str) -> &'static str {
    let lower = name.to_ascii_lowercase();
    match () {
        _ if lower.contains(':') => "🗄️",
        _ if lower.contains("nvme") => "",
        _ if lower.starts_with("sd") => "💽",
        _ if lower.contains("overlay") => "📦",
        _ => "🖴",
    }
}

View File

View File

@ -1,75 +0,0 @@
//! CLI arg parsing tests for socktop (client)
use std::process::Command;
// We test the parsing by invoking the binary with --help and ensuring the help mentions short and long flags.
// Also directly test the parse_args function via a tiny helper in a doctest-like fashion using a small
// reimplementation here kept in sync with main (compile-time test).
/// `--help` output must advertise both the short and long forms of the
/// TLS CA (`-t`/`--tls-ca`) and profile (`-P`/`--profile`) flags.
#[test]
fn test_help_mentions_short_and_long_flags() {
    let output = Command::new(env!("CARGO_BIN_EXE_socktop"))
        .arg("--help")
        .output()
        .expect("run socktop --help");
    // Combine stdout and stderr: help may be printed to either stream.
    let text = format!(
        "{}{}",
        String::from_utf8_lossy(&output.stdout),
        String::from_utf8_lossy(&output.stderr)
    );
    assert!(
        text.contains("--tls-ca")
            && text.contains("-t")
            && text.contains("--profile")
            && text.contains("-P"),
        "help text missing expected flags (--tls-ca/-t, --profile/-P)\n{text}"
    );
}
/// The TLS CA and profile flags must be accepted by the argument parser.
/// NOTE(review): "tlc" in the test name looks like a typo for "tls" —
/// renaming would change the test's identifier, so it is only noted here.
#[test]
fn test_tlc_ca_arg_long_and_short_parsed() {
    // Use --help combined with flags to avoid network and still exercise arg acceptance
    let exe = env!("CARGO_BIN_EXE_socktop");
    // Long form with help
    let out = Command::new(exe)
        .args(["--tls-ca", "/tmp/cert.pem", "--help"])
        .output()
        .expect("run socktop");
    assert!(
        out.status.success(),
        "socktop --tls-ca … --help did not succeed"
    );
    let text = format!(
        "{}{}",
        String::from_utf8_lossy(&out.stdout),
        String::from_utf8_lossy(&out.stderr)
    );
    assert!(text.contains("Usage:"));
    // Short form with help
    let out2 = Command::new(exe)
        .args(["-t", "/tmp/cert.pem", "--help"])
        .output()
        .expect("run socktop");
    assert!(out2.status.success(), "socktop -t … --help did not succeed");
    let text2 = format!(
        "{}{}",
        String::from_utf8_lossy(&out2.stdout),
        String::from_utf8_lossy(&out2.stderr)
    );
    assert!(text2.contains("Usage:"));
    // Profile flags with help (should not error)
    let out3 = Command::new(exe)
        .args(["--profile", "dev", "--help"])
        .output()
        .expect("run socktop");
    assert!(
        out3.status.success(),
        "socktop --profile dev --help did not succeed"
    );
    let text3 = format!(
        "{}{}",
        String::from_utf8_lossy(&out3.stdout),
        String::from_utf8_lossy(&out3.stderr)
    );
    assert!(text3.contains("Usage:"));
}

View File

@ -1,46 +0,0 @@
//! Tests for modal formatting and duration helper.
use std::time::Duration;
// Bring the format_duration function into scope by duplicating logic (private in module). If desired,
// this could be moved to a shared util module; for now we re-assert expected behavior.
/// Render a Duration as "Xh Ym Zs", omitting leading zero-valued units
/// (e.g. 61s -> "1m 1s", 45s -> "45s", 3661s -> "1h 1m 1s").
fn format_duration_ref(duration: Duration) -> String {
    let secs = duration.as_secs();
    let (h, m, s) = (secs / 3600, (secs % 3600) / 60, secs % 60);
    match (h, m) {
        (0, 0) => format!("{s}s"),
        (0, _) => format!("{m}m {s}s"),
        _ => format!("{h}h {m}m {s}s"),
    }
}
/// Boundary values around each unit change: 59s/60s, 3600s, and a value
/// exercising all three units at once.
#[test]
fn test_format_duration_boundaries() {
    assert_eq!(format_duration_ref(Duration::from_secs(0)), "0s");
    assert_eq!(format_duration_ref(Duration::from_secs(59)), "59s");
    assert_eq!(format_duration_ref(Duration::from_secs(60)), "1m 0s");
    assert_eq!(format_duration_ref(Duration::from_secs(61)), "1m 1s");
    assert_eq!(format_duration_ref(Duration::from_secs(3600)), "1h 0m 0s");
    assert_eq!(format_duration_ref(Duration::from_secs(3661)), "1h 1m 1s");
}
// Basic test to ensure auto-retry countdown semantics are consistent for initial state.
// NOTE(review): this only mirrors the intended logic locally rather than
// exercising the real App; see the inline comments about refactoring the
// countdown into a pure, testable helper.
#[test]
fn test_auto_retry_initial_none() {
    // We can't construct App directly without pulling in whole UI; just assert logic mimic.
    // For a more thorough test, refactor countdown logic into a pure function.
    // This placeholder asserts desired initial semantics: when no disconnect/original time, countdown should be None.
    // (When integrated, consider exposing a pure helper returning Option<u64>.)
    let modal_active = false; // requirement: must be active for countdown
    let disconnected_state = true; // assume disconnected state
    let countdown = if disconnected_state && modal_active {
        // would compute target
        Some(0)
    } else {
        None
    };
    assert!(countdown.is_none());
}

View File

@ -1,124 +0,0 @@
//! Tests for profile load/save and resolution logic (non-interactive paths only)
use std::fs;
use std::sync::Mutex;
// Global lock to serialize tests that mutate process-wide environment variables
// (XDG_CONFIG_HOME): cargo runs #[test] functions on multiple threads by default,
// so unsynchronized set_var calls would race between tests.
static ENV_LOCK: Mutex<()> = Mutex::new(());
#[allow(dead_code)] // touch crate
// Constructs (and discards) a zeroed Metrics value so the socktop library
// crate is linked into this test binary, and so this test breaks at compile
// time if the Metrics struct's fields change.
fn touch() {
    let _ = socktop::types::Metrics {
        cpu_total: 0.0,
        cpu_per_core: vec![],
        mem_total: 0,
        mem_used: 0,
        swap_total: 0,
        swap_used: 0,
        process_count: None,
        hostname: String::new(),
        cpu_temp_c: None,
        disks: vec![],
        networks: vec![],
        top_processes: vec![],
        gpus: None,
    };
}
// We re-import internal modules by copying minimal logic here because profiles.rs isn't public.
// Instead of exposing internals, we simulate profile saving through CLI invocations.
use std::process::Command;
/// Spawn the socktop binary with `args` and return `(success, output)`,
/// where `output` is the combined stdout + stderr text.
fn run_socktop(args: &[&str]) -> (bool, String) {
    let output = Command::new(env!("CARGO_BIN_EXE_socktop"))
        .args(args)
        .output()
        .expect("run socktop");
    // Concatenate both streams so assertions can match either.
    let mut text = String::from_utf8_lossy(&output.stdout).into_owned();
    text.push_str(&String::from_utf8_lossy(&output.stderr));
    (output.status.success(), text)
}
/// Resolve the socktop config directory, honoring $XDG_CONFIG_HOME when set
/// and falling back to the platform config dir (or "." as a last resort).
fn config_dir() -> std::path::PathBuf {
    match std::env::var_os("XDG_CONFIG_HOME") {
        Some(xdg) => std::path::PathBuf::from(xdg).join("socktop"),
        None => dirs_next::config_dir()
            .unwrap_or_else(|| std::path::PathBuf::from("."))
            .join("socktop"),
    }
}
/// Path of the profiles store: `<config_dir>/socktop/profiles.json`.
fn profiles_path() -> std::path::PathBuf {
    config_dir().join("profiles.json")
}
/// Passing --profile with a URL for the first time must create profiles.json
/// containing the new profile entry.
#[test]
fn test_profile_created_on_first_use() {
    let _guard = ENV_LOCK.lock().unwrap();
    // Isolate config in a temp dir
    let td = tempfile::tempdir().unwrap();
    unsafe {
        std::env::set_var("XDG_CONFIG_HOME", td.path());
    }
    // Ensure directory exists fresh
    std::fs::create_dir_all(td.path().join("socktop")).unwrap();
    let _ = fs::remove_file(profiles_path());
    // Provide profile + url => should create profiles.json
    // --dry-run exits right after parsing/saving, so no network attempt is made.
    let (_ok, _out) = run_socktop(&["--profile", "unittest", "ws://example:1/ws", "--dry-run"]);
    let data = fs::read_to_string(profiles_path()).expect("profiles.json created");
    assert!(
        data.contains("unittest"),
        "profiles.json missing profile entry: {data}"
    );
}
#[test]
fn test_profile_overwrite_only_when_changed() {
let _guard = ENV_LOCK.lock().unwrap();
let td = tempfile::tempdir().unwrap();
unsafe {
std::env::set_var("XDG_CONFIG_HOME", td.path());
}
std::fs::create_dir_all(td.path().join("socktop")).unwrap();
let _ = fs::remove_file(profiles_path());
// Initial create
let (_ok, _out) = run_socktop(&["--profile", "prod", "ws://one/ws", "--dry-run"]); // create
let first = fs::read_to_string(profiles_path()).unwrap();
// Re-run identical (should not duplicate or corrupt)
let (_ok2, _out2) = run_socktop(&["--profile", "prod", "ws://one/ws", "--dry-run"]); // identical
let second = fs::read_to_string(profiles_path()).unwrap();
assert_eq!(
first, second,
"Profile file changed despite identical input"
);
// Overwrite with different URL using --save (no prompt path)
let (_ok3, _out3) = run_socktop(&["--profile", "prod", "--save", "ws://two/ws", "--dry-run"]);
let third = fs::read_to_string(profiles_path()).unwrap();
assert!(third.contains("two"), "Updated URL not written: {third}");
}
/// A profile saved with --tls-ca must persist both the profile name and the
/// CA certificate path in profiles.json.
#[test]
fn test_profile_tls_ca_persisted() {
    let _guard = ENV_LOCK.lock().unwrap();
    let td = tempfile::tempdir().unwrap();
    unsafe {
        std::env::set_var("XDG_CONFIG_HOME", td.path());
    }
    std::fs::create_dir_all(td.path().join("socktop")).unwrap();
    let _ = fs::remove_file(profiles_path());
    let (_ok, _out) = run_socktop(&[
        "--profile",
        "secureX",
        "--tls-ca",
        "/tmp/cert.pem",
        "wss://host/ws",
        "--dry-run",
    ]);
    let data = fs::read_to_string(profiles_path()).unwrap();
    assert!(data.contains("secureX"));
    assert!(data.contains("cert.pem"));
}

View File

@ -1,47 +0,0 @@
[package]
name = "socktop_agent"
version = "1.50.2"
authors = ["Jason Witty <jasonpwitty+socktop@proton.me>"]
description = "Socktop agent daemon. Serves host metrics over WebSocket."
edition = "2024"
license = "MIT"
readme = "README.md"
[dependencies]
# Tokio: Use minimal features instead of "full" to reduce binary size
# Only include: rt-multi-thread (async runtime), net (WebSocket), sync (Mutex/RwLock), macros (#[tokio::test])
# Excluded: io, fs, process, signal, time (not needed for this workload)
# Savings: ~200-300KB binary size, faster compile times
tokio = { version = "1", features = ["rt-multi-thread", "net", "sync", "macros"] }
axum = { version = "0.7", features = ["ws", "macros"] }
sysinfo = { version = "0.37", features = ["network", "disk", "component"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
flate2 = { version = "1", default-features = false, features = ["rust_backend"] }
futures-util = "0.3.31"
tracing = { version = "0.1", optional = true }
tracing-subscriber = { version = "0.3", features = ["env-filter"], optional = true }
gfxinfo = "0.1.2"
once_cell = "1.19"
axum-server = { version = "0.7", features = ["tls-rustls"] }
rustls = { version = "0.23", features = ["aws-lc-rs"] }
rustls-pemfile = "2.1"
rcgen = "0.13"
anyhow = "1"
hostname = "0.3"
prost = { workspace = true }
time = { version = "0.3", default-features = false, features = ["formatting", "macros", "parsing" ] }
[features]
default = []
logging = ["tracing", "tracing-subscriber"]
[build-dependencies]
prost-build = "0.13"
tonic-build = { version = "0.12", default-features = false, optional = true }
protoc-bin-vendored = "3"
[dev-dependencies]
assert_cmd = "2.0"
tempfile = "3.10"
tokio-tungstenite = "0.21"

View File

@ -1,396 +0,0 @@
# socktop_agent (server)
Lightweight on-demand metrics WebSocket server for the socktop TUI.
Highlights:
- Collects system metrics only when requested (keeps idle CPU <1%)
- Optional TLS (self-signed cert auto-generated & pinned by client)
- JSON for fast metrics / disks; protobuf (optionally gzipped) for processes
- Accurate per-process CPU% on Linux via /proc jiffies delta
- Optional GPU & temperature metrics (disable via env vars)
- Simple token auth (?token=...) support
Run (no TLS):
```
cargo install socktop_agent
socktop_agent --port 3000
```
Enable TLS:
```
SOCKTOP_ENABLE_SSL=1 socktop_agent --port 8443
# cert/key stored under $XDG_DATA_HOME/socktop_agent/tls
```
Environment toggles:
- SOCKTOP_AGENT_GPU=0 (disable GPU collection)
- SOCKTOP_AGENT_TEMP=0 (disable temperature)
- SOCKTOP_TOKEN=secret (require token param from client)
- SOCKTOP_AGENT_METRICS_TTL_MS=250 (cache fast metrics window)
- SOCKTOP_AGENT_PROCESSES_TTL_MS=1000
- SOCKTOP_AGENT_DISKS_TTL_MS=1000
*NOTE ON ENV vars*
Generally these have been added for debugging purposes. You do not need to configure them; default values are tuned, and GPU collection will disable itself after the first poll if no GPU is available.
Systemd unit example & full docs:
https://github.com/jasonwitty/socktop
## WebSocket API Integration Guide
The socktop_agent exposes a WebSocket API that can be directly integrated with your own applications. This allows you to build custom monitoring dashboards or analysis tools using the agent's metrics.
### WebSocket Endpoint
```
ws://HOST:PORT/ws # Without TLS
wss://HOST:PORT/ws # With TLS
```
With authentication token (if configured):
```
ws://HOST:PORT/ws?token=YOUR_TOKEN
wss://HOST:PORT/ws?token=YOUR_TOKEN
```
### Communication Protocol
All communication uses JSON format for requests and responses, except for the process list which uses Protocol Buffers (protobuf) format with optional gzip compression.
#### Request Types
Send a JSON message with a `type` field to request specific metrics:
```json
{"type": "metrics"} // Request fast-changing metrics (CPU, memory, network)
{"type": "disks"} // Request disk information
{"type": "processes"} // Request process list (returns protobuf)
```
#### Response Formats
1. **Fast Metrics** (JSON):
```json
{
"cpu_total": 12.4,
"cpu_per_core": [11.2, 15.7],
"mem_total": 33554432,
"mem_used": 18321408,
"swap_total": 0,
"swap_used": 0,
"hostname": "myserver",
"cpu_temp_c": 42.5,
"networks": [{"name":"eth0","received":12345678,"transmitted":87654321}],
"gpus": [{"name":"nvidia-0","usage":56.7,"memory_total":8589934592,"memory_used":1073741824,"temp_c":65.0}]
}
```
2. **Disks** (JSON):
```json
[
{"name":"nvme0n1p2","total":512000000000,"available":320000000000},
{"name":"sda1","total":1000000000000,"available":750000000000}
]
```
3. **Processes** (Protocol Buffers):
Processes are returned in Protocol Buffers format, optionally gzip-compressed for large process lists. The protobuf schema is:
```protobuf
syntax = "proto3";
message Process {
uint32 pid = 1;
string name = 2;
float cpu_usage = 3;
uint64 mem_bytes = 4;
}
message ProcessList {
uint32 process_count = 1;
repeated Process processes = 2;
}
```
### Example Integration (JavaScript/Node.js)
```javascript
const WebSocket = require('ws');
// Connect to the agent
const ws = new WebSocket('ws://localhost:3000/ws');
ws.on('open', function open() {
console.log('Connected to socktop_agent');
// Request metrics immediately on connection
ws.send(JSON.stringify({type: 'metrics'}));
// Set up regular polling
setInterval(() => {
ws.send(JSON.stringify({type: 'metrics'}));
}, 1000);
// Request processes every 3 seconds
setInterval(() => {
ws.send(JSON.stringify({type: 'processes'}));
}, 3000);
});
ws.on('message', function incoming(data) {
// Check if the response is JSON or binary (protobuf)
try {
const jsonData = JSON.parse(data);
console.log('Received JSON data:', jsonData);
} catch (e) {
console.log('Received binary data (protobuf), length:', data.length);
// Process binary protobuf data with a library like protobufjs
}
});
ws.on('close', function close() {
console.log('Disconnected from socktop_agent');
});
```
### Example Integration (Python)
```python
import json
import asyncio
import websockets
async def monitor_system():
uri = "ws://localhost:3000/ws"
async with websockets.connect(uri) as websocket:
print("Connected to socktop_agent")
# Request initial metrics
await websocket.send(json.dumps({"type": "metrics"}))
# Set up regular polling
while True:
# Request metrics
await websocket.send(json.dumps({"type": "metrics"}))
# Receive and process response
response = await websocket.recv()
# Check if response is JSON or binary (protobuf)
try:
data = json.loads(response)
print(f"CPU: {data['cpu_total']}%, Memory: {data['mem_used']/data['mem_total']*100:.1f}%")
except json.JSONDecodeError:
print(f"Received binary data, length: {len(response)}")
# Process binary protobuf data with a library like protobuf
# Wait before next poll
await asyncio.sleep(1)
asyncio.run(monitor_system())
```
### Notes for Integration
1. **Error Handling**: The WebSocket connection may close unexpectedly; implement reconnection logic in your client.
2. **Rate Limiting**: Avoid excessive polling that could impact the system being monitored. Recommended intervals:
- Metrics: 500ms or slower
- Processes: 2000ms or slower
- Disks: 5000ms or slower
3. **Authentication**: If the agent is configured with a token, always include it in the WebSocket URL.
4. **Protocol Buffers Handling**: For processing the binary process list data, use a Protocol Buffers library for your language and the schema provided in the `proto/processes.proto` file.
5. **Compression**: Process lists may be gzip-compressed. Check if the response starts with the gzip magic bytes (`0x1f, 0x8b`) and decompress if necessary.
## LLM Integration Guide
If you're using an LLM to generate code for integrating with socktop_agent, this section provides structured information to help the model understand the API better.
### API Schema
```yaml
# WebSocket API Schema for socktop_agent
endpoint: ws://HOST:PORT/ws or wss://HOST:PORT/ws (with TLS)
authentication:
type: query parameter
parameter: token
example: ws://HOST:PORT/ws?token=YOUR_TOKEN
requests:
- type: metrics
format: JSON
example: {"type": "metrics"}
description: Fast-changing metrics (CPU, memory, network)
- type: disks
format: JSON
example: {"type": "disks"}
description: Disk information
- type: processes
format: JSON
example: {"type": "processes"}
description: Process list (returns protobuf)
responses:
- request_type: metrics
format: JSON
schema:
cpu_total: float # percentage of total CPU usage
cpu_per_core: [float] # array of per-core CPU usage percentages
mem_total: uint64 # total memory in bytes
mem_used: uint64 # used memory in bytes
swap_total: uint64 # total swap in bytes
swap_used: uint64 # used swap in bytes
hostname: string # system hostname
cpu_temp_c: float? # CPU temperature in Celsius (optional)
networks: [
{
name: string # network interface name
received: uint64 # total bytes received
transmitted: uint64 # total bytes transmitted
}
]
gpus: [
{
name: string # GPU device name
usage: float # GPU usage percentage
memory_total: uint64 # total GPU memory in bytes
memory_used: uint64 # used GPU memory in bytes
temp_c: float # GPU temperature in Celsius
}
]?
- request_type: disks
format: JSON
schema:
[
{
name: string # disk name
total: uint64 # total space in bytes
available: uint64 # available space in bytes
}
]
- request_type: processes
format: Protocol Buffers (optionally gzip-compressed)
schema: See protobuf definition below
```
### Protobuf Schema (processes.proto)
```protobuf
syntax = "proto3";
message Process {
uint32 pid = 1;
string name = 2;
float cpu_usage = 3;
uint64 mem_bytes = 4;
}
message ProcessList {
uint32 process_count = 1;
repeated Process processes = 2;
}
```
### Step-by-Step Integration Pseudocode
```
1. Establish WebSocket connection to ws://HOST:PORT/ws
- Add token if required: ws://HOST:PORT/ws?token=YOUR_TOKEN
2. For regular metrics updates:
- Send: {"type": "metrics"}
- Parse JSON response
- Extract CPU, memory, network info
3. For disk information:
- Send: {"type": "disks"}
- Parse JSON response
- Extract disk usage data
4. For process list:
- Send: {"type": "processes"}
- Check if response is binary
- If starts with 0x1f, 0x8b bytes:
- Decompress using gzip
- Parse binary data using protobuf schema
- Extract process information
5. Implement reconnection logic:
- On connection close/error
- Use exponential backoff
6. Respect rate limits:
- metrics: ≥ 500ms interval
- disks: ≥ 5000ms interval
- processes: ≥ 2000ms interval
```
### Common Implementation Patterns
**Pattern 1: Periodic Polling**
```javascript
// Set up separate timers for different metric types
const metricsInterval = setInterval(() => ws.send(JSON.stringify({type: 'metrics'})), 500);
const disksInterval = setInterval(() => ws.send(JSON.stringify({type: 'disks'})), 5000);
const processesInterval = setInterval(() => ws.send(JSON.stringify({type: 'processes'})), 2000);
// Clean up on disconnect
ws.on('close', () => {
clearInterval(metricsInterval);
clearInterval(disksInterval);
clearInterval(processesInterval);
});
```
**Pattern 2: Processing Binary Protobuf Data**
```javascript
// Using protobufjs
const root = protobuf.loadSync('processes.proto');
const Processes = root.lookupType('socktop.Processes');
ws.on('message', function(data) {
  if (typeof data !== 'string') {
    // Check for gzip compression
    if (data[0] === 0x1f && data[1] === 0x8b) {
      data = gunzipSync(data); // Use appropriate decompression library
    }
    // Decode protobuf (protobufjs exposes fields in camelCase)
    const processes = Processes.decode(new Uint8Array(data));
    console.log(`Total processes: ${processes.processCount}`);
    processes.rows.forEach(p => {
      console.log(`PID: ${p.pid}, Name: ${p.name}, CPU: ${p.cpuUsage}%`);
    });
}
});
```
**Pattern 3: Reconnection Logic**
```javascript
function connect() {
const ws = new WebSocket('ws://localhost:3000/ws');
ws.on('open', () => {
console.log('Connected');
// Start polling
});
ws.on('close', () => {
console.log('Connection lost, reconnecting...');
setTimeout(connect, 1000); // Reconnect after 1 second
});
// Handle other events...
}
connect();
```

View File

@ -1,14 +0,0 @@
// Build script: compiles proto/processes.proto into Rust via prost, using a
// vendored protoc binary so builds are reproducible regardless of whatever
// protobuf toolchain (if any) is installed on the host.
fn main() {
    // Re-run this script whenever the schema changes.
    println!("cargo:rerun-if-changed=proto/processes.proto");

    // Locate the vendored protoc executable shipped with the crate.
    let protoc = protoc_bin_vendored::protoc_bin_path().expect("protoc");

    let mut config = prost_build::Config::new();
    // Emit generated code into cargo's OUT_DIR and force the vendored protoc.
    config.out_dir(std::env::var("OUT_DIR").unwrap());
    config.protoc_executable(protoc);
    // Paths are relative to CARGO_MANIFEST_DIR, which keeps the .proto file
    // inside the published crate tarball.
    config
        .compile_protos(&["proto/processes.proto"], &["proto"])
        .expect("compile protos");
}

View File

@ -1,15 +0,0 @@
// Wire schema for the agent's process-list response (compiled by build.rs via prost).
syntax = "proto3";
package socktop;
// All running processes. Sorting is done client-side.
message Processes {
  uint64 process_count = 1; // total processes in the system
  repeated Process rows = 2; // all processes
}
// A single process snapshot.
message Process {
  uint32 pid = 1;
  string name = 2;
  float cpu_usage = 3; // 0..100
  uint64 mem_bytes = 4; // RSS bytes
}

View File

@ -1,95 +0,0 @@
//! Caching for process metrics and journal entries
use std::collections::HashMap;
use std::time::{Duration, Instant};
use tokio::sync::RwLock;
use crate::types::{ProcessMetricsResponse, JournalResponse};
/// A cached value tagged with its insertion time and time-to-live.
#[derive(Debug, Clone)]
struct CacheEntry<T> {
    /// The cached payload.
    data: T,
    /// Moment the value was stored.
    cached_at: Instant,
    /// How long the value remains valid after `cached_at`.
    ttl: Duration,
}

impl<T> CacheEntry<T> {
    /// Returns `true` once the entry has outlived its TTL.
    fn is_expired(&self) -> bool {
        let age = self.cached_at.elapsed();
        age > self.ttl
    }
}
/// In-memory TTL caches for per-process data, keyed by PID.
#[derive(Debug)]
pub struct ProcessCache {
    // Per-PID process metrics; entries are written with a 250ms TTL.
    process_metrics: RwLock<HashMap<u32, CacheEntry<ProcessMetricsResponse>>>,
    // Per-PID journal entries; entries are written with a 1s TTL.
    journal_entries: RwLock<HashMap<u32, CacheEntry<JournalResponse>>>,
}
impl ProcessCache {
    /// Create a cache with empty metric and journal maps.
    pub fn new() -> Self {
        Self {
            process_metrics: RwLock::new(HashMap::new()),
            journal_entries: RwLock::new(HashMap::new()),
        }
    }

    /// Get cached process metrics if available and not expired (250ms TTL).
    pub async fn get_process_metrics(&self, pid: u32) -> Option<ProcessMetricsResponse> {
        self.process_metrics
            .read()
            .await
            .get(&pid)
            .filter(|entry| !entry.is_expired())
            .map(|entry| entry.data.clone())
    }

    /// Cache process metrics with a 250ms TTL.
    pub async fn set_process_metrics(&self, pid: u32, data: ProcessMetricsResponse) {
        let entry = CacheEntry {
            data,
            cached_at: Instant::now(),
            ttl: Duration::from_millis(250),
        };
        self.process_metrics.write().await.insert(pid, entry);
    }

    /// Get cached journal entries if available and not expired (1s TTL).
    pub async fn get_journal_entries(&self, pid: u32) -> Option<JournalResponse> {
        self.journal_entries
            .read()
            .await
            .get(&pid)
            .filter(|entry| !entry.is_expired())
            .map(|entry| entry.data.clone())
    }

    /// Cache journal entries with a 1s TTL.
    pub async fn set_journal_entries(&self, pid: u32, data: JournalResponse) {
        let entry = CacheEntry {
            data,
            cached_at: Instant::now(),
            ttl: Duration::from_secs(1),
        };
        self.journal_entries.write().await.insert(pid, entry);
    }

    /// Drop expired entries from both maps; intended to run periodically.
    /// Each write lock is held only for the duration of its own `retain`.
    pub async fn cleanup_expired(&self) {
        self.process_metrics
            .write()
            .await
            .retain(|_, entry| !entry.is_expired());
        self.journal_entries
            .write()
            .await
            .retain(|_, entry| !entry.is_expired());
    }
}
/// `Default` simply delegates to [`ProcessCache::new`], yielding empty caches.
impl Default for ProcessCache {
    fn default() -> Self {
        Self::new()
    }
}

View File

@ -1,24 +0,0 @@
// gpu.rs
use gfxinfo::active_gpu;
/// Snapshot of a single GPU's utilization and memory, serialized to clients as JSON.
#[derive(Debug, Clone, serde::Serialize)]
pub struct GpuMetrics {
    // Model name reported by the gfxinfo backend.
    pub name: String,
    pub utilization_gpu_pct: u32, // 0..100
    pub mem_used_bytes: u64,
    pub mem_total_bytes: u64,
}
/// Query the active GPU via `gfxinfo` and return its metrics as a one-element vector.
///
/// # Errors
/// Propagates any error from `active_gpu()` (e.g. no supported GPU present).
///
/// NOTE(review): despite the name, only the single active GPU is reported —
/// the `Vec` return type leaves room for multi-GPU support later.
pub fn collect_all_gpus() -> Result<Vec<GpuMetrics>, Box<dyn std::error::Error>> {
    let gpu = active_gpu()?;
    let info = gpu.info();
    Ok(vec![GpuMetrics {
        name: gpu.model().to_string(),
        utilization_gpu_pct: info.load_pct() as u32,
        mem_used_bytes: info.used_vram(),
        mem_total_bytes: info.total_vram(),
    }])
}

View File

@ -1,17 +0,0 @@
//! Library interface for socktop_agent functionality.
//! Exposing these modules allows integration tests to exercise agent functions.
pub mod gpu; // GPU metrics via the gfxinfo crate
pub mod metrics; // metric collectors (see re-exports below)
pub mod proto; // protobuf types generated by build.rs
pub mod state; // shared application state and caches
pub mod tls; // self-signed TLS certificate handling
pub mod types; // request/response data types
pub mod ws; // WebSocket endpoint handler
// Re-export commonly used types and functions for testing
pub use metrics::{collect_journal_entries, collect_process_metrics};
pub use state::{AppState, CacheEntry};
pub use types::{
    DetailedProcessInfo, JournalEntry, JournalResponse, LogLevel, ProcessMetricsResponse,
};

View File

@ -1,134 +0,0 @@
//! socktop agent entrypoint: sets up sysinfo handles and serves a WebSocket endpoint at /ws.
mod gpu;
mod metrics;
mod proto;
// sampler module removed (metrics now purely request-driven)
mod state;
mod types;
mod ws;
use axum::{Router, http::StatusCode, routing::get};
use std::net::SocketAddr;
use std::str::FromStr;
mod tls;
use state::AppState;
/// Returns `true` when `name` appears verbatim among the process arguments.
fn arg_flag(name: &str) -> bool {
    for arg in std::env::args() {
        if arg == name {
            return true;
        }
    }
    false
}
/// Returns the argument immediately following the first occurrence of `name`,
/// or `None` if `name` is absent (or is the final argument).
fn arg_value(name: &str) -> Option<String> {
    let mut rest = std::env::args().skip_while(|a| a != name);
    // Consume the flag itself; `None` here means the flag never appeared.
    rest.next()?;
    rest.next()
}
/// Process entrypoint: installs the rustls crypto provider, builds a small
/// Tokio runtime, and hands off to [`async_main`].
fn main() -> anyhow::Result<()> {
    // Install the rustls crypto provider before any TLS operations — required
    // by axum-server's tls-rustls feature. Ignore the error if a provider was
    // already installed.
    rustls::crypto::aws_lc_rs::default_provider()
        .install_default()
        .ok();

    #[cfg(feature = "logging")]
    tracing_subscriber::fmt::init();

    // The agent is I/O-bound (WebSocket, /proc file reads, sysinfo) with no
    // CPU-intensive or blocking work, so a small worker pool beats Tokio's
    // default of num_cpus threads here:
    //   + lower memory footprint (~1-2MB saved per thread)
    //   + less context switching, fewer idle threads
    //   - slightly lower throughput under very high connection counts
    //   - blocking operations would hurt latency (don't add any!)
    //
    // Default: 2 threads (ample for 1-10 clients). Override with
    // SOCKTOP_WORKER_THREADS; the value is clamped to 1..=16.
    let worker_threads = std::env::var("SOCKTOP_WORKER_THREADS")
        .ok()
        .and_then(|v| v.parse::<usize>().ok())
        .unwrap_or(2)
        .clamp(1, 16);

    tokio::runtime::Builder::new_multi_thread()
        .worker_threads(worker_threads)
        .thread_name("socktop-agent")
        .enable_all()
        .build()?
        .block_on(async_main())
}
/// Builds the axum router (`/ws`, `/healthz`) and serves it over plain HTTP
/// or TLS depending on `--enableSSL` / `SOCKTOP_ENABLE_SSL=1`.
///
/// # Errors
/// Returns any error from certificate setup, address parsing, or the server.
async fn async_main() -> anyhow::Result<()> {
    // Version flag (print and exit). Keep before heavy initialization.
    if arg_flag("--version") || arg_flag("-V") {
        println!("socktop_agent {}", env!("CARGO_PKG_VERSION"));
        return Ok(());
    }

    // Resolve the listen port from --port / -p, then SOCKTOP_PORT, falling
    // back to `default`. Shared by the TLS and non-TLS paths, which differ
    // only in their default (8443 vs 3000).
    fn resolve_port(default: u16) -> u16 {
        arg_value("--port")
            .or_else(|| arg_value("-p"))
            .or_else(|| std::env::var("SOCKTOP_PORT").ok())
            .and_then(|s| s.parse::<u16>().ok())
            .unwrap_or(default)
    }

    // Liveness probe endpoint.
    async fn healthz() -> StatusCode {
        println!("/healthz request");
        StatusCode::OK
    }

    let state = AppState::new();
    // No background samplers: metrics collected on-demand per websocket request.
    let app = Router::new()
        .route("/ws", get(ws::ws_handler))
        .route("/healthz", get(healthz))
        .with_state(state.clone());

    let enable_ssl =
        arg_flag("--enableSSL") || std::env::var("SOCKTOP_ENABLE_SSL").ok().as_deref() == Some("1");

    if enable_ssl {
        // TLS path: default port 8443; self-signed cert is created on demand.
        let port = resolve_port(8443);
        let (cert_path, key_path) = tls::ensure_self_signed_cert()?;
        let cfg = axum_server::tls_rustls::RustlsConfig::from_pem_file(cert_path, key_path).await?;
        let addr = SocketAddr::from_str(&format!("0.0.0.0:{port}"))?;
        println!("socktop_agent: TLS enabled. Listening on wss://{addr}/ws");
        axum_server::bind_rustls(addr, cfg)
            .serve(app.into_make_service())
            .await?;
        return Ok(());
    }

    // Non-TLS HTTP/WS path: default port 3000.
    let port = resolve_port(3000);
    let addr = SocketAddr::from(([0, 0, 0, 0], port));
    println!("socktop_agent: Listening on ws://{addr}/ws");
    axum_server::bind(addr)
        .serve(app.into_make_service())
        .await?;
    Ok(())
}
// Unit tests for CLI parsing moved to `tests/port_parse.rs`.

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More