Clippy code cleanup

Clippy code cleanup
This commit is contained in:
jasonwitty 2025-08-11 20:47:21 -07:00
parent 05276f9eea
commit c3f81eef25
5 changed files with 49 additions and 107 deletions

4
Cargo.lock generated
View File

@ -1555,9 +1555,9 @@ dependencies = [
[[package]]
name = "sysinfo"
version = "0.36.1"
version = "0.37.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "252800745060e7b9ffb7b2badbd8b31cfa4aa2e61af879d0a3bf2a317c20217d"
checksum = "07cec4dc2d2e357ca1e610cfb07de2fa7a10fc3e9fe89f72545f3d244ea87753"
dependencies = [
"libc",
"memchr",

View File

@ -9,7 +9,7 @@ license = "MIT"
[dependencies]
tokio = { version = "1", features = ["full"] }
axum = { version = "0.7", features = ["ws", "macros"] }
sysinfo = "0.36.1"
sysinfo = { version = "0.37", features = ["network", "disk", "component"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
futures = "0.3"

View File

@ -9,8 +9,7 @@ mod ws;
mod gpu;
use axum::{routing::get, Router};
use std::{
collections::HashMap, net::SocketAddr, sync::atomic::AtomicUsize, sync::Arc, time::Duration,
use std::{ net::SocketAddr, sync::atomic::AtomicUsize, sync::Arc, time::Duration,
};
use sysinfo::{
Components, CpuRefreshKind, Disks, MemoryRefreshKind, Networks, ProcessRefreshKind,
@ -20,7 +19,7 @@ use tokio::sync::{Mutex, Notify, RwLock};
use tracing_subscriber::EnvFilter;
use sampler::spawn_sampler;
use state::{AppState, SharedTotals};
use state::{AppState};
use ws::ws_handler;
#[tokio::main]
@ -54,10 +53,6 @@ async fn main() {
// Shared state across requests
let state = AppState {
sys: Arc::new(Mutex::new(sys)),
nets: Arc::new(Mutex::new(nets)),
net_totals: Arc::new(Mutex::new(HashMap::<String, (u64, u64)>::new())) as SharedTotals,
components: Arc::new(Mutex::new(components)),
disks: Arc::new(Mutex::new(disks)),
last_json: Arc::new(RwLock::new(String::new())),
// new: adaptive sampling controls
client_count: Arc::new(AtomicUsize::new(0)),

View File

@ -1,113 +1,83 @@
//! Metrics collection using sysinfo. Keeps sysinfo handles in AppState to
//! avoid repeated allocations and allow efficient refreshes.
//! Metrics collection using sysinfo for socktop_agent.
use crate::gpu::collect_all_gpus;
use crate::state::AppState;
use crate::types::{DiskInfo, Metrics, NetworkInfo, ProcessInfo};
use sysinfo::{
System, Components,
ProcessRefreshKind, RefreshKind, MemoryRefreshKind, CpuRefreshKind, DiskRefreshKind,
NetworkRefreshKind,
};
use tracing::{warn, error};
use sysinfo::{Components, Disks, Networks, System};
use tracing::warn;
pub async fn collect_metrics(state: &AppState) -> Metrics {
// Lock sysinfo once; if poisoned, recover inner.
let mut sys = match state.sys.lock().await {
guard => guard, // Mutex from tokio::sync doesn't poison; this is safe
};
let mut sys = state.sys.lock().await;
// Refresh pieces (avoid heavy refresh_all if you already call periodically).
// Wrap in catch_unwind in case a crate panics internally.
if let Err(e) = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
// Newer sysinfo (0.36.x) wants explicit refresh kinds.
// Build a minimal RefreshKind instead of refresh_all() to keep it light.
let rk = RefreshKind::new()
.with_cpu(CpuRefreshKind::everything())
.with_memory(MemoryRefreshKind::new())
.with_disks(DiskRefreshKind::everything())
.with_networks(NetworkRefreshKind::everything())
.with_components(); // temps
sys.refresh_specifics(rk);
// Processes: need a separate call with the desired perprocess fields.
let prk = ProcessRefreshKind::new()
.with_cpu()
.with_memory()
.with_disk_usage(); // add/remove as needed
sys.refresh_processes_specifics(prk, |_| true, true);
sys.refresh_all();
})) {
warn!("system refresh panicked: {:?}", e);
warn!("sysinfo refresh panicked: {e:?}");
}
// Hostname
let hostname = sys.host_name().unwrap_or_else(|| "unknown".to_string());
// Hostname (associated fn on System in 0.37)
let hostname = System::host_name().unwrap_or_else(|| "unknown".to_string());
// CPU total & per-core
let cpu_total = sys.global_cpu_info().cpu_usage();
// CPU usage
let cpu_total = sys.global_cpu_usage();
let cpu_per_core: Vec<f32> = sys.cpus().iter().map(|c| c.cpu_usage()).collect();
// Memory / swap
let mem_total = sys.total_memory();
let mem_used = mem_total.saturating_sub(sys.available_memory());
let mem_used = mem_total.saturating_sub(sys.available_memory());
let swap_total = sys.total_swap();
let swap_used = sys.used_swap();
let swap_used = sys.used_swap();
// Temperature (first CPU-like component if any)
let cpu_temp_c = sys
.components()
.iter()
.filter(|c| {
let l = c.label().to_ascii_lowercase();
l.contains("cpu") || l.contains("package") || l.contains("core 0")
})
.map(|c| c.temperature() as f32)
.next();
// Temperature (via Components container)
let components = Components::new_with_refreshed_list();
let cpu_temp_c = components.iter().find_map(|c| {
let l = c.label().to_ascii_lowercase();
if l.contains("cpu") || l.contains("package") || l.contains("tctl") || l.contains("tdie") {
c.temperature()
} else {
None
}
});
// Disks
let disks: Vec<Disk> = sys
.disks()
// Disks (via Disks container)
let disks_list = Disks::new_with_refreshed_list();
let disks: Vec<DiskInfo> = disks_list
.iter()
.map(|d| Disk {
.map(|d| DiskInfo {
name: d.name().to_string_lossy().into_owned(),
total: d.total_space(),
available: d.available_space(),
})
.collect();
// Networks (cumulative)
let networks: Vec<Network> = sys
.networks()
// Networks (via Networks container) include interface name
let nets = Networks::new_with_refreshed_list();
let networks: Vec<NetworkInfo> = nets
.iter()
.map(|(_, data)| Network {
received: data.received(),
transmitted: data.transmitted(),
.map(|(name, data)| NetworkInfo {
name: name.to_string(),
received: data.total_received(),
transmitted: data.total_transmitted(),
})
.collect();
// Processes (top N by cpu)
// Processes (top N by CPU)
let mut procs: Vec<ProcessInfo> = sys
.processes()
.iter()
.map(|(pid, p)| ProcessInfo {
pid: pid.as_u32(),
name: p.name().to_string(),
.values()
.map(|p| ProcessInfo {
pid: p.pid().as_u32(),
name: p.name().to_string_lossy().into_owned(),
cpu_usage: p.cpu_usage(),
mem_bytes: p.memory(), // adjust if you use virtual_memory() earlier
mem_bytes: p.memory(),
})
.collect();
procs.sort_by(|a, b| b.cpu_usage.partial_cmp(&a.cpu_usage).unwrap_or(std::cmp::Ordering::Equal));
procs.truncate(30);
// GPU metrics (never panic)
let gpus = match crate::gpu::collect_all_gpus() {
// GPU(s)
let gpus = match collect_all_gpus() {
Ok(v) if !v.is_empty() => Some(v),
Ok(_) => None,
Err(e) => {
@ -131,19 +101,4 @@ pub async fn collect_metrics(state: &AppState) -> Metrics {
top_processes: procs,
gpus,
}
}
// Pick the hottest CPU-like sensor (labels vary by platform)
pub fn best_cpu_temp(components: &Components) -> Option<f32> {
components
.iter()
.filter(|c| {
let label = c.label().to_lowercase();
label.contains("cpu")
|| label.contains("package")
|| label.contains("tctl")
|| label.contains("tdie")
})
.filter_map(|c| c.temperature())
.max_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
}
}

View File

@ -1,24 +1,16 @@
//! Shared agent state: sysinfo handles and hot JSON cache.
use std::sync::atomic::AtomicUsize;
use std::{collections::HashMap, sync::Arc};
use sysinfo::{Components, Disks, Networks, System};
use std::sync::Arc;
use sysinfo::System;
use tokio::sync::{Mutex, Notify, RwLock};
pub type SharedSystem = Arc<Mutex<System>>;
pub type SharedNetworks = Arc<Mutex<Networks>>;
pub type SharedTotals = Arc<Mutex<HashMap<String, (u64, u64)>>>;
pub type SharedComponents = Arc<Mutex<Components>>;
pub type SharedDisks = Arc<Mutex<Disks>>;
#[derive(Clone)]
pub struct AppState {
// Persistent sysinfo handles
pub sys: SharedSystem,
pub nets: SharedNetworks,
pub net_totals: SharedTotals, // iface -> (rx_total, tx_total)
pub components: SharedComponents,
pub disks: SharedDisks,
// Last serialized JSON snapshot for fast WS responses
pub last_json: Arc<RwLock<String>>,