clippy code clean up

jasonwitty 2025-08-11 20:47:21 -07:00
parent 05276f9eea
commit c3f81eef25
5 changed files with 49 additions and 107 deletions

Cargo.lock (generated)

@@ -1555,9 +1555,9 @@ dependencies = [
 [[package]]
 name = "sysinfo"
-version = "0.36.1"
+version = "0.37.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "252800745060e7b9ffb7b2badbd8b31cfa4aa2e61af879d0a3bf2a317c20217d"
+checksum = "07cec4dc2d2e357ca1e610cfb07de2fa7a10fc3e9fe89f72545f3d244ea87753"
 dependencies = [
  "libc",
  "memchr",

Cargo.toml

@@ -9,7 +9,7 @@ license = "MIT"
 [dependencies]
 tokio = { version = "1", features = ["full"] }
 axum = { version = "0.7", features = ["ws", "macros"] }
-sysinfo = "0.36.1"
+sysinfo = { version = "0.37", features = ["network", "disk", "component"] }
 serde = { version = "1", features = ["derive"] }
 serde_json = "1"
 futures = "0.3"
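For orientation (not part of this commit): in sysinfo 0.37 the disk, network and temperature data live in standalone container types (Disks, Networks, Components) rather than behind methods on System, which is what the feature flags named above correspond to. A minimal sketch of that API, assuming default features remain enabled:

use sysinfo::{Components, Disks, Networks, System};

fn main() {
    // System still owns CPU, memory and process data; refresh before reading.
    let mut sys = System::new_all();
    sys.refresh_all();
    println!("host: {:?}", System::host_name()); // associated fn, no &self receiver
    // Note: CPU percentages need two refreshes a short interval apart to be meaningful.
    println!("cpu total: {:.1}%", sys.global_cpu_usage());

    // Disks, Networks and Components are separate, independently refreshed containers.
    for d in Disks::new_with_refreshed_list().iter() {
        println!("{:?}: {} bytes free", d.name(), d.available_space());
    }
    for (name, data) in Networks::new_with_refreshed_list().iter() {
        println!("{name}: rx={} tx={}", data.total_received(), data.total_transmitted());
    }
    for c in Components::new_with_refreshed_list().iter() {
        println!("{}: {:?} °C", c.label(), c.temperature());
    }
}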

src/main.rs

@@ -9,8 +9,7 @@ mod ws;
 mod gpu;
 use axum::{routing::get, Router};
-use std::{
-    collections::HashMap, net::SocketAddr, sync::atomic::AtomicUsize, sync::Arc, time::Duration,
+use std::{ net::SocketAddr, sync::atomic::AtomicUsize, sync::Arc, time::Duration,
 };
 use sysinfo::{
     Components, CpuRefreshKind, Disks, MemoryRefreshKind, Networks, ProcessRefreshKind,
@@ -20,7 +19,7 @@ use tokio::sync::{Mutex, Notify, RwLock};
 use tracing_subscriber::EnvFilter;
 use sampler::spawn_sampler;
-use state::{AppState, SharedTotals};
+use state::{AppState};
 use ws::ws_handler;
 #[tokio::main]
@@ -54,10 +53,6 @@ async fn main() {
     // Shared state across requests
     let state = AppState {
         sys: Arc::new(Mutex::new(sys)),
-        nets: Arc::new(Mutex::new(nets)),
-        net_totals: Arc::new(Mutex::new(HashMap::<String, (u64, u64)>::new())) as SharedTotals,
-        components: Arc::new(Mutex::new(components)),
-        disks: Arc::new(Mutex::new(disks)),
         last_json: Arc::new(RwLock::new(String::new())),
         // new: adaptive sampling controls
         client_count: Arc::new(AtomicUsize::new(0)),
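Taken together with the state.rs hunk further down, the shared state reduces to roughly the following shape. This is a sketch reconstructed from the visible hunks, not the full struct; the adaptive-sampling fields hinted at by the comment above are elided:

use std::sync::{atomic::AtomicUsize, Arc};
use sysinfo::System;
use tokio::sync::{Mutex, RwLock};

pub type SharedSystem = Arc<Mutex<System>>;

// Only the System handle and the cached JSON snapshot persist across samples;
// Networks/Disks/Components handles are no longer stored and are rebuilt per sample.
#[derive(Clone)]
pub struct AppState {
    pub sys: SharedSystem,
    pub last_json: Arc<RwLock<String>>,
    pub client_count: Arc<AtomicUsize>, // simplified: the real struct carries more fields
}

fn build_state() -> AppState {
    AppState {
        sys: Arc::new(Mutex::new(System::new_all())),
        last_json: Arc::new(RwLock::new(String::new())),
        client_count: Arc::new(AtomicUsize::new(0)),
    }
}

fn main() {
    let state = build_state();
    println!("clients: {}", state.client_count.load(std::sync::atomic::Ordering::Relaxed));
}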


@@ -1,113 +1,83 @@
-//! Metrics collection using sysinfo. Keeps sysinfo handles in AppState to
-//! avoid repeated allocations and allow efficient refreshes.
+//! Metrics collection using sysinfo for socktop_agent.
 use crate::gpu::collect_all_gpus;
 use crate::state::AppState;
 use crate::types::{DiskInfo, Metrics, NetworkInfo, ProcessInfo};
-use sysinfo::{
-    System, Components,
-    ProcessRefreshKind, RefreshKind, MemoryRefreshKind, CpuRefreshKind, DiskRefreshKind,
-    NetworkRefreshKind,
-};
-use tracing::{warn, error};
+use sysinfo::{Components, Disks, Networks, System};
+use tracing::warn;

 pub async fn collect_metrics(state: &AppState) -> Metrics {
-    // Lock sysinfo once; if poisoned, recover inner.
-    let mut sys = match state.sys.lock().await {
-        guard => guard, // Mutex from tokio::sync doesn't poison; this is safe
-    };
-
-    // Refresh pieces (avoid heavy refresh_all if you already call periodically).
-    // Wrap in catch_unwind in case a crate panics internally.
+    let mut sys = state.sys.lock().await;

     if let Err(e) = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
-        // Newer sysinfo (0.36.x) wants explicit refresh kinds.
-        // Build a minimal RefreshKind instead of refresh_all() to keep it light.
-        let rk = RefreshKind::new()
-            .with_cpu(CpuRefreshKind::everything())
-            .with_memory(MemoryRefreshKind::new())
-            .with_disks(DiskRefreshKind::everything())
-            .with_networks(NetworkRefreshKind::everything())
-            .with_components(); // temps
-        sys.refresh_specifics(rk);
-
-        // Processes: need a separate call with the desired per-process fields.
-        let prk = ProcessRefreshKind::new()
-            .with_cpu()
-            .with_memory()
-            .with_disk_usage(); // add/remove as needed
-        sys.refresh_processes_specifics(prk, |_| true, true);
+        sys.refresh_all();
     })) {
-        warn!("system refresh panicked: {:?}", e);
+        warn!("sysinfo refresh panicked: {e:?}");
     }

-    // Hostname
-    let hostname = sys.host_name().unwrap_or_else(|| "unknown".to_string());
+    // Hostname (associated fn on System in 0.37)
+    let hostname = System::host_name().unwrap_or_else(|| "unknown".to_string());

-    // CPU total & per-core
-    let cpu_total = sys.global_cpu_info().cpu_usage();
+    // CPU usage
+    let cpu_total = sys.global_cpu_usage();
     let cpu_per_core: Vec<f32> = sys.cpus().iter().map(|c| c.cpu_usage()).collect();

     // Memory / swap
     let mem_total = sys.total_memory();
     let mem_used = mem_total.saturating_sub(sys.available_memory());
     let swap_total = sys.total_swap();
     let swap_used = sys.used_swap();

-    // Temperature (first CPU-like component if any)
-    let cpu_temp_c = sys
-        .components()
-        .iter()
-        .filter(|c| {
-            let l = c.label().to_ascii_lowercase();
-            l.contains("cpu") || l.contains("package") || l.contains("core 0")
-        })
-        .map(|c| c.temperature() as f32)
-        .next();
+    // Temperature (via Components container)
+    let components = Components::new_with_refreshed_list();
+    let cpu_temp_c = components.iter().find_map(|c| {
+        let l = c.label().to_ascii_lowercase();
+        if l.contains("cpu") || l.contains("package") || l.contains("tctl") || l.contains("tdie") {
+            c.temperature()
+        } else {
+            None
+        }
+    });

-    // Disks
-    let disks: Vec<Disk> = sys
-        .disks()
+    // Disks (via Disks container)
+    let disks_list = Disks::new_with_refreshed_list();
+    let disks: Vec<DiskInfo> = disks_list
         .iter()
-        .map(|d| Disk {
+        .map(|d| DiskInfo {
             name: d.name().to_string_lossy().into_owned(),
             total: d.total_space(),
             available: d.available_space(),
         })
         .collect();

-    // Networks (cumulative)
-    let networks: Vec<Network> = sys
-        .networks()
+    // Networks (via Networks container) include interface name
+    let nets = Networks::new_with_refreshed_list();
+    let networks: Vec<NetworkInfo> = nets
         .iter()
-        .map(|(_, data)| Network {
-            received: data.received(),
-            transmitted: data.transmitted(),
+        .map(|(name, data)| NetworkInfo {
+            name: name.to_string(),
+            received: data.total_received(),
+            transmitted: data.total_transmitted(),
         })
         .collect();

-    // Processes (top N by cpu)
+    // Processes (top N by CPU)
     let mut procs: Vec<ProcessInfo> = sys
         .processes()
-        .iter()
-        .map(|(pid, p)| ProcessInfo {
-            pid: pid.as_u32(),
-            name: p.name().to_string(),
+        .values()
+        .map(|p| ProcessInfo {
+            pid: p.pid().as_u32(),
+            name: p.name().to_string_lossy().into_owned(),
             cpu_usage: p.cpu_usage(),
-            mem_bytes: p.memory(), // adjust if you use virtual_memory() earlier
+            mem_bytes: p.memory(),
         })
         .collect();
     procs.sort_by(|a, b| b.cpu_usage.partial_cmp(&a.cpu_usage).unwrap_or(std::cmp::Ordering::Equal));
     procs.truncate(30);

-    // GPU metrics (never panic)
-    let gpus = match crate::gpu::collect_all_gpus() {
+    // GPU(s)
+    let gpus = match collect_all_gpus() {
         Ok(v) if !v.is_empty() => Some(v),
         Ok(_) => None,
         Err(e) => {
@@ -131,19 +101,4 @@ pub async fn collect_metrics(state: &AppState) -> Metrics {
         top_processes: procs,
         gpus,
     }
 }
-
-// Pick the hottest CPU-like sensor (labels vary by platform)
-pub fn best_cpu_temp(components: &Components) -> Option<f32> {
-    components
-        .iter()
-        .filter(|c| {
-            let label = c.label().to_lowercase();
-            label.contains("cpu")
-                || label.contains("package")
-                || label.contains("tctl")
-                || label.contains("tdie")
-        })
-        .filter_map(|c| c.temperature())
-        .max_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
-}

src/state.rs

@@ -1,24 +1,16 @@
 //! Shared agent state: sysinfo handles and hot JSON cache.
 use std::sync::atomic::AtomicUsize;
-use std::{collections::HashMap, sync::Arc};
-use sysinfo::{Components, Disks, Networks, System};
+use std::sync::Arc;
+use sysinfo::System;
 use tokio::sync::{Mutex, Notify, RwLock};

 pub type SharedSystem = Arc<Mutex<System>>;
-pub type SharedNetworks = Arc<Mutex<Networks>>;
-pub type SharedTotals = Arc<Mutex<HashMap<String, (u64, u64)>>>;
-pub type SharedComponents = Arc<Mutex<Components>>;
-pub type SharedDisks = Arc<Mutex<Disks>>;

 #[derive(Clone)]
 pub struct AppState {
     // Persistent sysinfo handles
     pub sys: SharedSystem,
-    pub nets: SharedNetworks,
-    pub net_totals: SharedTotals, // iface -> (rx_total, tx_total)
-    pub components: SharedComponents,
-    pub disks: SharedDisks,
     // Last serialized JSON snapshot for fast WS responses
     pub last_json: Arc<RwLock<String>>,