socktop_agent: bump version to 1.40.64
This commit is contained in:
parent 7caf2f4bfb
commit ab3bb33711
Cargo.lock (generated)
@@ -2187,7 +2187,7 @@ dependencies = [

 [[package]]
 name = "socktop_agent"
-version = "1.40.63"
+version = "1.40.64"
 dependencies = [
  "anyhow",
  "assert_cmd",

@@ -1,6 +1,6 @@
 [package]
 name = "socktop_agent"
-version = "1.40.63"
+version = "1.40.64"
 authors = ["Jason Witty <jasonpwitty+socktop@proton.me>"]
 description = "Remote system monitor over WebSocket, TUI like top"
 edition = "2021"

@@ -108,8 +108,8 @@ pub async fn collect_fast_metrics(state: &AppState) -> Metrics {
     {
         let cache = state.cache_metrics.lock().await;
         if cache.is_fresh(ttl) {
-            if let Some(c) = cache.take_clone() {
-                return c;
+            if let Some(c) = cache.get() {
+                return c.clone();
             }
         }
     }

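This hunk (and the matching ones in collect_disks and collect_processes_all below) switches the read path from take_clone(), which cloned the cached value despite its name, to get(), which hands back a borrow that the caller clones only on a hit. A minimal standalone sketch of the same shape, assuming tokio's async Mutex (as the .lock().await calls suggest); Metrics here is a toy stand-in and the freshness check is omitted for brevity:

use tokio::sync::Mutex;

// Toy stand-in for the agent's Metrics payload.
#[derive(Clone, Debug)]
struct Metrics {
    cpu: f32,
}

async fn collect_fast_metrics(cache: &Mutex<Option<Metrics>>) -> Metrics {
    // Scoped read: the guard is dropped at the closing brace, so the lock
    // is not held while the expensive refresh below runs.
    {
        let guard = cache.lock().await;
        if let Some(m) = guard.as_ref() {
            return m.clone(); // borrow from the cache, clone only on a hit
        }
    }

    // Slow path: collect fresh data, then publish it for later callers.
    let fresh = Metrics { cpu: 12.5 };
    *cache.lock().await = Some(fresh.clone());
    fresh
}

#[tokio::main]
async fn main() {
    let cache = Mutex::new(None);
    println!("{:?}", collect_fast_metrics(&cache).await); // slow path
    println!("{:?}", collect_fast_metrics(&cache).await); // served from cache
}
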
@@ -239,8 +239,8 @@ pub async fn collect_disks(state: &AppState) -> Vec<DiskInfo> {
     {
         let cache = state.cache_disks.lock().await;
         if cache.is_fresh(ttl) {
-            if let Some(v) = cache.take_clone() {
-                return v;
+            if let Some(v) = cache.get() {
+                return v.clone();
             }
         }
     }

@@ -308,8 +308,8 @@ pub async fn collect_processes_all(state: &AppState) -> ProcessesPayload {
     {
         let cache = state.cache_processes.lock().await;
         if cache.is_fresh(ttl) {
-            if let Some(v) = cache.take_clone() {
-                return v;
+            if let Some(c) = cache.get() {
+                return c.clone();
             }
         }
     }

@@ -404,11 +404,24 @@ pub async fn collect_processes_all(state: &AppState) -> ProcessesPayload {
 /// Collect all processes (non-Linux): optimized for reduced allocations and selective updates.
 #[cfg(not(target_os = "linux"))]
 pub async fn collect_processes_all(state: &AppState) -> ProcessesPayload {
-    // Adaptive TTL based on system load
-    let sys_guard = state.sys.lock().await;
-    let load = sys_guard.global_cpu_usage();
-    drop(sys_guard);
+    // Serve from cache if fresh
+    {
+        let cache = state.cache_processes.lock().await;
+        if cache.is_fresh(StdDuration::from_millis(2_000)) {
+            // Use fixed TTL for cache check
+            if let Some(c) = cache.get() {
+                return c.clone();
+            }
+        }
+    }
+
+    // Single efficient refresh with optimized CPU collection
+    let (total_count, procs) = {
+        let mut sys = state.sys.lock().await;
+        // Get load first for TTL calculation
+        let load = sys.global_cpu_usage();

+        // Adaptive TTL based on system load - will be used for next cache cycle
     let ttl_ms: u64 = if let Ok(v) = std::env::var("SOCKTOP_AGENT_PROCESSES_TTL_MS") {
         v.parse().unwrap_or(2_000)
     } else {

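The added lines keep the adaptive TTL idea but compute it inside the refresh block, while the cache check above it now uses a fixed 2-second window. The TTL selection itself, with the SOCKTOP_AGENT_PROCESSES_TTL_MS override and the load thresholds that appear later in this diff, boils down to the following sketch (the helper name is illustrative, not from the source):

use std::time::Duration;

// Illustrative helper mirroring the TTL selection shown in the diff:
// an environment override wins, otherwise the TTL adapts to CPU load.
fn processes_ttl(load_percent: f32) -> Duration {
    let ttl_ms: u64 = if let Ok(v) = std::env::var("SOCKTOP_AGENT_PROCESSES_TTL_MS") {
        v.parse().unwrap_or(2_000)
    } else if load_percent < 10.0 {
        4_000 // light load: the process list may go stale a little longer
    } else if load_percent < 30.0 {
        2_000 // medium load
    } else {
        1_000 // high load: keep the data closer to real time
    };
    Duration::from_millis(ttl_ms)
}

fn main() {
    println!("{:?}", processes_ttl(5.0));  // 4s unless the env var overrides it
    println!("{:?}", processes_ttl(50.0)); // 1s unless the env var overrides it
}
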
@@ -423,32 +436,21 @@ pub async fn collect_processes_all(state: &AppState) -> ProcessesPayload {
             1_000 // High load
         }
     };
     let ttl = StdDuration::from_millis(ttl_ms);

-    // Serve from cache if fresh
-    {
-        let cache = state.cache_processes.lock().await;
-        if cache.is_fresh(ttl) {
-            if let Some(v) = cache.take_clone() {
-                return v;
-            }
-        }
-    }
-
-    // Single efficient refresh with optimized CPU collection
-    let (total_count, procs) = {
-        let mut sys = state.sys.lock().await;
         let kind = ProcessRefreshKind::nothing().with_memory();
-
-        // Optimize refresh strategy based on system load
-        if load > 5.0 {
+        //if load > 5.0 {
+
+        //JW too complicated. simplify to remove strange behavior
+
             // For active systems, get accurate CPU metrics
             sys.refresh_processes_specifics(ProcessesToUpdate::All, false, kind.with_cpu());
-        } else {
-            // For idle systems, just get basic process info
-            sys.refresh_processes_specifics(ProcessesToUpdate::All, false, kind);
-            sys.refresh_cpu_usage();
-        }
+
+        // } else {
+        //     // For idle systems, just get basic process info
+        //     sys.refresh_processes_specifics(ProcessesToUpdate::All, false, kind);
+        //     sys.refresh_cpu_usage();
+        // }

         let total_count = sys.processes().len();
         let cpu_count = sys.cpus().len() as f32;

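After this change the load-based branching is gone: every collection cycle refreshes all processes with CPU and memory, then refreshes overall CPU usage. A self-contained version of that path, built only from the sysinfo calls the diff itself shows (it assumes a sysinfo release with the three-argument refresh_processes_specifics, as the diff's call sites imply):

use sysinfo::{ProcessRefreshKind, ProcessesToUpdate, System};

fn main() {
    let mut sys = System::new();

    // Unconditional refresh: all processes, CPU + memory, as in the new code path.
    let kind = ProcessRefreshKind::nothing().with_memory().with_cpu();
    sys.refresh_processes_specifics(ProcessesToUpdate::All, false, kind);
    sys.refresh_cpu_usage();

    // The very first sample reports ~0% CPU; a long-running agent keeps `sys`
    // around and refreshes it on every collection cycle instead.
    for p in sys.processes().values().take(5) {
        println!(
            "{:>6} {:<24} cpu={:5.1}% mem={} B",
            p.pid().as_u32(),
            p.name().to_string_lossy(),
            p.cpu_usage().clamp(0.0, 100.0),
            p.memory()
        );
    }
}
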
@@ -482,12 +484,14 @@ pub async fn collect_processes_all(state: &AppState) -> ProcessesPayload {
                });
            }

-        // Sort by CPU usage
-        proc_cache.reusable_vec.sort_by(|a, b| {
-            b.cpu_usage
-                .partial_cmp(&a.cpu_usage)
-                .unwrap_or(std::cmp::Ordering::Equal)
-        });
+        //JW no need to sort here; client does the sorting
+
+        // // Sort by CPU usage
+        // proc_cache.reusable_vec.sort_by(|a, b| {
+        //     b.cpu_usage
+        //         .partial_cmp(&a.cpu_usage)
+        //         .unwrap_or(std::cmp::Ordering::Equal)
+        // });

         // Clean up old process names cache when it grows too large
         let cache_cleanup_threshold = std::env::var("SOCKTOP_AGENT_NAME_CACHE_CLEANUP_THRESHOLD")

@@ -507,8 +511,8 @@ pub async fn collect_processes_all(state: &AppState) -> ProcessesPayload {
            );
        }

-        // Get all processes, but keep the original ordering by CPU usage
-        (total_count, proc_cache.reusable_vec.clone())
+        // Get all processes, take ownership of the vec (will be replaced with empty vec)
+        (total_count, std::mem::take(&mut proc_cache.reusable_vec))
     };

     let payload = ProcessesPayload {

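The swapped return line trades a full clone of the reusable vector for std::mem::take, which moves the collected entries out and leaves an empty Vec (the type's Default) behind for the next cycle. A small illustration of the difference, with u32 standing in for ProcessInfo:

fn main() {
    struct ProcCache {
        reusable_vec: Vec<u32>, // stands in for Vec<ProcessInfo>
    }

    let mut proc_cache = ProcCache { reusable_vec: vec![1, 2, 3] };

    // Old approach: copy every element; the cache keeps its contents.
    let cloned = proc_cache.reusable_vec.clone();

    // New approach: move the buffer out; the cache is left holding an
    // empty Vec and is refilled on the next collection pass.
    let taken = std::mem::take(&mut proc_cache.reusable_vec);

    assert_eq!(cloned, vec![1, 2, 3]);
    assert_eq!(taken, vec![1, 2, 3]);
    assert!(proc_cache.reusable_vec.is_empty());
}

Note the trade-off: take also gives away the buffer's capacity, so the "reusable" allocation is not actually reused across cycles; what the change buys is skipping the per-call clone of every ProcessInfo.
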
@@ -1,96 +0,0 @@
-/// Collect all processes (non-Linux): optimized for reduced allocations and selective updates.
-#[cfg(not(target_os = "linux"))]
-pub async fn collect_processes_all(state: &AppState) -> ProcessesPayload {
-    // Adaptive TTL based on system load
-    let sys_guard = state.sys.lock().await;
-    let load = sys_guard.global_cpu_usage();
-    drop(sys_guard);
-
-    let ttl_ms: u64 = if let Ok(v) = std::env::var("SOCKTOP_AGENT_PROCESSES_TTL_MS") {
-        v.parse().unwrap_or(2_000)
-    } else {
-        // Adaptive TTL: longer when system is idle
-        if load < 10.0 {
-            4_000 // Light load
-        } else if load < 30.0 {
-            2_000 // Medium load
-        } else {
-            1_000 // High load
-        }
-    };
-    let ttl = StdDuration::from_millis(ttl_ms);
-
-    // Serve from cache if fresh
-    {
-        let cache = state.cache_processes.lock().await;
-        if cache.is_fresh(ttl) {
-            if let Some(v) = cache.take_clone() {
-                return v;
-            }
-        }
-    }
-
-    // Single efficient refresh: only update processes using significant CPU
-    let (total_count, procs) = {
-        let mut sys = state.sys.lock().await;
-        let kind = ProcessRefreshKind::nothing().with_cpu().with_memory();
-
-        // Only refresh processes using >0.1% CPU
-        sys.refresh_processes_specifics(
-            ProcessesToUpdate::new().with_cpu_usage_higher_than(0.1),
-            false,
-            kind
-        );
-        sys.refresh_cpu_usage();
-
-        let total_count = sys.processes().len();
-
-        // Reuse allocations via process cache
-        let mut proc_cache = state.proc_cache.lock().await;
-        proc_cache.reusable_vec.clear();
-
-        // Filter and collect processes with meaningful CPU usage
-        for p in sys.processes().values() {
-            let raw = p.cpu_usage();
-            if raw > 0.1 { // Skip negligible CPU users
-                let pid = p.pid().as_u32();
-
-                // Reuse cached name if available
-                let name = if let Some(cached) = proc_cache.names.get(&pid) {
-                    cached.clone()
-                } else {
-                    let new_name = p.name().to_string_lossy().into_owned();
-                    proc_cache.names.insert(pid, new_name.clone());
-                    new_name
-                };
-
-                proc_cache.reusable_vec.push(ProcessInfo {
-                    pid,
-                    name,
-                    cpu_usage: raw.clamp(0.0, 100.0),
-                    mem_bytes: p.memory(),
-                });
-            }
-        }
-
-        // Clean up old process names periodically
-        if total_count > proc_cache.names.len() + 100 {
-            proc_cache.names.retain(|pid, _|
-                sys.processes().contains_key(&sysinfo::Pid::from_u32(*pid))
-            );
-        }
-
-        (total_count, proc_cache.reusable_vec.clone())
-    };
-
-    let payload = ProcessesPayload {
-        process_count: total_count,
-        top_processes: procs,
-    };
-
-    {
-        let mut cache = state.cache_processes.lock().await;
-        cache.set(payload.clone());
-    }
-    payload
-}

@@ -85,11 +85,8 @@ impl<T> CacheEntry<T> {
         self.value = Some(v);
         self.at = Some(Instant::now());
     }
-    pub fn take_clone(&self) -> Option<T>
-    where
-        T: Clone,
-    {
-        self.value.clone()
+    pub fn get(&self) -> Option<&T> {
+        self.value.as_ref()
     }
 }

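For context, a minimal sketch of what the modified cache entry plausibly looks like after this hunk, reconstructed only from the fields and methods visible in the diff (value, at, set, is_fresh, and the new get); everything else, including the constructor, is an assumption. The old take_clone required T: Clone and cloned on every hit despite its name; get returns a borrow and leaves the clone to the call site:

use std::time::{Duration, Instant};

// Sketch reconstructed from the diff; names follow the hunks, the rest is assumed.
struct CacheEntry<T> {
    value: Option<T>,
    at: Option<Instant>,
}

impl<T> CacheEntry<T> {
    fn new() -> Self {
        Self { value: None, at: None } // assumed constructor
    }

    fn set(&mut self, v: T) {
        self.value = Some(v);
        self.at = Some(Instant::now());
    }

    fn is_fresh(&self, ttl: Duration) -> bool {
        self.at.map_or(false, |at| at.elapsed() < ttl)
    }

    // Replaces take_clone(): no T: Clone bound needed here; callers clone
    // only when they actually return the cached value.
    fn get(&self) -> Option<&T> {
        self.value.as_ref()
    }
}

fn main() {
    let mut entry = CacheEntry::new();
    entry.set(vec!["socktop_agent".to_string()]);
    if entry.is_fresh(Duration::from_secs(2)) {
        if let Some(v) = entry.get() {
            println!("cache hit: {:?}", v.clone());
        }
    }
}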