Non-Linux optimizations for MacBook
parent f37b8d9ff4
commit 8ce00a5dad

Cargo.lock (generated): 2 lines changed
@@ -2187,7 +2187,7 @@ dependencies = [
 [[package]]
 name = "socktop_agent"
-version = "1.40.2"
+version = "1.40.3"
 dependencies = [
  "anyhow",
  "assert_cmd",
@@ -1,6 +1,6 @@
 [package]
 name = "socktop_agent"
-version = "1.40.2"
+version = "1.40.3"
 authors = ["Jason Witty <jasonpwitty+socktop@proton.me>"]
 description = "Remote system monitor over WebSocket, TUI like top"
 edition = "2021"
@@ -17,17 +17,33 @@ use sysinfo::{ProcessRefreshKind, ProcessesToUpdate};
 use tracing::warn;
 
 // Optional normalization: divide per-process cpu_usage by logical core count so a fully
-// saturated multi-core process reports near 100% instead of N*100%. Enabled by default on
-// non-Linux (macOS/Windows) to counter per-core summing; disable with SOCKTOP_AGENT_NORMALIZE_CPU=0.
+// saturated multi-core process reports near 100% instead of N*100%. Disabled by default on
+// non-Linux because Activity Monitor / Task Manager semantics allow per-process >100% (multi-core).
+// Enable with SOCKTOP_AGENT_NORMALIZE_CPU=1 if you prefer a single-core 0..100% scale.
 #[cfg(not(target_os = "linux"))]
 fn normalize_cpu_enabled() -> bool {
     static ON: OnceCell<bool> = OnceCell::new();
     *ON.get_or_init(|| {
         std::env::var("SOCKTOP_AGENT_NORMALIZE_CPU")
             .map(|v| v != "0")
-            .unwrap_or(true)
+            .unwrap_or(false)
     })
 }
+
+// Smoothed scaling factor cache (non-Linux) to prevent jitter when reconciling
+// summed per-process CPU usage with global CPU usage.
+#[cfg(not(target_os = "linux"))]
+static SCALE_SMOOTH: OnceCell<Mutex<Option<f32>>> = OnceCell::new();
+
+#[cfg(not(target_os = "linux"))]
+fn smooth_scale_factor(target: f32) -> f32 {
+    let lock = SCALE_SMOOTH.get_or_init(|| Mutex::new(None));
+    let mut guard = lock.lock().unwrap();
+    let new = guard
+        .map(|prev| prev * 0.6 + target * 0.4)
+        .unwrap_or(target);
+    *guard = Some(new);
+    new
+}
 // Runtime toggles (read once)
 fn gpu_enabled() -> bool {
     static ON: OnceCell<bool> = OnceCell::new();
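For illustration only (not part of the commit): a minimal, standalone Rust sketch of the same 0.6/0.4 exponential smoothing used by smooth_scale_factor, with made-up per-tick targets, showing how the applied factor drifts toward each new target instead of jumping.

// Standalone sketch of the smoothing idea: blend the previous factor with the
// new target (60% old, 40% new) so the applied scale changes gradually.
fn smooth(prev: Option<f32>, target: f32) -> f32 {
    prev.map(|p| p * 0.6 + target * 0.4).unwrap_or(target)
}

fn main() {
    // Hypothetical per-tick scale targets (global CPU divided by summed process CPU).
    let targets = [0.70_f32, 0.95, 0.65, 0.90];
    let mut prev: Option<f32> = None;
    for t in targets {
        let s = smooth(prev, t);
        prev = Some(s);
        println!("target = {t:.2}, smoothed = {s:.2}");
    }
    // Prints roughly 0.70, 0.80, 0.74, 0.80: each step moves only part of the way.
}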
@@ -313,7 +329,8 @@ pub async fn collect_processes_all(state: &AppState) -> ProcessesPayload {
     let ttl_ms: u64 = std::env::var("SOCKTOP_AGENT_PROCESSES_TTL_MS")
         .ok()
         .and_then(|v| v.parse().ok())
-        .unwrap_or(1_000);
+        // Higher default (1500ms) on non-Linux to lower overhead while keeping responsiveness.
+        .unwrap_or(1_500);
     let ttl = StdDuration::from_millis(ttl_ms);
     {
         let cache = state.cache_processes.lock().await;
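For illustration only (not part of the commit): a small sketch of the TTL fallback behaviour, with hypothetical inputs standing in for SOCKTOP_AGENT_PROCESSES_TTL_MS.

// Sketch of the env-var fallback for the processes cache TTL: unset or
// unparsable values fall back to the new 1500 ms default.
fn ttl_ms_from(raw: Option<&str>) -> u64 {
    raw.and_then(|v| v.parse().ok()).unwrap_or(1_500)
}

fn main() {
    assert_eq!(ttl_ms_from(None), 1_500);        // variable unset -> default
    assert_eq!(ttl_ms_from(Some("250")), 250);   // explicit faster refresh
    assert_eq!(ttl_ms_from(Some("abc")), 1_500); // parse failure -> default
}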
@@ -453,16 +470,14 @@ pub async fn collect_processes_all(state: &AppState) -> ProcessesPayload {
     sys.refresh_cpu_usage();
     let total_count = sys.processes().len();
     let norm = normalize_cpu_enabled();
-    let cores = if norm {
-        sys.cpus().len().max(1) as f32
-    } else {
-        1.0
-    };
+    let cores = sys.cpus().len().max(1) as f32;
     let mut list: Vec<ProcessInfo> = sys
         .processes()
         .values()
         .map(|p| {
             let raw = p.cpu_usage();
+            // If normalization enabled: present 0..100% single-core scale.
+            // Else keep raw (which may exceed 100 on multi-core usage) for familiarity with OS tools.
             let cpu = if norm {
                 (raw / cores).clamp(0.0, 100.0)
             } else {
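For illustration only (not part of the commit): the per-process presentation math above, sketched standalone with a hypothetical 8-core machine and a made-up raw reading.

// Sketch of the per-process CPU presentation: with normalization on, a fully
// saturated 8-core process reads ~100%; with it off (the new non-Linux default),
// the raw value is kept and may exceed 100%, matching Activity Monitor / Task Manager.
fn present_cpu(raw: f32, cores: f32, normalize: bool) -> f32 {
    if normalize {
        (raw / cores).clamp(0.0, 100.0)
    } else {
        raw
    }
}

fn main() {
    let cores = 8.0_f32; // hypothetical logical core count
    println!("{}", present_cpu(780.0, cores, true));  // 97.5 (single-core scale)
    println!("{}", present_cpu(780.0, cores, false)); // 780 (raw, multi-core scale)
}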
@@ -476,20 +491,22 @@ pub async fn collect_processes_all(state: &AppState) -> ProcessesPayload {
             }
         })
         .collect();
-    // Automatic scaling (enabled by default): if sum of per-process CPU exceeds global
-    // CPU by >5%, scale all process CPU values proportionally so the sum matches global.
-    if std::env::var("SOCKTOP_AGENT_SCALE_PROC_CPU")
+    // Global reconciliation (default ON) only when NOT using core normalization.
+    if !norm
+        && std::env::var("SOCKTOP_AGENT_SCALE_PROC_CPU")
         .map(|v| v != "0")
         .unwrap_or(true)
     {
         let sum: f32 = list.iter().map(|p| p.cpu_usage).sum();
         let global = sys.global_cpu_usage();
         if sum > 0.0 && global > 0.0 {
-            let scale = global / sum;
-            if scale < 0.95 {
-                // only scale if we're at least 5% over
+            // target scale so that sum * scale ~= global
+            let target_scale = (global / sum).min(1.0);
+            // Only scale if we're more than 10% over.
+            if target_scale < 0.9 {
+                let s = smooth_scale_factor(target_scale);
                 for p in &mut list {
-                    p.cpu_usage = (p.cpu_usage * scale).clamp(0.0, 100.0);
+                    p.cpu_usage = (p.cpu_usage * s).clamp(0.0, global.max(100.0));
                 }
             }
         }
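For illustration only (not part of the commit): a standalone sketch of the reconciliation step above, with made-up readings; the smoothing call is noted but omitted so the example stays self-contained.

// Sketch of the reconciliation step: shrink per-process CPU so the sum tracks the
// global reading, but only when the sum overshoots by more than 10%.
fn reconcile(usages: &mut [f32], global: f32) {
    let sum: f32 = usages.iter().sum();
    if sum > 0.0 && global > 0.0 {
        let target_scale = (global / sum).min(1.0);
        if target_scale < 0.9 {
            // The agent also smooths this factor via smooth_scale_factor; omitted here.
            for u in usages.iter_mut() {
                *u = (*u * target_scale).clamp(0.0, global.max(100.0));
            }
        }
    }
}

fn main() {
    // Hypothetical readings: processes sum to 160% while the global gauge reports 120%.
    let mut usages = vec![80.0_f32, 50.0, 30.0];
    reconcile(&mut usages, 120.0);
    println!("{usages:?}"); // scaled by 0.75 -> [60.0, 37.5, 22.5]
}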