Fix remaining clippy warnings in socktop_agent
parent 764c25846f
commit eed04f1d5c
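Every hunk below is the same mechanical rewrite: a nested `if`/`if let` is collapsed into a single let-chain so the outer condition and the inner binding sit in one `if`, which is what the clippy warning (presumably `collapsible_if`; the diff does not name the lint) asks for. A minimal sketch of the before/after shape, with an illustrative cache struct standing in for the agent's real statics (the `OnceLock<Mutex<...>>` layout is an assumption; only the field names `v`/`at` and the `TEMP.get()` / `lock.lock()` calls come from the diff, and let-chains need a toolchain where they are stable, i.e. the 2024 edition):

use std::sync::{Mutex, OnceLock};
use std::time::Instant;

// Illustrative cache record; field names mirror the diff, the struct itself is assumed.
struct CachedTemp {
    v: Option<f32>,
    at: Option<Instant>,
}

static TEMP: OnceLock<Mutex<CachedTemp>> = OnceLock::new();

fn set_temp(v: Option<f32>) {
    // Before the commit this was two nested blocks:
    //   if let Some(lock) = TEMP.get() {
    //       if let Ok(mut c) = lock.lock() { ... }
    //   }
    // After: one let-chain, one level of nesting less.
    if let Some(lock) = TEMP.get()
        && let Ok(mut c) = lock.lock()
    {
        c.v = v;
        c.at = Some(Instant::now());
    }
}

fn main() {
    TEMP.get_or_init(|| Mutex::new(CachedTemp { v: None, at: None }));
    set_temp(Some(42.5));
    println!("temp cached: {:?}", TEMP.get().unwrap().lock().unwrap().v);
}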
@@ -74,13 +74,13 @@ fn cached_temp() -> Option<f32> {
 }
 
 fn set_temp(v: Option<f32>) {
-    if let Some(lock) = TEMP.get() {
-        if let Ok(mut c) = lock.lock() {
+    if let Some(lock) = TEMP.get()
+        && let Ok(mut c) = lock.lock()
+    {
         c.v = v;
         c.at = Some(Instant::now());
-        }
     }
 }
 
 fn cached_gpus() -> Option<Vec<crate::gpu::GpuMetrics>> {
     if !gpu_enabled() {
@@ -98,13 +98,13 @@ fn cached_gpus() -> Option<Vec<crate::gpu::GpuMetrics>> {
 }
 
 fn set_gpus(v: Option<Vec<crate::gpu::GpuMetrics>>) {
-    if let Some(lock) = GPUC.get() {
-        if let Ok(mut c) = lock.lock() {
+    if let Some(lock) = GPUC.get()
+        && let Ok(mut c) = lock.lock()
+    {
         c.v = v.clone();
         c.at = Some(Instant::now());
-        }
     }
 }
 
 // Collect only fast-changing metrics (CPU/mem/net + optional temps/gpus).
 pub async fn collect_fast_metrics(state: &AppState) -> Metrics {
@@ -116,12 +116,12 @@ pub async fn collect_fast_metrics(state: &AppState) -> Metrics {
     let ttl = StdDuration::from_millis(ttl_ms);
     {
         let cache = state.cache_metrics.lock().await;
-        if cache.is_fresh(ttl) {
-            if let Some(c) = cache.get() {
+        if cache.is_fresh(ttl)
+            && let Some(c) = cache.get()
+        {
             return c.clone();
-            }
         }
     }
     let mut sys = state.sys.lock().await;
     if let Err(e) = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
         sys.refresh_cpu_usage();
@@ -278,12 +278,12 @@ pub async fn collect_disks(state: &AppState) -> Vec<DiskInfo> {
     let ttl = StdDuration::from_millis(ttl_ms);
     {
         let cache = state.cache_disks.lock().await;
-        if cache.is_fresh(ttl) {
-            if let Some(v) = cache.get() {
+        if cache.is_fresh(ttl)
+            && let Some(v) = cache.get()
+        {
             return v.clone();
-            }
         }
     }
     let mut disks_list = state.disks.lock().await;
     disks_list.refresh(false); // don't drop missing disks
     let disks: Vec<DiskInfo> = disks_list
@@ -347,12 +347,12 @@ pub async fn collect_processes_all(state: &AppState) -> ProcessesPayload {
     let ttl = StdDuration::from_millis(ttl_ms);
     {
         let cache = state.cache_processes.lock().await;
-        if cache.is_fresh(ttl) {
-            if let Some(c) = cache.get() {
+        if cache.is_fresh(ttl)
+            && let Some(c) = cache.get()
+        {
             return c.clone();
-            }
         }
     }
     // Reuse shared System to avoid reallocation; refresh processes fully.
     let mut sys_guard = state.sys.lock().await;
     let sys = &mut *sys_guard;
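The three collector hunks above (`collect_fast_metrics`, `collect_disks`, `collect_processes_all`) all guard the same TTL fast-path: take the async cache lock, and if the entry is still fresh, return a clone of it before touching the slower system refresh. A compilable sketch of that shape under assumed types — `TimedCache`, its fields, and the surrounding function are stand-ins; only `is_fresh(ttl)` and `get()` come from the diff:

use std::time::{Duration, Instant};
use tokio::sync::Mutex;

// Hypothetical stand-in for the agent's cache slot.
struct TimedCache<T> {
    v: Option<T>,
    at: Option<Instant>,
}

impl<T: Clone> TimedCache<T> {
    fn is_fresh(&self, ttl: Duration) -> bool {
        self.at.map(|t| t.elapsed() < ttl).unwrap_or(false)
    }
    fn get(&self) -> Option<&T> {
        self.v.as_ref()
    }
}

async fn collect(cache: &Mutex<TimedCache<Vec<String>>>, ttl: Duration) -> Vec<String> {
    {
        // Fast path: serve the cached value while it is within its TTL.
        let cache = cache.lock().await;
        if cache.is_fresh(ttl)
            && let Some(c) = cache.get()
        {
            return c.clone();
        }
    } // lock dropped before the slow path runs
    // Slow path (elided in this sketch): recompute and refresh the cache.
    Vec::new()
}

#[tokio::main]
async fn main() {
    let cache = Mutex::new(TimedCache {
        v: Some(vec!["cached".to_string()]),
        at: Some(Instant::now()),
    });
    println!("{:?}", collect(&cache, Duration::from_millis(500)).await);
}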
@@ -40,13 +40,13 @@ pub async fn ws_handler(
     Query(q): Query<HashMap<String, String>>,
 ) -> Response {
     // optional auth
-    if let Some(expected) = state.auth_token.as_ref() {
-        if q.get("token") != Some(expected) {
+    if let Some(expected) = state.auth_token.as_ref()
+        && q.get("token") != Some(expected)
+    {
         return ws.on_upgrade(|socket| async move {
             let _ = socket.close().await;
         });
-        }
     }
     ws.on_upgrade(move |socket| handle_socket(socket, state))
 }
 
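For the WebSocket route, the collapsed condition keeps the existing auth behaviour: when an `auth_token` is configured and the `token` query parameter does not match, the upgrade is still completed and the socket is closed immediately; otherwise the connection proceeds to `handle_socket`. A rough, compilable approximation of the handler after this change — `AppState` is reduced to the one field the hunk touches, `handle_socket` is stubbed, and axum 0.7-style extractors and wiring are assumptions, not the crate's exact code:

use std::collections::HashMap;

use axum::extract::ws::{WebSocket, WebSocketUpgrade};
use axum::extract::{Query, State};
use axum::response::Response;

// Trimmed-down stand-in for the agent's shared state.
#[derive(Clone)]
struct AppState {
    auth_token: Option<String>,
}

async fn ws_handler(
    ws: WebSocketUpgrade,
    State(state): State<AppState>,
    Query(q): Query<HashMap<String, String>>,
) -> Response {
    // optional auth: reject by completing the upgrade, then closing right away
    if let Some(expected) = state.auth_token.as_ref()
        && q.get("token") != Some(expected)
    {
        return ws.on_upgrade(|socket| async move {
            let _ = socket.close().await;
        });
    }
    ws.on_upgrade(move |socket| handle_socket(socket, state))
}

// Stub; the real agent streams metrics over the socket here.
async fn handle_socket(_socket: WebSocket, _state: AppState) {}

#[tokio::main]
async fn main() {
    let app = axum::Router::new()
        .route("/ws", axum::routing::get(ws_handler))
        .with_state(AppState { auth_token: Some("secret".into()) });
    let listener = tokio::net::TcpListener::bind("127.0.0.1:3000").await.unwrap();
    axum::serve(listener, app).await.unwrap();
}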