Compare commits
34 Commits
feature/ex
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
| 3024816525 | |||
| 1d7bc42d59 | |||
| 518ae8c2bf | |||
| 6eb1809309 | |||
| 1c01902a71 | |||
| 9d302ad475 | |||
| 7875f132f7 | |||
| 0d789fb97c | |||
| 5ddaed298b | |||
| 1528568c30 | |||
| 6f238cdf25 | |||
| ffe451edaa | |||
| c9bde52cb1 | |||
| 0603746d7c | |||
| 25632f3427 | |||
| e51cdb0c50 | |||
| 1cb05d404b | |||
| 4196066e57 | |||
| 47e96c7d92 | |||
| bae2ecb79a | |||
| bd0d15a1ae | |||
| 689498c5f4 | |||
| 34e260a612 | |||
| 47eff3a75c | |||
| 0210b49219 | |||
| 70a150152c | |||
| f4b54db399 | |||
| e857cfc665 | |||
| e66008f341 | |||
| a238ce320b | |||
| b635f5d7f4 | |||
| 18b41c1b45 | |||
| b74242e6d9 | |||
| 4e378b882a |
4
.gitignore
vendored
4
.gitignore
vendored
@ -1,3 +1,7 @@
|
||||
/target
|
||||
.vscode/
|
||||
/socktop-wasm-test/target
|
||||
|
||||
# Documentation files from development sessions (context-specific, not for public repo)
|
||||
/OPTIMIZATION_PROCESS_DETAILS.md
|
||||
/THREAD_SUPPORT.md
|
||||
|
||||
995
Cargo.lock
generated
995
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
21
LICENSE
Normal file
21
LICENSE
Normal file
@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2025 Witty One Off
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
@ -1,8 +0,0 @@
|
||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8433/ws
|
||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8433/ws
|
||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8433/ws
|
||||
Error: Address already in use (os error 98)
|
||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8433/ws
|
||||
Error: Address already in use (os error 98)
|
||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8443/ws
|
||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8443/ws
|
||||
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "socktop"
|
||||
version = "1.40.0"
|
||||
version = "1.50.0"
|
||||
authors = ["Jason Witty <jasonpwitty+socktop@proton.me>"]
|
||||
description = "Remote system monitor over WebSocket, TUI like top"
|
||||
edition = "2024"
|
||||
@ -9,7 +9,7 @@ readme = "README.md"
|
||||
|
||||
[dependencies]
|
||||
# socktop connector for agent communication
|
||||
socktop_connector = { path = "../socktop_connector" }
|
||||
socktop_connector = "1.50.0"
|
||||
|
||||
tokio = { workspace = true }
|
||||
futures-util = { workspace = true }
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@ -3,8 +3,9 @@
|
||||
mod app;
|
||||
mod history;
|
||||
mod profiles;
|
||||
mod retry;
|
||||
mod types;
|
||||
mod ui;
|
||||
mod ui; // pure retry timing logic
|
||||
|
||||
use app::App;
|
||||
use profiles::{ProfileEntry, ProfileRequest, ResolveProfile, load_profiles, save_profiles};
|
||||
|
||||
114
socktop/src/retry.rs
Normal file
114
socktop/src/retry.rs
Normal file
@ -0,0 +1,114 @@
|
||||
//! Pure retry timing logic (decoupled from App state / UI) for testability.
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
/// Result of computing retry timing.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct RetryTiming {
|
||||
pub should_retry_now: bool,
|
||||
/// Seconds until next retry (Some(0) means ready now); None means inactive/no countdown.
|
||||
pub seconds_until_retry: Option<u64>,
|
||||
}
|
||||
|
||||
/// Compute retry timing given connection state inputs.
|
||||
///
|
||||
/// Inputs:
|
||||
/// - `disconnected`: true when connection_state == Disconnected.
|
||||
/// - `modal_active`: requires the connection error modal be visible to show countdown / trigger auto retry.
|
||||
/// - `original_disconnect_time`: time we first noticed disconnect.
|
||||
/// - `last_auto_retry`: time we last performed an automatic retry.
|
||||
/// - `now`: current time (injected for determinism / tests).
|
||||
/// - `interval`: retry interval duration.
|
||||
pub(crate) fn compute_retry_timing(
|
||||
disconnected: bool,
|
||||
modal_active: bool,
|
||||
original_disconnect_time: Option<Instant>,
|
||||
last_auto_retry: Option<Instant>,
|
||||
now: Instant,
|
||||
interval: Duration,
|
||||
) -> RetryTiming {
|
||||
if !disconnected || !modal_active {
|
||||
return RetryTiming {
|
||||
should_retry_now: false,
|
||||
seconds_until_retry: None,
|
||||
};
|
||||
}
|
||||
|
||||
let baseline = match last_auto_retry.or(original_disconnect_time) {
|
||||
Some(b) => b,
|
||||
None => {
|
||||
return RetryTiming {
|
||||
should_retry_now: false,
|
||||
seconds_until_retry: None,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
let elapsed = now.saturating_duration_since(baseline);
|
||||
if elapsed >= interval {
|
||||
RetryTiming {
|
||||
should_retry_now: true,
|
||||
seconds_until_retry: Some(0),
|
||||
}
|
||||
} else {
|
||||
let remaining = interval - elapsed;
|
||||
RetryTiming {
|
||||
should_retry_now: false,
|
||||
seconds_until_retry: Some(remaining.as_secs()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn inactive_when_not_disconnected() {
|
||||
let now = Instant::now();
|
||||
let rt = compute_retry_timing(false, true, Some(now), None, now, Duration::from_secs(30));
|
||||
assert!(!rt.should_retry_now);
|
||||
assert_eq!(rt.seconds_until_retry, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn countdown_progress_and_ready() {
|
||||
let base = Instant::now();
|
||||
let rt1 = compute_retry_timing(
|
||||
true,
|
||||
true,
|
||||
Some(base),
|
||||
None,
|
||||
base + Duration::from_secs(10),
|
||||
Duration::from_secs(30),
|
||||
);
|
||||
assert!(!rt1.should_retry_now);
|
||||
assert_eq!(rt1.seconds_until_retry, Some(20));
|
||||
let rt2 = compute_retry_timing(
|
||||
true,
|
||||
true,
|
||||
Some(base),
|
||||
None,
|
||||
base + Duration::from_secs(30),
|
||||
Duration::from_secs(30),
|
||||
);
|
||||
assert!(rt2.should_retry_now);
|
||||
assert_eq!(rt2.seconds_until_retry, Some(0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn uses_last_auto_retry_as_baseline() {
|
||||
let base: Instant = Instant::now();
|
||||
let last = base + Duration::from_secs(30); // one prior retry
|
||||
// 10s after last retry => 20s remaining
|
||||
let rt = compute_retry_timing(
|
||||
true,
|
||||
true,
|
||||
Some(base),
|
||||
Some(last),
|
||||
last + Duration::from_secs(10),
|
||||
Duration::from_secs(30),
|
||||
);
|
||||
assert!(!rt.should_retry_now);
|
||||
assert_eq!(rt.seconds_until_retry, Some(20));
|
||||
}
|
||||
}
|
||||
1849
socktop/src/ui/.modal.rs.backup
Normal file
1849
socktop/src/ui/.modal.rs.backup
Normal file
File diff suppressed because it is too large
Load Diff
@ -42,8 +42,8 @@ pub fn per_core_content_area(area: Rect) -> Rect {
|
||||
/// Handles key events for per-core CPU bars.
|
||||
pub fn per_core_handle_key(scroll_offset: &mut usize, key: KeyEvent, page_size: usize) {
|
||||
match key.code {
|
||||
KeyCode::Up => *scroll_offset = scroll_offset.saturating_sub(1),
|
||||
KeyCode::Down => *scroll_offset = scroll_offset.saturating_add(1),
|
||||
KeyCode::Left => *scroll_offset = scroll_offset.saturating_sub(1),
|
||||
KeyCode::Right => *scroll_offset = scroll_offset.saturating_add(1),
|
||||
KeyCode::PageUp => {
|
||||
let step = page_size.max(1);
|
||||
*scroll_offset = scroll_offset.saturating_sub(step);
|
||||
@ -240,20 +240,61 @@ pub fn draw_cpu_avg_graph(
|
||||
hist: &std::collections::VecDeque<u64>,
|
||||
m: Option<&Metrics>,
|
||||
) {
|
||||
// Calculate average CPU over the monitoring period
|
||||
let avg_cpu = if !hist.is_empty() {
|
||||
let sum: u64 = hist.iter().sum();
|
||||
sum as f64 / hist.len() as f64
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
|
||||
let title = if let Some(mm) = m {
|
||||
format!("CPU avg (now: {:>5.1}%)", mm.cpu_total)
|
||||
format!("CPU (now: {:>5.1}% | avg: {:>5.1}%)", mm.cpu_total, avg_cpu)
|
||||
} else {
|
||||
"CPU avg".into()
|
||||
};
|
||||
|
||||
// Build the top-right info (CPU temp and polling intervals)
|
||||
let top_right_info = if let Some(mm) = m {
|
||||
mm.cpu_temp_c
|
||||
.map(|t| {
|
||||
let icon = if t < 50.0 {
|
||||
"😎"
|
||||
} else if t < 85.0 {
|
||||
"⚠️"
|
||||
} else {
|
||||
"🔥"
|
||||
};
|
||||
format!("CPU Temp: {t:.1}°C {icon}")
|
||||
})
|
||||
.unwrap_or_else(|| "CPU Temp: N/A".into())
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
|
||||
let max_points = area.width.saturating_sub(2) as usize;
|
||||
let start = hist.len().saturating_sub(max_points);
|
||||
let data: Vec<u64> = hist.iter().skip(start).cloned().collect();
|
||||
|
||||
// Render the sparkline with title on left
|
||||
let spark = Sparkline::default()
|
||||
.block(Block::default().borders(Borders::ALL).title(title))
|
||||
.data(&data)
|
||||
.max(100)
|
||||
.style(Style::default().fg(Color::Cyan));
|
||||
f.render_widget(spark, area);
|
||||
|
||||
// Render the top-right info as text overlay in the top-right corner
|
||||
if !top_right_info.is_empty() {
|
||||
let info_area = Rect {
|
||||
x: area.x + area.width.saturating_sub(top_right_info.len() as u16 + 2),
|
||||
y: area.y,
|
||||
width: top_right_info.len() as u16 + 1,
|
||||
height: 1,
|
||||
};
|
||||
let info_line = Line::from(Span::raw(top_right_info));
|
||||
f.render_widget(Paragraph::new(info_line), info_area);
|
||||
}
|
||||
}
|
||||
|
||||
/// Draws the per-core CPU bars with sparklines and trends.
|
||||
|
||||
@ -24,8 +24,16 @@ pub fn draw_disks(f: &mut ratatui::Frame<'_>, area: Rect, m: Option<&Metrics>) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Filter duplicates by keeping first occurrence of each unique name
|
||||
let mut seen_names = std::collections::HashSet::new();
|
||||
let unique_disks: Vec<_> = mm
|
||||
.disks
|
||||
.iter()
|
||||
.filter(|d| seen_names.insert(d.name.clone()))
|
||||
.collect();
|
||||
|
||||
let per_disk_h = 3u16;
|
||||
let max_cards = (inner.height / per_disk_h).min(mm.disks.len() as u16) as usize;
|
||||
let max_cards = (inner.height / per_disk_h).min(unique_disks.len() as u16) as usize;
|
||||
|
||||
let constraints: Vec<Constraint> = (0..max_cards)
|
||||
.map(|_| Constraint::Length(per_disk_h))
|
||||
@ -36,7 +44,7 @@ pub fn draw_disks(f: &mut ratatui::Frame<'_>, area: Rect, m: Option<&Metrics>) {
|
||||
.split(inner);
|
||||
|
||||
for (i, slot) in rows.iter().enumerate() {
|
||||
let d = &mm.disks[i];
|
||||
let d = unique_disks[i];
|
||||
let used = d.total.saturating_sub(d.available);
|
||||
let ratio = if d.total > 0 {
|
||||
used as f64 / d.total as f64
|
||||
@ -53,23 +61,43 @@ pub fn draw_disks(f: &mut ratatui::Frame<'_>, area: Rect, m: Option<&Metrics>) {
|
||||
ratatui::style::Color::Red
|
||||
};
|
||||
|
||||
// Add indentation for partitions
|
||||
let indent = if d.is_partition { "└─" } else { "" };
|
||||
|
||||
// Add temperature if available
|
||||
let temp_str = d
|
||||
.temperature
|
||||
.map(|t| format!(" {}°C", t.round() as i32))
|
||||
.unwrap_or_default();
|
||||
|
||||
let title = format!(
|
||||
"{} {} {} / {} ({}%)",
|
||||
"{}{}{}{} {} / {} ({}%)",
|
||||
indent,
|
||||
disk_icon(&d.name),
|
||||
truncate_middle(&d.name, (slot.width.saturating_sub(6)) as usize / 2),
|
||||
temp_str,
|
||||
human(used),
|
||||
human(d.total),
|
||||
pct
|
||||
);
|
||||
|
||||
// Indent the entire card (block) for partitions to align with └─ prefix (4 chars)
|
||||
let card_indent = if d.is_partition { 4 } else { 0 };
|
||||
let card_rect = Rect {
|
||||
x: slot.x + card_indent,
|
||||
y: slot.y,
|
||||
width: slot.width.saturating_sub(card_indent),
|
||||
height: slot.height,
|
||||
};
|
||||
|
||||
let card = Block::default().borders(Borders::ALL).title(title);
|
||||
f.render_widget(card, *slot);
|
||||
f.render_widget(card, card_rect);
|
||||
|
||||
let inner_card = Rect {
|
||||
x: slot.x + 1,
|
||||
y: slot.y + 1,
|
||||
width: slot.width.saturating_sub(2),
|
||||
height: slot.height.saturating_sub(2),
|
||||
x: card_rect.x + 1,
|
||||
y: card_rect.y + 1,
|
||||
width: card_rect.width.saturating_sub(2),
|
||||
height: card_rect.height.saturating_sub(2),
|
||||
};
|
||||
if inner_card.height == 0 {
|
||||
continue;
|
||||
|
||||
@ -3,7 +3,8 @@
|
||||
use crate::types::Metrics;
|
||||
use ratatui::{
|
||||
layout::Rect,
|
||||
widgets::{Block, Borders},
|
||||
text::{Line, Span},
|
||||
widgets::{Block, Borders, Paragraph},
|
||||
};
|
||||
use std::time::Duration;
|
||||
|
||||
@ -17,20 +18,7 @@ pub fn draw_header(
|
||||
procs_interval: Duration,
|
||||
) {
|
||||
let base = if let Some(mm) = m {
|
||||
let temp = mm
|
||||
.cpu_temp_c
|
||||
.map(|t| {
|
||||
let icon = if t < 50.0 {
|
||||
"😎"
|
||||
} else if t < 85.0 {
|
||||
"⚠️"
|
||||
} else {
|
||||
"🔥"
|
||||
};
|
||||
format!("CPU Temp: {t:.1}°C {icon}")
|
||||
})
|
||||
.unwrap_or_else(|| "CPU Temp: N/A".into());
|
||||
format!("socktop — host: {} | {}", mm.hostname, temp)
|
||||
format!("socktop — host: {}", mm.hostname)
|
||||
} else {
|
||||
"socktop — connecting...".into()
|
||||
};
|
||||
@ -38,15 +26,30 @@ pub fn draw_header(
|
||||
let tls_txt = if is_tls { "🔒 TLS" } else { "🔒✗ TLS" };
|
||||
// Token indicator
|
||||
let tok_txt = if has_token { "🔑 token" } else { "" };
|
||||
let mi = metrics_interval.as_millis();
|
||||
let pi = procs_interval.as_millis();
|
||||
let intervals = format!("⏱ {mi}ms metrics | {pi}ms procs");
|
||||
let mut parts = vec![base, tls_txt.into()];
|
||||
if !tok_txt.is_empty() {
|
||||
parts.push(tok_txt.into());
|
||||
}
|
||||
parts.push(intervals);
|
||||
parts.push("(q to quit)".into());
|
||||
parts.push("(a: about, h: help, q: quit)".into());
|
||||
let title = parts.join(" | ");
|
||||
|
||||
// Render the block with left-aligned title
|
||||
f.render_widget(Block::default().title(title).borders(Borders::BOTTOM), area);
|
||||
|
||||
// Render polling intervals on the right side
|
||||
let mi = metrics_interval.as_millis();
|
||||
let pi = procs_interval.as_millis();
|
||||
let intervals = format!("⏱ {mi}ms metrics | {pi}ms procs");
|
||||
let intervals_width = intervals.len() as u16;
|
||||
|
||||
if area.width > intervals_width + 2 {
|
||||
let right_area = Rect {
|
||||
x: area.x + area.width.saturating_sub(intervals_width + 1),
|
||||
y: area.y,
|
||||
width: intervals_width,
|
||||
height: 1,
|
||||
};
|
||||
let intervals_line = Line::from(Span::raw(intervals));
|
||||
f.render_widget(Paragraph::new(intervals_line), right_area);
|
||||
}
|
||||
}
|
||||
|
||||
@ -5,6 +5,11 @@ pub mod disks;
|
||||
pub mod gpu;
|
||||
pub mod header;
|
||||
pub mod mem;
|
||||
pub mod modal;
|
||||
pub mod modal_connection;
|
||||
pub mod modal_format;
|
||||
pub mod modal_process;
|
||||
pub mod modal_types;
|
||||
pub mod net;
|
||||
pub mod processes;
|
||||
pub mod swap;
|
||||
|
||||
634
socktop/src/ui/modal.rs
Normal file
634
socktop/src/ui/modal.rs
Normal file
@ -0,0 +1,634 @@
|
||||
//! Modal window system for socktop TUI application
|
||||
|
||||
use super::theme::MODAL_DIM_BG;
|
||||
use crossterm::event::KeyCode;
|
||||
use ratatui::{
|
||||
Frame,
|
||||
layout::{Alignment, Constraint, Direction, Layout, Rect},
|
||||
style::{Color, Modifier, Style},
|
||||
text::Line,
|
||||
widgets::{Block, Borders, Clear, Paragraph, Wrap},
|
||||
};
|
||||
|
||||
// Re-export types from modal_types
|
||||
pub use super::modal_types::{
|
||||
ModalAction, ModalButton, ModalType, ProcessHistoryData, ProcessModalData,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ModalManager {
|
||||
stack: Vec<ModalType>,
|
||||
pub(super) active_button: ModalButton,
|
||||
pub thread_scroll_offset: usize,
|
||||
pub journal_scroll_offset: usize,
|
||||
pub thread_scroll_max: usize,
|
||||
pub journal_scroll_max: usize,
|
||||
pub help_scroll_offset: usize,
|
||||
}
|
||||
|
||||
impl ModalManager {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
stack: Vec::new(),
|
||||
active_button: ModalButton::Retry,
|
||||
thread_scroll_offset: 0,
|
||||
journal_scroll_offset: 0,
|
||||
thread_scroll_max: 0,
|
||||
journal_scroll_max: 0,
|
||||
help_scroll_offset: 0,
|
||||
}
|
||||
}
|
||||
pub fn is_active(&self) -> bool {
|
||||
!self.stack.is_empty()
|
||||
}
|
||||
|
||||
pub fn current_modal(&self) -> Option<&ModalType> {
|
||||
self.stack.last()
|
||||
}
|
||||
|
||||
pub fn push_modal(&mut self, modal: ModalType) {
|
||||
self.stack.push(modal);
|
||||
self.active_button = match self.stack.last() {
|
||||
Some(ModalType::ConnectionError { .. }) => ModalButton::Retry,
|
||||
Some(ModalType::ProcessDetails { .. }) => {
|
||||
// Reset scroll state for new process details
|
||||
self.thread_scroll_offset = 0;
|
||||
self.journal_scroll_offset = 0;
|
||||
self.thread_scroll_max = 0;
|
||||
self.journal_scroll_max = 0;
|
||||
ModalButton::Ok
|
||||
}
|
||||
Some(ModalType::About) => ModalButton::Ok,
|
||||
Some(ModalType::Help) => {
|
||||
// Reset scroll state for help modal
|
||||
self.help_scroll_offset = 0;
|
||||
ModalButton::Ok
|
||||
}
|
||||
Some(ModalType::Confirmation { .. }) => ModalButton::Confirm,
|
||||
Some(ModalType::Info { .. }) => ModalButton::Ok,
|
||||
None => ModalButton::Ok,
|
||||
};
|
||||
}
|
||||
pub fn pop_modal(&mut self) -> Option<ModalType> {
|
||||
let m = self.stack.pop();
|
||||
if let Some(next) = self.stack.last() {
|
||||
self.active_button = match next {
|
||||
ModalType::ConnectionError { .. } => ModalButton::Retry,
|
||||
ModalType::ProcessDetails { .. } => ModalButton::Ok,
|
||||
ModalType::About => ModalButton::Ok,
|
||||
ModalType::Help => ModalButton::Ok,
|
||||
ModalType::Confirmation { .. } => ModalButton::Confirm,
|
||||
ModalType::Info { .. } => ModalButton::Ok,
|
||||
};
|
||||
}
|
||||
m
|
||||
}
|
||||
pub fn update_connection_error_countdown(&mut self, new_countdown: Option<u64>) {
|
||||
if let Some(ModalType::ConnectionError {
|
||||
auto_retry_countdown,
|
||||
..
|
||||
}) = self.stack.last_mut()
|
||||
{
|
||||
*auto_retry_countdown = new_countdown;
|
||||
}
|
||||
}
|
||||
pub fn handle_key(&mut self, key: KeyCode) -> ModalAction {
|
||||
if !self.is_active() {
|
||||
return ModalAction::None;
|
||||
}
|
||||
match key {
|
||||
KeyCode::Esc => {
|
||||
self.pop_modal();
|
||||
ModalAction::Cancel
|
||||
}
|
||||
KeyCode::Enter => self.handle_enter(),
|
||||
KeyCode::Tab | KeyCode::Right => {
|
||||
self.next_button();
|
||||
ModalAction::None
|
||||
}
|
||||
KeyCode::BackTab | KeyCode::Left => {
|
||||
self.prev_button();
|
||||
ModalAction::None
|
||||
}
|
||||
KeyCode::Char('r') | KeyCode::Char('R') => {
|
||||
if matches!(self.stack.last(), Some(ModalType::ConnectionError { .. })) {
|
||||
ModalAction::RetryConnection
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Char('q') | KeyCode::Char('Q') => {
|
||||
if matches!(self.stack.last(), Some(ModalType::ConnectionError { .. })) {
|
||||
ModalAction::ExitApp
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Char('x') | KeyCode::Char('X') => {
|
||||
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||
// Close all ProcessDetails modals at once (handles parent navigation chain)
|
||||
while matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||
self.pop_modal();
|
||||
}
|
||||
ModalAction::Dismiss
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Char('j') | KeyCode::Char('J') => {
|
||||
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||
self.thread_scroll_offset = self
|
||||
.thread_scroll_offset
|
||||
.saturating_add(1)
|
||||
.min(self.thread_scroll_max);
|
||||
ModalAction::Handled
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Char('k') | KeyCode::Char('K') => {
|
||||
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||
self.thread_scroll_offset = self.thread_scroll_offset.saturating_sub(1);
|
||||
ModalAction::Handled
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Char('d') | KeyCode::Char('D') => {
|
||||
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||
self.thread_scroll_offset = self
|
||||
.thread_scroll_offset
|
||||
.saturating_add(10)
|
||||
.min(self.thread_scroll_max);
|
||||
ModalAction::Handled
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Char('u') | KeyCode::Char('U') => {
|
||||
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||
self.thread_scroll_offset = self.thread_scroll_offset.saturating_sub(10);
|
||||
ModalAction::Handled
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Char('[') => {
|
||||
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||
self.journal_scroll_offset = self.journal_scroll_offset.saturating_sub(1);
|
||||
ModalAction::Handled
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Char(']') => {
|
||||
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||
self.journal_scroll_offset = self
|
||||
.journal_scroll_offset
|
||||
.saturating_add(1)
|
||||
.min(self.journal_scroll_max);
|
||||
ModalAction::Handled
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Char('p') | KeyCode::Char('P') => {
|
||||
// Switch to parent process if it exists
|
||||
if let Some(ModalType::ProcessDetails { pid }) = self.stack.last() {
|
||||
// We need to get the parent PID from the process details
|
||||
// For now, return a special action that the app can handle
|
||||
// The app has access to the process details and can extract parent_pid
|
||||
ModalAction::SwitchToParentProcess(*pid)
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Up => {
|
||||
if matches!(self.stack.last(), Some(ModalType::Help)) {
|
||||
self.help_scroll_offset = self.help_scroll_offset.saturating_sub(1);
|
||||
ModalAction::Handled
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Down => {
|
||||
if matches!(self.stack.last(), Some(ModalType::Help)) {
|
||||
self.help_scroll_offset = self.help_scroll_offset.saturating_add(1);
|
||||
ModalAction::Handled
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
_ => ModalAction::None,
|
||||
}
|
||||
}
|
||||
fn handle_enter(&mut self) -> ModalAction {
|
||||
match (&self.stack.last(), &self.active_button) {
|
||||
(Some(ModalType::ConnectionError { .. }), ModalButton::Retry) => {
|
||||
ModalAction::RetryConnection
|
||||
}
|
||||
(Some(ModalType::ConnectionError { .. }), ModalButton::Exit) => ModalAction::ExitApp,
|
||||
(Some(ModalType::ProcessDetails { .. }), ModalButton::Ok) => {
|
||||
self.pop_modal();
|
||||
ModalAction::Dismiss
|
||||
}
|
||||
(Some(ModalType::About), ModalButton::Ok) => {
|
||||
self.pop_modal();
|
||||
ModalAction::Dismiss
|
||||
}
|
||||
(Some(ModalType::Help), ModalButton::Ok) => {
|
||||
self.pop_modal();
|
||||
ModalAction::Dismiss
|
||||
}
|
||||
(Some(ModalType::Confirmation { .. }), ModalButton::Confirm) => ModalAction::Confirm,
|
||||
(Some(ModalType::Confirmation { .. }), ModalButton::Cancel) => ModalAction::Cancel,
|
||||
(Some(ModalType::Info { .. }), ModalButton::Ok) => {
|
||||
self.pop_modal();
|
||||
ModalAction::Dismiss
|
||||
}
|
||||
_ => ModalAction::None,
|
||||
}
|
||||
}
|
||||
fn next_button(&mut self) {
|
||||
self.active_button = match (&self.stack.last(), &self.active_button) {
|
||||
(Some(ModalType::ConnectionError { .. }), ModalButton::Retry) => ModalButton::Exit,
|
||||
(Some(ModalType::ConnectionError { .. }), ModalButton::Exit) => ModalButton::Retry,
|
||||
(Some(ModalType::Confirmation { .. }), ModalButton::Confirm) => ModalButton::Cancel,
|
||||
(Some(ModalType::Confirmation { .. }), ModalButton::Cancel) => ModalButton::Confirm,
|
||||
_ => self.active_button.clone(),
|
||||
};
|
||||
}
|
||||
fn prev_button(&mut self) {
|
||||
self.next_button();
|
||||
}
|
||||
|
||||
pub fn render(&mut self, f: &mut Frame, data: ProcessModalData) {
|
||||
if let Some(m) = self.stack.last().cloned() {
|
||||
self.render_background_dim(f);
|
||||
self.render_modal_content(f, &m, data);
|
||||
}
|
||||
}
|
||||
|
||||
fn render_background_dim(&self, f: &mut Frame) {
|
||||
let area = f.area();
|
||||
f.render_widget(Clear, area);
|
||||
f.render_widget(
|
||||
Block::default()
|
||||
.style(Style::default().bg(MODAL_DIM_BG).fg(MODAL_DIM_BG))
|
||||
.borders(Borders::NONE),
|
||||
area,
|
||||
);
|
||||
}
|
||||
|
||||
fn render_modal_content(&mut self, f: &mut Frame, modal: &ModalType, data: ProcessModalData) {
|
||||
let area = f.area();
|
||||
// Different sizes for different modal types
|
||||
let modal_area = match modal {
|
||||
ModalType::ProcessDetails { .. } => {
|
||||
// Process details modal uses almost full screen (95% width, 90% height)
|
||||
self.centered_rect(95, 90, area)
|
||||
}
|
||||
ModalType::About => {
|
||||
// About modal uses medium size
|
||||
self.centered_rect(90, 90, area)
|
||||
}
|
||||
ModalType::Help => {
|
||||
// Help modal uses medium size
|
||||
self.centered_rect(70, 80, area)
|
||||
}
|
||||
_ => {
|
||||
// Other modals use smaller size
|
||||
self.centered_rect(70, 50, area)
|
||||
}
|
||||
};
|
||||
f.render_widget(Clear, modal_area);
|
||||
match modal {
|
||||
ModalType::ConnectionError {
|
||||
message,
|
||||
disconnected_at,
|
||||
retry_count,
|
||||
auto_retry_countdown,
|
||||
} => self.render_connection_error(
|
||||
f,
|
||||
modal_area,
|
||||
message,
|
||||
*disconnected_at,
|
||||
*retry_count,
|
||||
*auto_retry_countdown,
|
||||
),
|
||||
ModalType::ProcessDetails { pid } => {
|
||||
self.render_process_details(f, modal_area, *pid, data)
|
||||
}
|
||||
ModalType::About => self.render_about(f, modal_area),
|
||||
ModalType::Help => self.render_help(f, modal_area),
|
||||
ModalType::Confirmation {
|
||||
title,
|
||||
message,
|
||||
confirm_text,
|
||||
cancel_text,
|
||||
} => self.render_confirmation(f, modal_area, title, message, confirm_text, cancel_text),
|
||||
ModalType::Info { title, message } => self.render_info(f, modal_area, title, message),
|
||||
}
|
||||
}
|
||||
|
||||
fn render_confirmation(
|
||||
&self,
|
||||
f: &mut Frame,
|
||||
area: Rect,
|
||||
title: &str,
|
||||
message: &str,
|
||||
confirm_text: &str,
|
||||
cancel_text: &str,
|
||||
) {
|
||||
let chunks = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
.constraints([Constraint::Min(1), Constraint::Length(3)])
|
||||
.split(area);
|
||||
let block = Block::default()
|
||||
.title(format!(" {title} "))
|
||||
.borders(Borders::ALL)
|
||||
.style(Style::default().bg(Color::Black));
|
||||
f.render_widget(block, area);
|
||||
f.render_widget(
|
||||
Paragraph::new(message)
|
||||
.style(Style::default().fg(Color::White))
|
||||
.alignment(Alignment::Center)
|
||||
.wrap(Wrap { trim: true }),
|
||||
chunks[0],
|
||||
);
|
||||
let buttons = Layout::default()
|
||||
.direction(Direction::Horizontal)
|
||||
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)])
|
||||
.split(chunks[1]);
|
||||
let confirm_style = if self.active_button == ModalButton::Confirm {
|
||||
Style::default()
|
||||
.bg(Color::Green)
|
||||
.fg(Color::Black)
|
||||
.add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
Style::default().fg(Color::Green)
|
||||
};
|
||||
let cancel_style = if self.active_button == ModalButton::Cancel {
|
||||
Style::default()
|
||||
.bg(Color::Red)
|
||||
.fg(Color::Black)
|
||||
.add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
Style::default().fg(Color::Red)
|
||||
};
|
||||
f.render_widget(
|
||||
Paragraph::new(confirm_text)
|
||||
.style(confirm_style)
|
||||
.alignment(Alignment::Center),
|
||||
buttons[0],
|
||||
);
|
||||
f.render_widget(
|
||||
Paragraph::new(cancel_text)
|
||||
.style(cancel_style)
|
||||
.alignment(Alignment::Center),
|
||||
buttons[1],
|
||||
);
|
||||
}
|
||||
|
||||
fn render_info(&self, f: &mut Frame, area: Rect, title: &str, message: &str) {
|
||||
let chunks = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
.constraints([Constraint::Min(1), Constraint::Length(3)])
|
||||
.split(area);
|
||||
let block = Block::default()
|
||||
.title(format!(" {title} "))
|
||||
.borders(Borders::ALL)
|
||||
.style(Style::default().bg(Color::Black));
|
||||
f.render_widget(block, area);
|
||||
f.render_widget(
|
||||
Paragraph::new(message)
|
||||
.style(Style::default().fg(Color::White))
|
||||
.alignment(Alignment::Center)
|
||||
.wrap(Wrap { trim: true }),
|
||||
chunks[0],
|
||||
);
|
||||
let ok_style = if self.active_button == ModalButton::Ok {
|
||||
Style::default()
|
||||
.bg(Color::Blue)
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
Style::default().fg(Color::Blue)
|
||||
};
|
||||
f.render_widget(
|
||||
Paragraph::new("[ Enter ] OK")
|
||||
.style(ok_style)
|
||||
.alignment(Alignment::Center),
|
||||
chunks[1],
|
||||
);
|
||||
}
|
||||
|
||||
fn render_about(&self, f: &mut Frame, area: Rect) {
|
||||
//get ASCII art from a constant stored in theme.rs
|
||||
use super::theme::ASCII_ART;
|
||||
|
||||
let version = env!("CARGO_PKG_VERSION");
|
||||
|
||||
let about_text = format!(
|
||||
"{}\n\
|
||||
Version {}\n\
|
||||
\n\
|
||||
A terminal first remote monitoring tool\n\
|
||||
\n\
|
||||
Website: https://socktop.io\n\
|
||||
GitHub: https://github.com/jasonwitty/socktop\n\
|
||||
\n\
|
||||
License: MIT License\n\
|
||||
\n\
|
||||
Created by Jason Witty\n\
|
||||
jasonpwitty+socktop@proton.me",
|
||||
ASCII_ART, version
|
||||
);
|
||||
|
||||
// Render the border block
|
||||
let block = Block::default()
|
||||
.title(" About socktop ")
|
||||
.borders(Borders::ALL)
|
||||
.style(Style::default().bg(Color::Black).fg(Color::DarkGray));
|
||||
f.render_widget(block, area);
|
||||
|
||||
// Calculate inner area manually to avoid any parent styling
|
||||
let inner_area = Rect {
|
||||
x: area.x + 1,
|
||||
y: area.y + 1,
|
||||
width: area.width.saturating_sub(2),
|
||||
height: area.height.saturating_sub(2), // Leave room for button at bottom
|
||||
};
|
||||
|
||||
// Render content area with explicit black background
|
||||
f.render_widget(
|
||||
Paragraph::new(about_text)
|
||||
.style(Style::default().fg(Color::Cyan).bg(Color::Black))
|
||||
.alignment(Alignment::Center)
|
||||
.wrap(Wrap { trim: false }),
|
||||
inner_area,
|
||||
);
|
||||
|
||||
// Button area
|
||||
let button_area = Rect {
|
||||
x: area.x + 1,
|
||||
y: area.y + area.height.saturating_sub(2),
|
||||
width: area.width.saturating_sub(2),
|
||||
height: 1,
|
||||
};
|
||||
|
||||
let ok_style = if self.active_button == ModalButton::Ok {
|
||||
Style::default()
|
||||
.bg(Color::Blue)
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
Style::default().fg(Color::Blue).bg(Color::Black)
|
||||
};
|
||||
|
||||
f.render_widget(
|
||||
Paragraph::new("[ Enter ] Close")
|
||||
.style(ok_style)
|
||||
.alignment(Alignment::Center),
|
||||
button_area,
|
||||
);
|
||||
}
|
||||
|
||||
/// Render the hotkey-help modal: a bordered, scrollable cheat-sheet of all
/// key bindings plus a single "[ Enter ] Close" button on the bottom row.
/// Scroll position comes from `self.help_scroll_offset` (clamped here).
fn render_help(&self, f: &mut Frame, area: Rect) {
    // Static help text, one &str per display row. Section headers are
    // unindented; bindings are indented and dot-padded for alignment.
    let help_lines = vec![
        "GLOBAL",
        " q/Q/Esc ........ Quit │ a/A ....... About │ h/H ....... Help",
        "",
        "PROCESS LIST",
        " / .............. Start/edit fuzzy search",
        " c/C ............ Clear search filter",
        " ↑/↓ ............ Select/navigate processes",
        " Enter .......... Open Process Details",
        " x/X ............ Clear selection",
        " Click header ... Sort by column (CPU/Mem)",
        " Click row ...... Select process",
        "",
        "SEARCH MODE (after pressing /)",
        " Type ........... Enter search query (fuzzy match)",
        " ↑/↓ ............ Navigate results while typing",
        " Esc ............ Cancel search and clear filter",
        " Enter .......... Apply filter and select first result",
        "",
        "CPU PER-CORE",
        " ←/→ ............ Scroll cores │ PgUp/PgDn ... Page up/down",
        " Home/End ....... Jump to first/last core",
        "",
        "PROCESS DETAILS MODAL",
        " x/X ............ Close modal (all parent modals)",
        " p/P ............ Navigate to parent process",
        " j/k ............ Scroll threads ↓/↑ (1 line)",
        " d/u ............ Scroll threads ↓/↑ (10 lines)",
        " [ / ] .......... Scroll journal ↑/↓",
        " Esc/Enter ...... Close modal",
        "",
        "MODAL NAVIGATION",
        " Tab/→ .......... Next button │ Shift+Tab/← ... Previous button",
        " Enter .......... Confirm/OK │ Esc ............ Cancel/Close",
    ];

    // Render the border block
    let block = Block::default()
        .title(" Hotkey Help (use ↑/↓ to scroll) ")
        .borders(Borders::ALL)
        .style(Style::default().bg(Color::Black).fg(Color::DarkGray));
    f.render_widget(block, area);

    // Split into content area and button area.
    // The split rect is the border's interior (1-cell inset on all sides);
    // the last row is reserved for the close button.
    let chunks = Layout::default()
        .direction(Direction::Vertical)
        .constraints([Constraint::Min(1), Constraint::Length(1)])
        .split(Rect {
            x: area.x + 1,
            y: area.y + 1,
            width: area.width.saturating_sub(2),
            height: area.height.saturating_sub(2),
        });

    let content_area = chunks[0];
    let button_area = chunks[1];

    // Calculate visible window: clamp the stored offset so shrinking the
    // terminal can never scroll past the end of the list.
    let visible_height = content_area.height as usize;
    let total_lines = help_lines.len();
    let max_scroll = total_lines.saturating_sub(visible_height);
    let scroll_offset = self.help_scroll_offset.min(max_scroll);

    // Get visible lines (a window of `visible_height` rows starting at the
    // clamped offset).
    let visible_lines: Vec<Line> = help_lines
        .iter()
        .skip(scroll_offset)
        .take(visible_height)
        .map(|s| Line::from(*s))
        .collect();

    // Render scrollable content
    f.render_widget(
        Paragraph::new(visible_lines)
            .style(Style::default().fg(Color::Cyan).bg(Color::Black))
            .alignment(Alignment::Left),
        content_area,
    );

    // Render scrollbar only when the list actually overflows the viewport.
    if total_lines > visible_height {
        use ratatui::widgets::{Scrollbar, ScrollbarOrientation, ScrollbarState};

        // One-column strip just inside the right border.
        let scrollbar_area = Rect {
            x: area.x + area.width.saturating_sub(2),
            y: area.y + 1,
            width: 1,
            height: area.height.saturating_sub(2),
        };

        // Content length is the scrollable RANGE (max_scroll), matching the
        // domain of `position(scroll_offset)` so the thumb reaches the end.
        let mut scrollbar_state = ScrollbarState::new(max_scroll).position(scroll_offset);

        let scrollbar = Scrollbar::new(ScrollbarOrientation::VerticalRight)
            .begin_symbol(Some("↑"))
            .end_symbol(Some("↓"))
            .style(Style::default().fg(Color::DarkGray));

        f.render_stateful_widget(scrollbar, scrollbar_area, &mut scrollbar_state);
    }

    // Button area: highlight when it is the focused modal button.
    let ok_style = if self.active_button == ModalButton::Ok {
        Style::default()
            .bg(Color::Blue)
            .fg(Color::White)
            .add_modifier(Modifier::BOLD)
    } else {
        Style::default().fg(Color::Blue).bg(Color::Black)
    };

    f.render_widget(
        Paragraph::new("[ Enter ] Close")
            .style(ok_style)
            .alignment(Alignment::Center),
        button_area,
    );
}
|
||||
|
||||
fn centered_rect(&self, percent_x: u16, percent_y: u16, r: Rect) -> Rect {
|
||||
let vert = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
.constraints([
|
||||
Constraint::Percentage((100 - percent_y) / 2),
|
||||
Constraint::Percentage(percent_y),
|
||||
Constraint::Percentage((100 - percent_y) / 2),
|
||||
])
|
||||
.split(r);
|
||||
Layout::default()
|
||||
.direction(Direction::Horizontal)
|
||||
.constraints([
|
||||
Constraint::Percentage((100 - percent_x) / 2),
|
||||
Constraint::Percentage(percent_x),
|
||||
Constraint::Percentage((100 - percent_x) / 2),
|
||||
])
|
||||
.split(vert[1])[1]
|
||||
}
|
||||
}
|
||||
297
socktop/src/ui/modal_connection.rs
Normal file
297
socktop/src/ui/modal_connection.rs
Normal file
@ -0,0 +1,297 @@
|
||||
//! Connection error modal rendering
|
||||
|
||||
use std::time::Instant;
|
||||
|
||||
use super::modal_format::format_duration;
|
||||
use super::theme::{
|
||||
BTN_EXIT_BG_ACTIVE, BTN_EXIT_FG_ACTIVE, BTN_EXIT_FG_INACTIVE, BTN_EXIT_TEXT,
|
||||
BTN_RETRY_BG_ACTIVE, BTN_RETRY_FG_ACTIVE, BTN_RETRY_FG_INACTIVE, BTN_RETRY_TEXT, ICON_CLUSTER,
|
||||
ICON_COUNTDOWN_LABEL, ICON_MESSAGE, ICON_OFFLINE_LABEL, ICON_RETRY_LABEL, ICON_WARNING_TITLE,
|
||||
LARGE_ERROR_ICON, MODAL_AGENT_FG, MODAL_BG, MODAL_BORDER_FG, MODAL_COUNTDOWN_LABEL_FG,
|
||||
MODAL_FG, MODAL_HINT_FG, MODAL_ICON_PINK, MODAL_OFFLINE_LABEL_FG, MODAL_RETRY_LABEL_FG,
|
||||
MODAL_TITLE_FG,
|
||||
};
|
||||
use ratatui::{
|
||||
Frame,
|
||||
layout::{Alignment, Constraint, Direction, Layout, Rect},
|
||||
style::{Color, Modifier, Style},
|
||||
text::{Line, Span, Text},
|
||||
widgets::{Block, Borders, Paragraph, Wrap},
|
||||
};
|
||||
|
||||
use super::modal::{ModalButton, ModalManager};
|
||||
|
||||
impl ModalManager {
|
||||
pub(super) fn render_connection_error(
|
||||
&self,
|
||||
f: &mut Frame,
|
||||
area: Rect,
|
||||
message: &str,
|
||||
disconnected_at: Instant,
|
||||
retry_count: u32,
|
||||
auto_retry_countdown: Option<u64>,
|
||||
) {
|
||||
let duration_text = format_duration(disconnected_at.elapsed());
|
||||
let chunks = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
.constraints([
|
||||
Constraint::Length(3),
|
||||
Constraint::Min(4),
|
||||
Constraint::Length(4),
|
||||
])
|
||||
.split(area);
|
||||
let block = Block::default()
|
||||
.title(ICON_WARNING_TITLE)
|
||||
.title_style(
|
||||
Style::default()
|
||||
.fg(MODAL_TITLE_FG)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)
|
||||
.borders(Borders::ALL)
|
||||
.border_style(Style::default().fg(MODAL_BORDER_FG))
|
||||
.style(Style::default().bg(MODAL_BG).fg(MODAL_FG));
|
||||
f.render_widget(block, area);
|
||||
|
||||
let content_area = chunks[1];
|
||||
let max_w = content_area.width.saturating_sub(15) as usize;
|
||||
let clean_message = if message.to_lowercase().contains("hostname verification")
|
||||
|| message.contains("socktop_connector")
|
||||
{
|
||||
"Connection failed - hostname verification disabled".to_string()
|
||||
} else if message.contains("Failed to fetch metrics:") {
|
||||
if let Some(p) = message.find(':') {
|
||||
let ess = message[p + 1..].trim();
|
||||
if ess.len() > max_w {
|
||||
format!("{}...", &ess[..max_w.saturating_sub(3)])
|
||||
} else {
|
||||
ess.to_string()
|
||||
}
|
||||
} else {
|
||||
"Connection error".to_string()
|
||||
}
|
||||
} else if message.starts_with("Retry failed:") {
|
||||
if let Some(p) = message.find(':') {
|
||||
let ess = message[p + 1..].trim();
|
||||
if ess.len() > max_w {
|
||||
format!("{}...", &ess[..max_w.saturating_sub(3)])
|
||||
} else {
|
||||
ess.to_string()
|
||||
}
|
||||
} else {
|
||||
"Retry failed".to_string()
|
||||
}
|
||||
} else if message.len() > max_w {
|
||||
format!("{}...", &message[..max_w.saturating_sub(3)])
|
||||
} else {
|
||||
message.to_string()
|
||||
};
|
||||
let truncate = |s: &str| {
|
||||
if s.len() > max_w {
|
||||
format!("{}...", &s[..max_w.saturating_sub(3)])
|
||||
} else {
|
||||
s.to_string()
|
||||
}
|
||||
};
|
||||
let agent_text = truncate("📡 Cannot connect to socktop agent");
|
||||
let message_text = truncate(&clean_message);
|
||||
let duration_display = truncate(&duration_text);
|
||||
let retry_display = truncate(&retry_count.to_string());
|
||||
let countdown_text = auto_retry_countdown.map(|c| {
|
||||
if c == 0 {
|
||||
"Auto retry now...".to_string()
|
||||
} else {
|
||||
format!("{c}s")
|
||||
}
|
||||
});
|
||||
|
||||
// Determine if we have enough space (height + width) to show large centered icon
|
||||
let icon_max_width = LARGE_ERROR_ICON
|
||||
.iter()
|
||||
.map(|l| l.trim().chars().count())
|
||||
.max()
|
||||
.unwrap_or(0) as u16;
|
||||
let large_allowed = content_area.height >= (LARGE_ERROR_ICON.len() as u16 + 8)
|
||||
&& content_area.width >= icon_max_width + 6; // small margin for borders/padding
|
||||
let mut icon_lines: Vec<Line> = Vec::new();
|
||||
if large_allowed {
|
||||
for &raw in LARGE_ERROR_ICON.iter() {
|
||||
let trimmed = raw.trim();
|
||||
icon_lines.push(Line::from(
|
||||
trimmed
|
||||
.chars()
|
||||
.map(|ch| {
|
||||
if ch == '!' {
|
||||
Span::styled(
|
||||
ch.to_string(),
|
||||
Style::default()
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)
|
||||
} else if ch == '/' || ch == '\\' || ch == '_' {
|
||||
// keep outline in pink
|
||||
Span::styled(
|
||||
ch.to_string(),
|
||||
Style::default()
|
||||
.fg(MODAL_ICON_PINK)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)
|
||||
} else if ch == ' ' {
|
||||
Span::raw(" ")
|
||||
} else {
|
||||
Span::styled(ch.to_string(), Style::default().fg(MODAL_ICON_PINK))
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>(),
|
||||
));
|
||||
}
|
||||
icon_lines.push(Line::from("")); // blank spacer line below icon
|
||||
}
|
||||
|
||||
let mut info_lines: Vec<Line> = Vec::new();
|
||||
if !large_allowed {
|
||||
info_lines.push(Line::from(vec![Span::styled(
|
||||
ICON_CLUSTER,
|
||||
Style::default().fg(MODAL_ICON_PINK),
|
||||
)]));
|
||||
info_lines.push(Line::from(""));
|
||||
}
|
||||
info_lines.push(Line::from(vec![Span::styled(
|
||||
&agent_text,
|
||||
Style::default().fg(MODAL_AGENT_FG),
|
||||
)]));
|
||||
info_lines.push(Line::from(""));
|
||||
info_lines.push(Line::from(vec![
|
||||
Span::styled(ICON_MESSAGE, Style::default().fg(MODAL_HINT_FG)),
|
||||
Span::styled(&message_text, Style::default().fg(MODAL_AGENT_FG)),
|
||||
]));
|
||||
info_lines.push(Line::from(""));
|
||||
info_lines.push(Line::from(vec![
|
||||
Span::styled(
|
||||
ICON_OFFLINE_LABEL,
|
||||
Style::default().fg(MODAL_OFFLINE_LABEL_FG),
|
||||
),
|
||||
Span::styled(
|
||||
&duration_display,
|
||||
Style::default()
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
]));
|
||||
info_lines.push(Line::from(vec![
|
||||
Span::styled(ICON_RETRY_LABEL, Style::default().fg(MODAL_RETRY_LABEL_FG)),
|
||||
Span::styled(
|
||||
&retry_display,
|
||||
Style::default()
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
]));
|
||||
if let Some(cd) = &countdown_text {
|
||||
info_lines.push(Line::from(vec![
|
||||
Span::styled(
|
||||
ICON_COUNTDOWN_LABEL,
|
||||
Style::default().fg(MODAL_COUNTDOWN_LABEL_FG),
|
||||
),
|
||||
Span::styled(
|
||||
cd,
|
||||
Style::default()
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
]));
|
||||
}
|
||||
|
||||
let constrained = Rect {
|
||||
x: content_area.x + 2,
|
||||
y: content_area.y,
|
||||
width: content_area.width.saturating_sub(4),
|
||||
height: content_area.height,
|
||||
};
|
||||
if large_allowed {
|
||||
let split = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
.constraints([
|
||||
Constraint::Length(icon_lines.len() as u16),
|
||||
Constraint::Min(0),
|
||||
])
|
||||
.split(constrained);
|
||||
// Center the icon block; each line already trimmed so per-line centering keeps shape
|
||||
f.render_widget(
|
||||
Paragraph::new(Text::from(icon_lines))
|
||||
.alignment(Alignment::Center)
|
||||
.wrap(Wrap { trim: false }),
|
||||
split[0],
|
||||
);
|
||||
f.render_widget(
|
||||
Paragraph::new(Text::from(info_lines))
|
||||
.alignment(Alignment::Center)
|
||||
.wrap(Wrap { trim: true }),
|
||||
split[1],
|
||||
);
|
||||
} else {
|
||||
f.render_widget(
|
||||
Paragraph::new(Text::from(info_lines))
|
||||
.alignment(Alignment::Center)
|
||||
.wrap(Wrap { trim: true }),
|
||||
constrained,
|
||||
);
|
||||
}
|
||||
|
||||
let button_area = Rect {
|
||||
x: chunks[2].x,
|
||||
y: chunks[2].y,
|
||||
width: chunks[2].width,
|
||||
height: chunks[2].height.saturating_sub(1),
|
||||
};
|
||||
self.render_connection_error_buttons(f, button_area);
|
||||
}
|
||||
|
||||
fn render_connection_error_buttons(&self, f: &mut Frame, area: Rect) {
|
||||
let button_chunks = Layout::default()
|
||||
.direction(Direction::Horizontal)
|
||||
.constraints([
|
||||
Constraint::Percentage(30),
|
||||
Constraint::Percentage(15),
|
||||
Constraint::Percentage(10),
|
||||
Constraint::Percentage(15),
|
||||
Constraint::Percentage(30),
|
||||
])
|
||||
.split(area);
|
||||
let retry_style = if self.active_button == ModalButton::Retry {
|
||||
Style::default()
|
||||
.bg(BTN_RETRY_BG_ACTIVE)
|
||||
.fg(BTN_RETRY_FG_ACTIVE)
|
||||
.add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
Style::default()
|
||||
.fg(BTN_RETRY_FG_INACTIVE)
|
||||
.add_modifier(Modifier::DIM)
|
||||
};
|
||||
let exit_style = if self.active_button == ModalButton::Exit {
|
||||
Style::default()
|
||||
.bg(BTN_EXIT_BG_ACTIVE)
|
||||
.fg(BTN_EXIT_FG_ACTIVE)
|
||||
.add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
Style::default()
|
||||
.fg(BTN_EXIT_FG_INACTIVE)
|
||||
.add_modifier(Modifier::DIM)
|
||||
};
|
||||
f.render_widget(
|
||||
Paragraph::new(Text::from(Line::from(vec![Span::styled(
|
||||
BTN_RETRY_TEXT,
|
||||
retry_style,
|
||||
)])))
|
||||
.alignment(Alignment::Center),
|
||||
button_chunks[1],
|
||||
);
|
||||
f.render_widget(
|
||||
Paragraph::new(Text::from(Line::from(vec![Span::styled(
|
||||
BTN_EXIT_TEXT,
|
||||
exit_style,
|
||||
)])))
|
||||
.alignment(Alignment::Center),
|
||||
button_chunks[3],
|
||||
);
|
||||
}
|
||||
}
|
||||
112
socktop/src/ui/modal_format.rs
Normal file
112
socktop/src/ui/modal_format.rs
Normal file
@ -0,0 +1,112 @@
|
||||
//! Formatting utilities for process details modal
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
/// Format a seconds count as a short human-readable uptime.
///
/// Durations of a day or more show `"Nd Nh Nm"` (seconds dropped);
/// shorter durations show progressively fewer units down to `"Ns"`.
pub fn format_uptime(secs: u64) -> String {
    const DAY: u64 = 86_400;
    const HOUR: u64 = 3_600;

    let days = secs / DAY;
    let hours = (secs % DAY) / HOUR;
    let minutes = (secs % HOUR) / 60;
    let seconds = secs % 60;

    match (days, hours, minutes) {
        (d, h, m) if d > 0 => format!("{d}d {h}h {m}m"),
        (_, h, m) if h > 0 => format!("{h}h {m}m {seconds}s"),
        (_, _, m) if m > 0 => format!("{m}m {seconds}s"),
        _ => format!("{seconds}s"),
    }
}
|
||||
|
||||
/// Format a [`Duration`] as a short human-readable string
/// (`"Nh Nm Ns"`, `"Nm Ns"`, or `"Ns"` depending on magnitude).
pub fn format_duration(duration: Duration) -> String {
    let secs = duration.as_secs();
    let (h, rem) = (secs / 3600, secs % 3600);
    let (m, s) = (rem / 60, rem % 60);

    match (h, m) {
        (h, m) if h > 0 => format!("{h}h {m}m {s}s"),
        (_, m) if m > 0 => format!("{m}m {s}s"),
        _ => format!("{s}s"),
    }
}
|
||||
|
||||
/// Normalize a per-process CPU reading to the 0–100% range by dividing by
/// the thread count (a zero count is treated as 1 to avoid dividing by
/// zero), then capping at 100.
pub fn normalize_cpu_usage(cpu_usage: f32, thread_count: u32) -> f32 {
    let divisor = f32::max(thread_count as f32, 1.0);
    f32::min(cpu_usage / divisor, 100.0)
}
|
||||
|
||||
/// Round a chart's observed maximum up to the next multiple of 10,
/// keeping the result within [10, 100] so the Y axis never collapses
/// or exceeds 100%.
pub fn calculate_dynamic_y_max(max_value: f64) -> f64 {
    let next_tens = (max_value / 10.0).ceil() * 10.0;
    next_tens.clamp(10.0, 100.0)
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // ---- format_uptime: one case per unit tier ----

    #[test]
    fn test_format_uptime_seconds() {
        assert_eq!(format_uptime(45), "45s");
    }

    #[test]
    fn test_format_uptime_minutes() {
        assert_eq!(format_uptime(125), "2m 5s");
    }

    #[test]
    fn test_format_uptime_hours() {
        assert_eq!(format_uptime(3665), "1h 1m 5s");
    }

    #[test]
    fn test_format_uptime_days() {
        // Seconds are dropped once the duration reaches a full day.
        assert_eq!(format_uptime(90061), "1d 1h 1m");
    }

    // ---- normalize_cpu_usage: division, guard, and cap ----

    #[test]
    fn test_normalize_cpu_single_thread() {
        assert_eq!(normalize_cpu_usage(50.0, 1), 50.0);
    }

    #[test]
    fn test_normalize_cpu_multi_thread() {
        assert_eq!(normalize_cpu_usage(400.0, 4), 100.0);
    }

    #[test]
    fn test_normalize_cpu_zero_threads() {
        // Should default to 1 thread to avoid division by zero
        assert_eq!(normalize_cpu_usage(100.0, 0), 100.0);
    }

    #[test]
    fn test_normalize_cpu_caps_at_100() {
        assert_eq!(normalize_cpu_usage(150.0, 1), 100.0);
    }

    // ---- calculate_dynamic_y_max: rounding and clamping ----

    #[test]
    fn test_dynamic_y_max_rounds_up() {
        assert_eq!(calculate_dynamic_y_max(15.0), 20.0);
        assert_eq!(calculate_dynamic_y_max(25.0), 30.0);
        assert_eq!(calculate_dynamic_y_max(5.0), 10.0);
    }

    #[test]
    fn test_dynamic_y_max_minimum() {
        // Axis never shrinks below 10% even for tiny values.
        assert_eq!(calculate_dynamic_y_max(0.0), 10.0);
        assert_eq!(calculate_dynamic_y_max(3.0), 10.0);
    }

    #[test]
    fn test_dynamic_y_max_caps_at_100() {
        assert_eq!(calculate_dynamic_y_max(95.0), 100.0);
        assert_eq!(calculate_dynamic_y_max(100.0), 100.0);
    }
}
|
||||
1156
socktop/src/ui/modal_process.rs
Normal file
1156
socktop/src/ui/modal_process.rs
Normal file
File diff suppressed because it is too large
Load Diff
77
socktop/src/ui/modal_types.rs
Normal file
77
socktop/src/ui/modal_types.rs
Normal file
@ -0,0 +1,77 @@
|
||||
//! Type definitions for modal system
|
||||
|
||||
use std::time::Instant;
|
||||
|
||||
/// History data for process metrics rendering.
///
/// All series are borrowed from the app-level history store so rendering
/// never copies the sample buffers.
pub struct ProcessHistoryData<'a> {
    /// Recent CPU% samples. Ordering assumed oldest→newest — TODO confirm with producer.
    pub cpu: &'a std::collections::VecDeque<f32>,
    /// Recent memory samples (presumably resident bytes — confirm against collector).
    pub mem: &'a std::collections::VecDeque<u64>,
    /// Recent I/O read samples in bytes — TODO confirm cumulative vs per-interval delta.
    pub io_read: &'a std::collections::VecDeque<u64>,
    /// Recent I/O write samples in bytes — TODO confirm cumulative vs per-interval delta.
    pub io_write: &'a std::collections::VecDeque<u64>,
}
|
||||
|
||||
/// Process data for modal rendering.
pub struct ProcessModalData<'a> {
    /// Detailed metrics for the inspected process; `None` when not yet
    /// fetched — TODO confirm loading semantics against the caller.
    pub details: Option<&'a socktop_connector::ProcessMetricsResponse>,
    /// Journal (log) entries for the process, when available.
    pub journal: Option<&'a socktop_connector::JournalResponse>,
    /// Rolling sample history used to draw the modal's charts.
    pub history: ProcessHistoryData<'a>,
    /// Peak memory in bytes — presumably the memory chart's Y-axis ceiling;
    /// confirm in modal_process.rs.
    pub max_mem_bytes: u64,
    /// True when per-process details are not supported by the agent.
    pub unsupported: bool,
}
|
||||
|
||||
/// Parameters for rendering scatter plot.
pub(super) struct ScatterPlotParams<'a> {
    /// The process whose data is plotted.
    pub process: &'a socktop_connector::DetailedProcessInfo,
    /// Main thread user-mode CPU time in ms — assumed from the field name;
    /// confirm in modal_process.rs.
    pub main_user_ms: f64,
    /// Main thread kernel-mode CPU time in ms — assumed from the field name;
    /// confirm in modal_process.rs.
    pub main_system_ms: f64,
    /// Maximum of the user-time dimension (presumably the plot's X/Y scale).
    pub max_user: f64,
    /// Maximum of the system-time dimension (presumably the plot's X/Y scale).
    pub max_system: f64,
}
|
||||
|
||||
/// Which modal is currently displayed, plus its payload.
#[derive(Debug, Clone)]
pub enum ModalType {
    /// Lost connection to the agent; carries the error text, when the
    /// connection dropped, how many retries were attempted, and an
    /// optional auto-retry countdown in seconds.
    ConnectionError {
        message: String,
        disconnected_at: Instant,
        retry_count: u32,
        auto_retry_countdown: Option<u64>,
    },
    /// Per-process detail view for the given PID.
    ProcessDetails {
        pid: u32,
    },
    /// Static "About" screen.
    About,
    /// Scrollable hotkey help.
    Help,
    /// Generic yes/no dialog (not yet constructed anywhere visible — kept
    /// behind allow(dead_code)).
    #[allow(dead_code)]
    Confirmation {
        title: String,
        message: String,
        confirm_text: String,
        cancel_text: String,
    },
    /// Generic informational dialog (not yet constructed anywhere visible —
    /// kept behind allow(dead_code)).
    #[allow(dead_code)]
    Info {
        title: String,
        message: String,
    },
}
|
||||
|
||||
/// Result of feeding an input event to the active modal; tells the main
/// loop whether/how to react.
#[derive(Debug, Clone, PartialEq)]
pub enum ModalAction {
    None, // Modal didn't handle the key, pass to main window
    Handled, // Modal handled the key, don't pass to main window
    RetryConnection, // Retry the agent connection (connection-error modal)
    ExitApp, // Quit the application
    Confirm, // Confirmation dialog accepted
    Cancel, // Confirmation dialog rejected
    Dismiss, // Close the current modal
    SwitchToParentProcess(u32), // Switch to viewing parent process details
}
|
||||
|
||||
/// Which button currently has keyboard focus inside a modal.
#[derive(Debug, Clone, PartialEq)]
pub enum ModalButton {
    Retry, // Connection-error modal: retry connection
    Exit, // Connection-error modal: exit the app
    Confirm, // Confirmation modal: accept
    Cancel, // Confirmation modal: reject
    Ok, // About/Help and info modals: single close button
}
|
||||
@ -12,9 +12,72 @@ use std::cmp::Ordering;
|
||||
|
||||
use crate::types::Metrics;
|
||||
use crate::ui::cpu::{per_core_clamp, per_core_handle_scrollbar_mouse};
|
||||
use crate::ui::theme::{SB_ARROW, SB_THUMB, SB_TRACK};
|
||||
use crate::ui::theme::{
|
||||
PROCESS_SELECTION_BG, PROCESS_SELECTION_FG, PROCESS_TOOLTIP_BG, PROCESS_TOOLTIP_FG, SB_ARROW,
|
||||
SB_THUMB, SB_TRACK,
|
||||
};
|
||||
use crate::ui::util::human;
|
||||
|
||||
/// Case-insensitive subsequence match: true when every character of
/// `needle` occurs in `haystack` in the same relative order (not
/// necessarily adjacent). An empty needle matches everything.
fn fuzzy_match(haystack: &str, needle: &str) -> bool {
    let hay = haystack.to_lowercase();
    // A single shared iterator over the haystack: each `any` call resumes
    // where the previous one stopped, which enforces in-order matching.
    let mut hay_chars = hay.chars();
    needle
        .to_lowercase()
        .chars()
        .all(|nc| hay_chars.any(|hc| hc == nc))
}
|
||||
|
||||
/// Get filtered and sorted process indices based on search query and sort order
|
||||
pub fn get_filtered_sorted_indices(
|
||||
metrics: &Metrics,
|
||||
search_query: &str,
|
||||
sort_by: ProcSortBy,
|
||||
) -> Vec<usize> {
|
||||
// Filter processes by search query (fuzzy match)
|
||||
let mut filtered_idxs: Vec<usize> = if search_query.is_empty() {
|
||||
(0..metrics.top_processes.len()).collect()
|
||||
} else {
|
||||
(0..metrics.top_processes.len())
|
||||
.filter(|&i| fuzzy_match(&metrics.top_processes[i].name, search_query))
|
||||
.collect()
|
||||
};
|
||||
|
||||
// Sort filtered rows
|
||||
match sort_by {
|
||||
ProcSortBy::CpuDesc => filtered_idxs.sort_by(|&a, &b| {
|
||||
let aa = metrics.top_processes[a].cpu_usage;
|
||||
let bb = metrics.top_processes[b].cpu_usage;
|
||||
bb.partial_cmp(&aa).unwrap_or(Ordering::Equal)
|
||||
}),
|
||||
ProcSortBy::MemDesc => filtered_idxs.sort_by(|&a, &b| {
|
||||
let aa = metrics.top_processes[a].mem_bytes;
|
||||
let bb = metrics.top_processes[b].mem_bytes;
|
||||
bb.cmp(&aa)
|
||||
}),
|
||||
}
|
||||
|
||||
filtered_idxs
|
||||
}
|
||||
|
||||
/// Parameters for drawing the top processes table.
pub struct ProcessDisplayParams<'a> {
    /// Latest metrics snapshot; `None` draws only the outer block.
    pub metrics: Option<&'a Metrics>,
    /// First visible row; clamped to the scrollable range by the renderer.
    pub scroll_offset: usize,
    /// Active sort column (CPU% or memory, both descending).
    pub sort_by: ProcSortBy,
    /// Selected process by PID — takes precedence over the index below.
    pub selected_process_pid: Option<u32>,
    /// Selected process as an index into the sorted list (fallback when
    /// no PID is set).
    pub selected_process_index: Option<usize>,
    /// Current fuzzy-search query ("" = no filter).
    pub search_query: &'a str,
    /// True while the user is typing in the search box (changes the
    /// search bar's caption and shows the cursor).
    pub search_active: bool,
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||
pub enum ProcSortBy {
|
||||
#[default]
|
||||
@ -31,28 +94,61 @@ const COLS: [Constraint; 5] = [
|
||||
Constraint::Length(8), // Mem %
|
||||
];
|
||||
|
||||
pub fn draw_top_processes(
|
||||
f: &mut ratatui::Frame<'_>,
|
||||
area: Rect,
|
||||
m: Option<&Metrics>,
|
||||
scroll_offset: usize,
|
||||
sort_by: ProcSortBy,
|
||||
) {
|
||||
pub fn draw_top_processes(f: &mut ratatui::Frame<'_>, area: Rect, params: ProcessDisplayParams) {
|
||||
// Draw outer block and title
|
||||
let Some(mm) = m else { return };
|
||||
let Some(mm) = params.metrics else { return };
|
||||
let total = mm.process_count.unwrap_or(mm.top_processes.len());
|
||||
let block = Block::default()
|
||||
.borders(Borders::ALL)
|
||||
.title(format!("Top Processes ({total} total)"));
|
||||
f.render_widget(block, area);
|
||||
|
||||
// Inner area and content area (reserve 2 columns for scrollbar)
|
||||
// Inner area (reserve space for search box if active)
|
||||
let inner = Rect {
|
||||
x: area.x + 1,
|
||||
y: area.y + 1,
|
||||
width: area.width.saturating_sub(2),
|
||||
height: area.height.saturating_sub(2),
|
||||
};
|
||||
|
||||
// Draw search box if active
|
||||
let content_start_y = if params.search_active || !params.search_query.is_empty() {
|
||||
let search_area = Rect {
|
||||
x: inner.x,
|
||||
y: inner.y,
|
||||
width: inner.width,
|
||||
height: 3, // Height for border + content
|
||||
};
|
||||
|
||||
let search_text = if params.search_active {
|
||||
format!("Search: {}_", params.search_query)
|
||||
} else {
|
||||
format!(
|
||||
"Filter: {} (press / to edit, c to clear)",
|
||||
params.search_query
|
||||
)
|
||||
};
|
||||
|
||||
let search_block = Block::default()
|
||||
.borders(Borders::ALL)
|
||||
.border_style(Style::default().fg(Color::Yellow));
|
||||
let search_paragraph = Paragraph::new(search_text)
|
||||
.block(search_block)
|
||||
.style(Style::default().fg(Color::Yellow));
|
||||
f.render_widget(search_paragraph, search_area);
|
||||
|
||||
inner.y + 3
|
||||
} else {
|
||||
inner.y
|
||||
};
|
||||
|
||||
// Content area (reserve 2 columns for scrollbar)
|
||||
let inner = Rect {
|
||||
x: inner.x,
|
||||
y: content_start_y,
|
||||
width: inner.width,
|
||||
height: inner.height.saturating_sub(content_start_y - (area.y + 1)),
|
||||
};
|
||||
if inner.height < 1 || inner.width < 3 {
|
||||
return;
|
||||
}
|
||||
@ -63,27 +159,15 @@ pub fn draw_top_processes(
|
||||
height: inner.height,
|
||||
};
|
||||
|
||||
// Sort rows (by CPU% or Mem bytes), descending.
|
||||
let mut idxs: Vec<usize> = (0..mm.top_processes.len()).collect();
|
||||
match sort_by {
|
||||
ProcSortBy::CpuDesc => idxs.sort_by(|&a, &b| {
|
||||
let aa = mm.top_processes[a].cpu_usage;
|
||||
let bb = mm.top_processes[b].cpu_usage;
|
||||
bb.partial_cmp(&aa).unwrap_or(Ordering::Equal)
|
||||
}),
|
||||
ProcSortBy::MemDesc => idxs.sort_by(|&a, &b| {
|
||||
let aa = mm.top_processes[a].mem_bytes;
|
||||
let bb = mm.top_processes[b].mem_bytes;
|
||||
bb.cmp(&aa)
|
||||
}),
|
||||
}
|
||||
// Get filtered and sorted indices
|
||||
let idxs = get_filtered_sorted_indices(mm, params.search_query, params.sort_by);
|
||||
|
||||
// Scrolling
|
||||
let total_rows = idxs.len();
|
||||
let header_rows = 1usize;
|
||||
let viewport_rows = content.height.saturating_sub(header_rows as u16) as usize;
|
||||
let max_off = total_rows.saturating_sub(viewport_rows);
|
||||
let offset = scroll_offset.min(max_off);
|
||||
let offset = params.scroll_offset.min(max_off);
|
||||
let show_n = total_rows.saturating_sub(offset).min(viewport_rows);
|
||||
|
||||
// Build visible rows
|
||||
@ -110,12 +194,29 @@ pub fn draw_top_processes(
|
||||
_ => Color::Red,
|
||||
};
|
||||
|
||||
let emphasis = if (cpu_val - peak_cpu).abs() < f32::EPSILON {
|
||||
let mut emphasis = if (cpu_val - peak_cpu).abs() < f32::EPSILON {
|
||||
Style::default().add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
Style::default()
|
||||
};
|
||||
|
||||
// Check if this process is selected - prioritize PID matching
|
||||
let is_selected = if let Some(selected_pid) = params.selected_process_pid {
|
||||
selected_pid == p.pid
|
||||
} else if let Some(selected_idx) = params.selected_process_index {
|
||||
selected_idx == ix // ix is the absolute index in the sorted list
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
// Apply selection highlighting
|
||||
if is_selected {
|
||||
emphasis = emphasis
|
||||
.bg(PROCESS_SELECTION_BG)
|
||||
.fg(PROCESS_SELECTION_FG)
|
||||
.add_modifier(Modifier::BOLD);
|
||||
}
|
||||
|
||||
let cpu_str = fmt_cpu_pct(cpu_val);
|
||||
|
||||
ratatui::widgets::Row::new(vec![
|
||||
@ -131,11 +232,11 @@ pub fn draw_top_processes(
|
||||
});
|
||||
|
||||
// Header with sort indicator
|
||||
let cpu_hdr = match sort_by {
|
||||
let cpu_hdr = match params.sort_by {
|
||||
ProcSortBy::CpuDesc => "CPU % •",
|
||||
_ => "CPU %",
|
||||
};
|
||||
let mem_hdr = match sort_by {
|
||||
let mem_hdr = match params.sort_by {
|
||||
ProcSortBy::MemDesc => "Mem •",
|
||||
_ => "Mem",
|
||||
};
|
||||
@ -151,6 +252,47 @@ pub fn draw_top_processes(
|
||||
.column_spacing(1);
|
||||
f.render_widget(table, content);
|
||||
|
||||
// Draw tooltip if a process is selected
|
||||
if let Some(selected_pid) = params.selected_process_pid {
|
||||
// Find the selected process to get its name
|
||||
let process_info = if let Some(metrics) = params.metrics {
|
||||
metrics
|
||||
.top_processes
|
||||
.iter()
|
||||
.find(|p| p.pid == selected_pid)
|
||||
.map(|p| format!("PID {} • {}", p.pid, p.name))
|
||||
.unwrap_or_else(|| format!("PID {selected_pid}"))
|
||||
} else {
|
||||
format!("PID {selected_pid}")
|
||||
};
|
||||
|
||||
let tooltip_text = format!("{process_info} | Enter for details • X to unselect");
|
||||
let tooltip_width = tooltip_text.len() as u16 + 2; // Add padding
|
||||
let tooltip_height = 3;
|
||||
|
||||
// Position tooltip at bottom-right of the processes area
|
||||
if area.width > tooltip_width + 2 && area.height > tooltip_height + 1 {
|
||||
let tooltip_area = Rect {
|
||||
x: area.x + area.width.saturating_sub(tooltip_width + 1),
|
||||
y: area.y + area.height.saturating_sub(tooltip_height + 1),
|
||||
width: tooltip_width,
|
||||
height: tooltip_height,
|
||||
};
|
||||
|
||||
let tooltip_block = Block::default().borders(Borders::ALL).style(
|
||||
Style::default()
|
||||
.bg(PROCESS_TOOLTIP_BG)
|
||||
.fg(PROCESS_TOOLTIP_FG),
|
||||
);
|
||||
|
||||
let tooltip_paragraph = Paragraph::new(tooltip_text)
|
||||
.block(tooltip_block)
|
||||
.wrap(ratatui::widgets::Wrap { trim: true });
|
||||
|
||||
f.render_widget(tooltip_paragraph, tooltip_area);
|
||||
}
|
||||
}
|
||||
|
||||
// Draw scrollbar like CPU pane
|
||||
let scroll_area = Rect {
|
||||
x: inner.x + inner.width.saturating_sub(1),
|
||||
@ -191,6 +333,18 @@ fn fmt_cpu_pct(v: f32) -> String {
|
||||
}
|
||||
|
||||
/// Handle keyboard scrolling (Up/Down/PageUp/PageDown/Home/End)
|
||||
/// Parameters for process key event handling.
pub struct ProcessKeyParams<'a> {
    /// Selected process PID; updated in place as the selection moves.
    pub selected_process_pid: &'a mut Option<u32>,
    /// Selected index into `metrics.top_processes`; kept in sync with the PID.
    pub selected_process_index: &'a mut Option<usize>,
    /// The key event to handle.
    pub key: crossterm::event::KeyEvent,
    /// Latest metrics snapshot; navigation is a no-op without it.
    pub metrics: Option<&'a Metrics>,
    /// Active sort order — navigation follows the displayed (sorted) order.
    pub sort_by: ProcSortBy,
    /// Active fuzzy filter — navigation is restricted to matching rows.
    pub search_query: &'a str,
}
|
||||
|
||||
/// LEGACY: Use processes_handle_key_with_selection for enhanced functionality
|
||||
#[allow(dead_code)]
|
||||
pub fn processes_handle_key(
|
||||
scroll_offset: &mut usize,
|
||||
key: crossterm::event::KeyEvent,
|
||||
@ -199,8 +353,105 @@ pub fn processes_handle_key(
|
||||
crate::ui::cpu::per_core_handle_key(scroll_offset, key, page_size);
|
||||
}
|
||||
|
||||
pub fn processes_handle_key_with_selection(params: ProcessKeyParams) -> bool {
|
||||
use crossterm::event::KeyCode;
|
||||
|
||||
match params.key.code {
|
||||
KeyCode::Up => {
|
||||
// Navigate through filtered and sorted results
|
||||
if let Some(m) = params.metrics {
|
||||
let idxs = get_filtered_sorted_indices(m, params.search_query, params.sort_by);
|
||||
|
||||
if idxs.is_empty() {
|
||||
// No filtered results, clear selection
|
||||
*params.selected_process_index = None;
|
||||
*params.selected_process_pid = None;
|
||||
} else if params.selected_process_index.is_none()
|
||||
|| params.selected_process_pid.is_none()
|
||||
{
|
||||
// No selection - select the first process in filtered/sorted order
|
||||
let first_idx = idxs[0];
|
||||
*params.selected_process_index = Some(first_idx);
|
||||
*params.selected_process_pid = Some(m.top_processes[first_idx].pid);
|
||||
} else if let Some(current_idx) = *params.selected_process_index {
|
||||
// Find current position in filtered/sorted list
|
||||
if let Some(pos) = idxs.iter().position(|&idx| idx == current_idx) {
|
||||
if pos > 0 {
|
||||
// Move up in filtered/sorted list
|
||||
let new_idx = idxs[pos - 1];
|
||||
*params.selected_process_index = Some(new_idx);
|
||||
*params.selected_process_pid = Some(m.top_processes[new_idx].pid);
|
||||
}
|
||||
} else {
|
||||
// Current selection not in filtered list, select first result
|
||||
let first_idx = idxs[0];
|
||||
*params.selected_process_index = Some(first_idx);
|
||||
*params.selected_process_pid = Some(m.top_processes[first_idx].pid);
|
||||
}
|
||||
}
|
||||
}
|
||||
true // Handled
|
||||
}
|
||||
KeyCode::Down => {
|
||||
// Navigate through filtered and sorted results
|
||||
if let Some(m) = params.metrics {
|
||||
let idxs = get_filtered_sorted_indices(m, params.search_query, params.sort_by);
|
||||
|
||||
if idxs.is_empty() {
|
||||
// No filtered results, clear selection
|
||||
*params.selected_process_index = None;
|
||||
*params.selected_process_pid = None;
|
||||
} else if params.selected_process_index.is_none()
|
||||
|| params.selected_process_pid.is_none()
|
||||
{
|
||||
// No selection - select the first process in filtered/sorted order
|
||||
let first_idx = idxs[0];
|
||||
*params.selected_process_index = Some(first_idx);
|
||||
*params.selected_process_pid = Some(m.top_processes[first_idx].pid);
|
||||
} else if let Some(current_idx) = *params.selected_process_index {
|
||||
// Find current position in filtered/sorted list
|
||||
if let Some(pos) = idxs.iter().position(|&idx| idx == current_idx) {
|
||||
if pos + 1 < idxs.len() {
|
||||
// Move down in filtered/sorted list
|
||||
let new_idx = idxs[pos + 1];
|
||||
*params.selected_process_index = Some(new_idx);
|
||||
*params.selected_process_pid = Some(m.top_processes[new_idx].pid);
|
||||
}
|
||||
} else {
|
||||
// Current selection not in filtered list, select first result
|
||||
let first_idx = idxs[0];
|
||||
*params.selected_process_index = Some(first_idx);
|
||||
*params.selected_process_pid = Some(m.top_processes[first_idx].pid);
|
||||
}
|
||||
}
|
||||
}
|
||||
true // Handled
|
||||
}
|
||||
KeyCode::Char('x') | KeyCode::Char('X') => {
|
||||
// Unselect any selected process
|
||||
if params.selected_process_pid.is_some() || params.selected_process_index.is_some() {
|
||||
*params.selected_process_pid = None;
|
||||
*params.selected_process_index = None;
|
||||
true // Handled
|
||||
} else {
|
||||
false // No selection to clear
|
||||
}
|
||||
}
|
||||
KeyCode::Enter => {
|
||||
// Signal that Enter was pressed with a selection
|
||||
params.selected_process_pid.is_some() // Return true if we have a selection to handle
|
||||
}
|
||||
_ => {
|
||||
// No other keys handled - let scrollbar handle all navigation
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle mouse for content scrolling and scrollbar dragging.
|
||||
/// Returns Some(new_sort) if the header "CPU %" or "Mem" was clicked.
|
||||
/// LEGACY: Use processes_handle_mouse_with_selection for enhanced functionality
|
||||
#[allow(dead_code)]
|
||||
pub fn processes_handle_mouse(
|
||||
scroll_offset: &mut usize,
|
||||
drag: &mut Option<crate::ui::cpu::PerCoreScrollDrag>,
|
||||
@ -264,3 +515,124 @@ pub fn processes_handle_mouse(
|
||||
);
|
||||
None
|
||||
}
|
||||
|
||||
/// Parameters for process mouse event handling.
///
/// Bundles the mutable scroll/selection state with the mouse event and the
/// data needed to map a click back to a process row (same filter/sort state
/// the draw code uses).
pub struct ProcessMouseParams<'a> {
    /// Scroll offset into the process list (rows scrolled past the top).
    pub scroll_offset: &'a mut usize,
    /// PID of the currently selected process, if any.
    pub selected_process_pid: &'a mut Option<u32>,
    /// Index of the selected process within `metrics.top_processes`.
    pub selected_process_index: &'a mut Option<usize>,
    /// In-progress scrollbar drag state (shared with the per-core widget).
    pub drag: &'a mut Option<crate::ui::cpu::PerCoreScrollDrag>,
    /// The mouse event being handled.
    pub mouse: MouseEvent,
    /// Outer widget area (including borders), as given to the draw code.
    pub area: Rect,
    /// Total number of rows in the process table (used for scroll clamping).
    pub total_rows: usize,
    /// Latest metrics snapshot; `None` when no data has arrived yet.
    pub metrics: Option<&'a Metrics>,
    /// Current sort order for the process table.
    pub sort_by: ProcSortBy,
    /// Current search filter text (empty string means no filtering).
    pub search_query: &'a str,
}
|
||||
|
||||
/// Enhanced mouse handler that also manages process selection.
/// Returns Some(new_sort) if the header was clicked, or handles row selection.
///
/// Handles, in order: scrollbar clicks/drags, wheel scrolling, header clicks
/// (sort change), and left-clicks on data rows (selection). All rectangle
/// math here must stay in sync with `draw_top_processes`, otherwise clicks
/// map to the wrong rows/columns.
pub fn processes_handle_mouse_with_selection(params: ProcessMouseParams) -> Option<ProcSortBy> {
    // Inner and content areas (match draw_top_processes): shrink by 1 cell on
    // each side for the block border.
    let inner = Rect {
        x: params.area.x + 1,
        y: params.area.y + 1,
        width: params.area.width.saturating_sub(2),
        height: params.area.height.saturating_sub(2),
    };
    if inner.height == 0 || inner.width <= 2 {
        return None;
    }

    // Calculate content area - must match draw_top_processes exactly!
    // If search is active or query exists, content starts after search box (3 lines).
    // NOTE(review): "active" is derived solely from the query text being
    // non-empty; if the UI can show the search box with an empty query,
    // confirm this still matches the draw-side layout.
    let search_active = !params.search_query.is_empty();
    let content_start_y = if search_active { inner.y + 3 } else { inner.y };

    let content = Rect {
        x: inner.x,
        y: content_start_y,
        // Width trimmed by 2 to mirror the draw code (presumably the
        // scrollbar gutter - confirm against draw_top_processes).
        width: inner.width.saturating_sub(2),
        height: inner
            .height
            .saturating_sub(if search_active { 3 } else { 0 }),
    };

    // Scrollbar interactions (click arrows/page/drag).
    per_core_handle_scrollbar_mouse(
        params.scroll_offset,
        params.drag,
        params.mouse,
        params.area,
        params.total_rows,
    );

    // Wheel scrolling when inside the content.
    crate::ui::cpu::per_core_handle_mouse(
        params.scroll_offset,
        params.mouse,
        content,
        content.height as usize,
    );

    // Header click to change sort. The header occupies the first content row.
    let header_area = Rect {
        x: content.x,
        y: content.y,
        width: content.width,
        height: 1,
    };
    let inside_header = params.mouse.row == header_area.y
        && params.mouse.column >= header_area.x
        && params.mouse.column < header_area.x + header_area.width;

    if inside_header && matches!(params.mouse.kind, MouseEventKind::Down(MouseButton::Left)) {
        // Split header into the same columns as the table body (COLS).
        // cols[2] / cols[3] are the "CPU %" / "Mem" columns per the legacy
        // handler's documentation - confirm against COLS if it changes.
        let cols = Layout::default()
            .direction(Direction::Horizontal)
            .constraints(COLS.to_vec())
            .split(header_area);
        if params.mouse.column >= cols[2].x && params.mouse.column < cols[2].x + cols[2].width {
            return Some(ProcSortBy::CpuDesc);
        }
        if params.mouse.column >= cols[3].x && params.mouse.column < cols[3].x + cols[3].width {
            return Some(ProcSortBy::MemDesc);
        }
    }

    // Row click for process selection.
    let data_start_row = content.y + 1; // Skip header
    let data_area_height = content.height.saturating_sub(1); // Exclude header

    if matches!(params.mouse.kind, MouseEventKind::Down(MouseButton::Left))
        && params.mouse.row >= data_start_row
        && params.mouse.row < data_start_row + data_area_height
        && params.mouse.column >= content.x
        && params.mouse.column < content.x + content.width
    {
        let clicked_row = (params.mouse.row - data_start_row) as usize;

        // Find the actual process using the same filtering/sorting logic as the drawing code.
        if let Some(m) = params.metrics {
            // Use the same filtered and sorted indices as display.
            let idxs = get_filtered_sorted_indices(m, params.search_query, params.sort_by);

            // Calculate which process was actually clicked based on filtered/sorted order.
            let visible_process_position = *params.scroll_offset + clicked_row;
            if visible_process_position < idxs.len() {
                let actual_process_index = idxs[visible_process_position];
                let clicked_process = &m.top_processes[actual_process_index];
                *params.selected_process_pid = Some(clicked_process.pid);
                *params.selected_process_index = Some(actual_process_index);
            }
        }
    }

    // Clamp scroll offset to the valid range for the data area.
    per_core_clamp(
        params.scroll_offset,
        params.total_rows,
        (content.height.saturating_sub(1)) as usize,
    );
    None
}
|
||||
|
||||
@ -6,3 +6,83 @@ use ratatui::style::Color;
|
||||
// Scrollbar palette. Arrow, track, and thumb currently share the same grey;
// kept as three separate constants so they can diverge later without call
// site changes.
pub const SB_ARROW: Color = Color::Rgb(170, 170, 180);
pub const SB_TRACK: Color = Color::Rgb(170, 170, 180);
pub const SB_THUMB: Color = Color::Rgb(170, 170, 180);
|
||||
|
||||
// Modal palette
|
||||
pub const MODAL_DIM_BG: Color = Color::Rgb(15, 15, 25);
|
||||
pub const MODAL_BG: Color = Color::Rgb(26, 26, 46);
|
||||
pub const MODAL_FG: Color = Color::Rgb(230, 230, 230);
|
||||
pub const MODAL_TITLE_FG: Color = Color::Rgb(255, 102, 102); // soft red for title text
|
||||
pub const MODAL_BORDER_FG: Color = Color::Rgb(204, 51, 51); // darker red border
|
||||
|
||||
pub const MODAL_ICON_PINK: Color = Color::Rgb(255, 182, 193); // light pink icons line
|
||||
pub const MODAL_AGENT_FG: Color = Color::Rgb(220, 220, 255); // pale periwinkle
|
||||
pub const MODAL_HINT_FG: Color = Color::Rgb(255, 215, 0); // gold for message icon
|
||||
pub const MODAL_OFFLINE_LABEL_FG: Color = Color::Rgb(135, 206, 235); // sky blue label
|
||||
pub const MODAL_RETRY_LABEL_FG: Color = Color::Rgb(255, 165, 0); // orange label
|
||||
pub const MODAL_COUNTDOWN_LABEL_FG: Color = Color::Rgb(255, 192, 203); // pink label for countdown
|
||||
|
||||
// Buttons (modal retry/exit). "_ACTIVE" = button currently focused.
pub const BTN_RETRY_BG_ACTIVE: Color = Color::Rgb(46, 204, 113); // modern green
pub const BTN_RETRY_FG_ACTIVE: Color = Color::Rgb(26, 26, 46);
pub const BTN_RETRY_FG_INACTIVE: Color = Color::Rgb(46, 204, 113);

pub const BTN_EXIT_BG_ACTIVE: Color = Color::Rgb(255, 255, 255); // white (previous comment said "modern red", which did not match the value)
pub const BTN_EXIT_FG_ACTIVE: Color = Color::Rgb(26, 26, 46);
pub const BTN_EXIT_FG_INACTIVE: Color = Color::Rgb(255, 255, 255);
|
||||
|
||||
// Process selection colors
|
||||
pub const PROCESS_SELECTION_BG: Color = Color::Rgb(147, 112, 219); // Medium slate blue (purple)
|
||||
pub const PROCESS_SELECTION_FG: Color = Color::Rgb(255, 255, 255); // White text for contrast
|
||||
pub const PROCESS_TOOLTIP_BG: Color = Color::Rgb(147, 112, 219); // Same purple as selection
|
||||
pub const PROCESS_TOOLTIP_FG: Color = Color::Rgb(255, 255, 255); // White text for contrast
|
||||
|
||||
// Process details modal colors (matches main UI aesthetic - no custom colors, terminal defaults)
|
||||
pub const PROCESS_DETAILS_ACCENT: Color = Color::Rgb(147, 112, 219); // Purple accent for highlights
|
||||
|
||||
// Emoji / icon strings (centralized so they can be themed/swapped later)
|
||||
pub const ICON_WARNING_TITLE: &str = " 🔌 CONNECTION ERROR ";
|
||||
pub const ICON_CLUSTER: &str = "⚠️";
|
||||
pub const ICON_MESSAGE: &str = "💭 ";
|
||||
pub const ICON_OFFLINE_LABEL: &str = "⏱️ Offline for: ";
|
||||
pub const ICON_RETRY_LABEL: &str = "🔄 Retry attempts: ";
|
||||
pub const ICON_COUNTDOWN_LABEL: &str = "⏰ Next auto retry: ";
|
||||
pub const BTN_RETRY_TEXT: &str = " 🔄 Retry ";
|
||||
pub const BTN_EXIT_TEXT: &str = " ❌ Exit ";
|
||||
|
||||
// warning icon
|
||||
pub const LARGE_ERROR_ICON: &[&str] = &[
|
||||
" /\\ ",
|
||||
" / \\ ",
|
||||
" / !! \\ ",
|
||||
" / !!!! \\ ",
|
||||
" / !! \\ ",
|
||||
" / !!!! \\ ",
|
||||
" / !! \\ ",
|
||||
" /______________\\ ",
|
||||
];
|
||||
|
||||
//about logo
|
||||
pub const ASCII_ART: &str = r#"
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣀⣠⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⣀⣤⣶⣾⠿⠿⠛⠃⠀⠀⠀⠀⠀⣀⣀⣠⡄⠀⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠘⠛⢉⣠⣴⣾⣿⠿⠆⢰⣾⡿⠿⠛⠛⠋⠁⠀⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⣿⠟⠋⣁⣤⣤⣶⠀⣠⣤⣶⣾⣿⣿⡿⠀⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣶⣿⣿⣿⣿⣿⡆⠘⠛⢉⣁⣤⣤⣤⡀⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣿⣿⣿⡀⢾⣿⣿⣿⣿⣿⡇⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⣿⣿⣿⣿⣧⠈⢿⣿⣿⣿⣿⣷⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣿⣿⣿⣧⠈⢿⣿⣿⣿⣿⡄⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⣿⣿⣿⣿⣿⠿⠋⣁⠀⢿⣿⣿⣿⣷⡀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣠⣴⣿⣿⣿⣿⡟⢁⣴⣿⣿⡇⢸⣿⣿⡿⠟⠃⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⢀⣠⣴⣿⣿⣿⣿⣿⣿⡟⢀⣿⣿⣿⡟⢀⣾⠟⢁⣤⣶⣿⠀⠀
|
||||
⠀⠀⠀⠀⣠⣶⣿⣿⣿⣿⣿⣿⣿⣿⣿⡇⠸⡿⠟⢋⣠⣾⠃⣰⣿⣿⣿⡟⠀⠀
|
||||
⠀⠀⣴⣄⠙⣿⣿⣿⣿⣿⡿⠿⠛⠋⣉⣁⣤⣴⣶⣿⣿⣿⠀⣿⡿⠟⠋⠀⠀⠀
|
||||
⠀⠀⣿⣿⡆⠹⠟⠋⣁⣤⡄⢰⣿⠿⠟⠛⠋⠉⠉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
|
||||
⠀⠀⠈⠉⠁⠀⠀⠀⠙⠛⠃⠈⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
|
||||
|
||||
███████╗ ██████╗ ██████╗████████╗ ██████╗ ██████╗
|
||||
██╔════╝██╔═══██╗██╔════╝╚══██╔══╝██╔═══██╗██╔══██╗
|
||||
███████╗██║ ██║██║ ██║ ██║ ██║██████╔╝
|
||||
╚════██║██║ ██║██║ ██║ ██║ ██║██╔═══╝
|
||||
███████║╚██████╔╝╚██████╗ ██║ ╚██████╔╝██║
|
||||
╚══════╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═════╝ ╚═╝
|
||||
"#;
|
||||
|
||||
46
socktop/tests/modal_tests.rs
Normal file
46
socktop/tests/modal_tests.rs
Normal file
@ -0,0 +1,46 @@
|
||||
//! Tests for modal formatting and duration helper.
|
||||
use std::time::Duration;
|
||||
|
||||
// Bring the format_duration function into scope by duplicating logic (private in module). If desired,
|
||||
// this could be moved to a shared util module; for now we re-assert expected behavior.
|
||||
/// Reference implementation of the modal's duration formatter (the real one
/// is private to its module), formatting as "Xh Ym Zs", "Ym Zs", or "Zs"
/// depending on magnitude.
fn format_duration_ref(duration: Duration) -> String {
    let secs = duration.as_secs();
    let (hours, rem) = (secs / 3600, secs % 3600);
    let (minutes, seconds) = (rem / 60, rem % 60);
    match (hours, minutes) {
        (0, 0) => format!("{seconds}s"),
        (0, _) => format!("{minutes}m {seconds}s"),
        _ => format!("{hours}h {minutes}m {seconds}s"),
    }
}
|
||||
|
||||
/// Boundary checks for the duration formatter: second, minute, and hour
/// rollovers all produce the expected "Xh Ym Zs" shapes.
#[test]
fn test_format_duration_boundaries() {
    assert_eq!(format_duration_ref(Duration::from_secs(0)), "0s");
    assert_eq!(format_duration_ref(Duration::from_secs(59)), "59s");
    assert_eq!(format_duration_ref(Duration::from_secs(60)), "1m 0s");
    assert_eq!(format_duration_ref(Duration::from_secs(61)), "1m 1s");
    assert_eq!(format_duration_ref(Duration::from_secs(3600)), "1h 0m 0s");
    assert_eq!(format_duration_ref(Duration::from_secs(3661)), "1h 1m 1s");
}
|
||||
|
||||
// Basic test to ensure auto-retry countdown semantics are consistent for initial state.
|
||||
/// Placeholder for countdown semantics: while the modal is inactive there
/// must be no auto-retry countdown, regardless of disconnect state.
///
/// We cannot construct `App` here without pulling in the whole UI; this
/// mimics the gating logic only. For a deeper test, refactor the countdown
/// computation into a pure helper returning `Option<u64>`.
#[test]
fn test_auto_retry_initial_none() {
    let modal_active = false; // requirement: countdown only runs while the modal is shown
    let disconnected_state = true; // assume disconnected state
    // Same gate as the original if/else: Some(0) would stand in for the
    // computed target when both conditions hold.
    let countdown: Option<u64> = (disconnected_state && modal_active).then_some(0);
    assert!(countdown.is_none());
}
|
||||
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "socktop_agent"
|
||||
version = "1.40.70"
|
||||
version = "1.50.2"
|
||||
authors = ["Jason Witty <jasonpwitty+socktop@proton.me>"]
|
||||
description = "Socktop agent daemon. Serves host metrics over WebSocket."
|
||||
edition = "2024"
|
||||
@ -8,31 +8,39 @@ license = "MIT"
|
||||
readme = "README.md"
|
||||
|
||||
[dependencies]
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
# Tokio: Use minimal features instead of "full" to reduce binary size
|
||||
# Only include: rt-multi-thread (async runtime), net (WebSocket), sync (Mutex/RwLock), macros (#[tokio::test])
|
||||
# Excluded: io, fs, process, signal, time (not needed for this workload)
|
||||
# Savings: ~200-300KB binary size, faster compile times
|
||||
tokio = { version = "1", features = ["rt-multi-thread", "net", "sync", "macros"] }
|
||||
axum = { version = "0.7", features = ["ws", "macros"] }
|
||||
sysinfo = { version = "0.37", features = ["network", "disk", "component"] }
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
flate2 = { version = "1", default-features = false, features = ["rust_backend"] }
|
||||
futures-util = "0.3.31"
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
# nvml-wrapper removed (unused; GPU metrics via gfxinfo only now)
|
||||
tracing = { version = "0.1", optional = true }
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"], optional = true }
|
||||
gfxinfo = "0.1.2"
|
||||
once_cell = "1.19"
|
||||
axum-server = { version = "0.6", features = ["tls-rustls"] }
|
||||
rustls = "0.23"
|
||||
axum-server = { version = "0.7", features = ["tls-rustls"] }
|
||||
rustls = { version = "0.23", features = ["aws-lc-rs"] }
|
||||
rustls-pemfile = "2.1"
|
||||
rcgen = "0.13" # pure-Rust self-signed cert generation (replaces openssl vendored build)
|
||||
rcgen = "0.13"
|
||||
anyhow = "1"
|
||||
hostname = "0.3"
|
||||
prost = { workspace = true }
|
||||
time = { version = "0.3", default-features = false, features = ["formatting", "macros", "parsing" ] }
|
||||
|
||||
[features]
|
||||
default = []
|
||||
logging = ["tracing", "tracing-subscriber"]
|
||||
|
||||
[build-dependencies]
|
||||
prost-build = "0.13"
|
||||
tonic-build = { version = "0.12", default-features = false, optional = true }
|
||||
protoc-bin-vendored = "3"
|
||||
|
||||
[dev-dependencies]
|
||||
assert_cmd = "2.0"
|
||||
tempfile = "3.10"
|
||||
|
||||
95
socktop_agent/src/cache.rs
Normal file
95
socktop_agent/src/cache.rs
Normal file
@ -0,0 +1,95 @@
|
||||
//! Caching for process metrics and journal entries
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::types::{ProcessMetricsResponse, JournalResponse};
|
||||
|
||||
/// A cached value together with the instant it was stored and its
/// time-to-live.
#[derive(Debug, Clone)]
struct CacheEntry<T> {
    data: T,
    cached_at: Instant,
    ttl: Duration,
}

impl<T> CacheEntry<T> {
    /// True once strictly more than `ttl` has passed since `cached_at`.
    fn is_expired(&self) -> bool {
        Instant::now().duration_since(self.cached_at) > self.ttl
    }
}
|
||||
|
||||
/// Per-PID cache for on-demand process data, keyed by PID.
#[derive(Debug)]
pub struct ProcessCache {
    // Detailed per-process metrics, cached with a short TTL (see setter).
    process_metrics: RwLock<HashMap<u32, CacheEntry<ProcessMetricsResponse>>>,
    // Journal/log entries per process, cached with a longer TTL (see setter).
    journal_entries: RwLock<HashMap<u32, CacheEntry<JournalResponse>>>,
}
|
||||
|
||||
impl ProcessCache {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
process_metrics: RwLock::new(HashMap::new()),
|
||||
journal_entries: RwLock::new(HashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get cached process metrics if available and not expired (250ms TTL)
|
||||
pub async fn get_process_metrics(&self, pid: u32) -> Option<ProcessMetricsResponse> {
|
||||
let cache = self.process_metrics.read().await;
|
||||
if let Some(entry) = cache.get(&pid) {
|
||||
if !entry.is_expired() {
|
||||
return Some(entry.data.clone());
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Cache process metrics with 250ms TTL
|
||||
pub async fn set_process_metrics(&self, pid: u32, data: ProcessMetricsResponse) {
|
||||
let mut cache = self.process_metrics.write().await;
|
||||
cache.insert(pid, CacheEntry {
|
||||
data,
|
||||
cached_at: Instant::now(),
|
||||
ttl: Duration::from_millis(250),
|
||||
});
|
||||
}
|
||||
|
||||
/// Get cached journal entries if available and not expired (1s TTL)
|
||||
pub async fn get_journal_entries(&self, pid: u32) -> Option<JournalResponse> {
|
||||
let cache = self.journal_entries.read().await;
|
||||
if let Some(entry) = cache.get(&pid) {
|
||||
if !entry.is_expired() {
|
||||
return Some(entry.data.clone());
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Cache journal entries with 1s TTL
|
||||
pub async fn set_journal_entries(&self, pid: u32, data: JournalResponse) {
|
||||
let mut cache = self.journal_entries.write().await;
|
||||
cache.insert(pid, CacheEntry {
|
||||
data,
|
||||
cached_at: Instant::now(),
|
||||
ttl: Duration::from_secs(1),
|
||||
});
|
||||
}
|
||||
|
||||
/// Clean up expired entries periodically
|
||||
pub async fn cleanup_expired(&self) {
|
||||
{
|
||||
let mut cache = self.process_metrics.write().await;
|
||||
cache.retain(|_, entry| !entry.is_expired());
|
||||
}
|
||||
{
|
||||
let mut cache = self.journal_entries.write().await;
|
||||
cache.retain(|_, entry| !entry.is_expired());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// `Default` delegates to [`ProcessCache::new`] so the cache can be created
/// via `Default::default()` or derived-`Default` containers.
impl Default for ProcessCache {
    fn default() -> Self {
        Self::new()
    }
}
|
||||
17
socktop_agent/src/lib.rs
Normal file
17
socktop_agent/src/lib.rs
Normal file
@ -0,0 +1,17 @@
|
||||
//! Library interface for socktop_agent functionality
|
||||
//! This allows testing of agent functions.
|
||||
|
||||
pub mod gpu;
|
||||
pub mod metrics;
|
||||
pub mod proto;
|
||||
pub mod state;
|
||||
pub mod tls;
|
||||
pub mod types;
|
||||
pub mod ws;
|
||||
|
||||
// Re-export commonly used types and functions for testing
|
||||
pub use metrics::{collect_journal_entries, collect_process_metrics};
|
||||
pub use state::{AppState, CacheEntry};
|
||||
pub use types::{
|
||||
DetailedProcessInfo, JournalEntry, JournalResponse, LogLevel, ProcessMetricsResponse,
|
||||
};
|
||||
@ -29,10 +29,53 @@ fn arg_value(name: &str) -> Option<String> {
|
||||
None
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
fn main() -> anyhow::Result<()> {
|
||||
// Install rustls crypto provider before any TLS operations
|
||||
// This is required when using axum-server's tls-rustls feature
|
||||
rustls::crypto::aws_lc_rs::default_provider()
|
||||
.install_default()
|
||||
.ok(); // Ignore error if already installed
|
||||
|
||||
#[cfg(feature = "logging")]
|
||||
tracing_subscriber::fmt::init();
|
||||
|
||||
// Configure Tokio runtime with optimized thread pool for reduced overhead.
|
||||
//
|
||||
// The agent is primarily I/O-bound (WebSocket, /proc file reads, sysinfo)
|
||||
// with no CPU-intensive or blocking operations, so a smaller thread pool
|
||||
// is beneficial:
|
||||
//
|
||||
// Benefits:
|
||||
// - Lower memory footprint (~1-2MB per thread saved)
|
||||
// - Reduced context switching overhead
|
||||
// - Fewer idle threads consuming resources
|
||||
// - Better for resource-constrained systems
|
||||
//
|
||||
// Trade-offs:
|
||||
// - Slightly reduced throughput under very high concurrent connections
|
||||
// - Could introduce latency if blocking operations are added (don't do this!)
|
||||
//
|
||||
// Default: 2 threads (sufficient for typical workloads with 1-10 clients)
|
||||
// Override: Set SOCKTOP_WORKER_THREADS=4 to use more threads if needed
|
||||
//
|
||||
// Note: Default Tokio uses num_cpus threads which is excessive for this workload.
|
||||
|
||||
let worker_threads = std::env::var("SOCKTOP_WORKER_THREADS")
|
||||
.ok()
|
||||
.and_then(|s| s.parse::<usize>().ok())
|
||||
.unwrap_or(2)
|
||||
.clamp(1, 16); // Ensure 1-16 threads
|
||||
|
||||
let runtime = tokio::runtime::Builder::new_multi_thread()
|
||||
.worker_threads(worker_threads)
|
||||
.thread_name("socktop-agent")
|
||||
.enable_all()
|
||||
.build()?;
|
||||
|
||||
runtime.block_on(async_main())
|
||||
}
|
||||
|
||||
async fn async_main() -> anyhow::Result<()> {
|
||||
// Version flag (print and exit). Keep before heavy initialization.
|
||||
if arg_flag("--version") || arg_flag("-V") {
|
||||
println!("socktop_agent {}", env!("CARGO_PKG_VERSION"));
|
||||
|
||||
@ -2,7 +2,10 @@
|
||||
|
||||
use crate::gpu::collect_all_gpus;
|
||||
use crate::state::AppState;
|
||||
use crate::types::{DiskInfo, Metrics, NetworkInfo, ProcessInfo, ProcessesPayload};
|
||||
use crate::types::{
|
||||
DetailedProcessInfo, DiskInfo, JournalEntry, JournalResponse, LogLevel, Metrics, NetworkInfo,
|
||||
ProcessInfo, ProcessMetricsResponse, ProcessesPayload,
|
||||
};
|
||||
use once_cell::sync::OnceCell;
|
||||
#[cfg(target_os = "linux")]
|
||||
use std::collections::HashMap;
|
||||
@ -10,13 +13,56 @@ use std::collections::HashMap;
|
||||
use std::fs;
|
||||
#[cfg(target_os = "linux")]
|
||||
use std::io;
|
||||
use std::process::Command;
|
||||
use std::sync::Mutex;
|
||||
use std::time::Duration as StdDuration;
|
||||
use std::time::{Duration, Instant};
|
||||
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
|
||||
use sysinfo::{ProcessRefreshKind, ProcessesToUpdate};
|
||||
#[cfg(feature = "logging")]
|
||||
use tracing::warn;
|
||||
|
||||
// NOTE: CPU normalization env removed; non-Linux now always reports per-process share (0..100) as given by sysinfo.
|
||||
|
||||
// Helper functions to get CPU time from /proc/stat on Linux
|
||||
/// Read a process's user-mode CPU time from `/proc/<pid>/stat`, in
/// milliseconds, returning 0 when the process is gone or the field cannot
/// be parsed.
///
/// utime is field 13 (0-indexed) of the stat line, in clock ticks; ticks are
/// converted assuming 100 Hz (1 tick = 10 ms), as in the original.
/// NOTE: like the original, this splits the raw stat line on whitespace, so
/// a comm name containing spaces would shift the field indices.
#[cfg(target_os = "linux")]
fn get_cpu_time_user(pid: u32) -> u64 {
    fs::read_to_string(format!("/proc/{pid}/stat"))
        .ok()
        .and_then(|stat| {
            stat.split_whitespace()
                .nth(13)
                .and_then(|utime| utime.parse::<u64>().ok())
        })
        .map_or(0, |ticks| ticks * 10)
}
|
||||
|
||||
/// Read a process's kernel-mode CPU time from `/proc/<pid>/stat`, in
/// milliseconds, returning 0 when the process is gone or the field cannot
/// be parsed.
///
/// stime is field 14 (0-indexed) of the stat line, in clock ticks; ticks are
/// converted assuming 100 Hz (1 tick = 10 ms), as in the original.
/// NOTE: like the original, this splits the raw stat line on whitespace, so
/// a comm name containing spaces would shift the field indices.
#[cfg(target_os = "linux")]
fn get_cpu_time_system(pid: u32) -> u64 {
    fs::read_to_string(format!("/proc/{pid}/stat"))
        .ok()
        .and_then(|stat| {
            stat.split_whitespace()
                .nth(14)
                .and_then(|stime| stime.parse::<u64>().ok())
        })
        .map_or(0, |ticks| ticks * 10)
}
|
||||
|
||||
/// Fallback for non-Linux targets: per-process user CPU time is not
/// collected there, so always report 0 ms.
#[cfg(not(target_os = "linux"))]
fn get_cpu_time_user(_pid: u32) -> u64 {
    0 // Not implemented for non-Linux platforms
}
|
||||
|
||||
/// Fallback for non-Linux targets: per-process system CPU time is not
/// collected there, so always report 0 ms.
#[cfg(not(target_os = "linux"))]
fn get_cpu_time_system(_pid: u32) -> u64 {
    0 // Not implemented for non-Linux platforms
}
|
||||
// Runtime toggles (read once)
|
||||
fn gpu_enabled() -> bool {
|
||||
static ON: OnceCell<bool> = OnceCell::new();
|
||||
@ -123,11 +169,12 @@ pub async fn collect_fast_metrics(state: &AppState) -> Metrics {
|
||||
}
|
||||
}
|
||||
let mut sys = state.sys.lock().await;
|
||||
if let Err(e) = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
|
||||
if let Err(_e) = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
|
||||
sys.refresh_cpu_usage();
|
||||
sys.refresh_memory();
|
||||
})) {
|
||||
warn!("sysinfo selective refresh panicked: {e:?}");
|
||||
#[cfg(feature = "logging")]
|
||||
warn!("sysinfo selective refresh panicked: {_e:?}");
|
||||
}
|
||||
|
||||
// Get or initialize hostname once
|
||||
@ -221,8 +268,9 @@ pub async fn collect_fast_metrics(state: &AppState) -> Metrics {
|
||||
let v = match collect_all_gpus() {
|
||||
Ok(v) if !v.is_empty() => Some(v),
|
||||
Ok(_) => None,
|
||||
Err(e) => {
|
||||
warn!("gpu collection failed: {e}");
|
||||
Err(_e) => {
|
||||
#[cfg(feature = "logging")]
|
||||
warn!("gpu collection failed: {_e}");
|
||||
None
|
||||
}
|
||||
};
|
||||
@ -286,14 +334,199 @@ pub async fn collect_disks(state: &AppState) -> Vec<DiskInfo> {
|
||||
}
|
||||
let mut disks_list = state.disks.lock().await;
|
||||
disks_list.refresh(false); // don't drop missing disks
|
||||
let disks: Vec<DiskInfo> = disks_list
|
||||
|
||||
// Collect disk temperatures from components
|
||||
// NVMe temps show up as "Composite" under different chip names
|
||||
let disk_temps = {
|
||||
let mut components = state.components.lock().await;
|
||||
components.refresh(true); // true = refresh values, not just the list
|
||||
|
||||
let mut composite_temps = Vec::new();
|
||||
|
||||
for c in components.iter() {
|
||||
let label = c.label().to_ascii_lowercase();
|
||||
|
||||
// Collect all "Composite" temperatures (these are NVMe drives)
|
||||
// Labels are like "nvme Composite CT1000N7BSS503" or "nvme Composite Sabrent Rocket 4.0"
|
||||
if label.contains("composite")
|
||||
&& let Some(temp) = c.temperature()
|
||||
{
|
||||
#[cfg(feature = "logging")]
|
||||
tracing::debug!("Found Composite temp: {}°C", temp);
|
||||
composite_temps.push(temp);
|
||||
}
|
||||
}
|
||||
|
||||
// Store composite temps indexed by their order (nvme0n1, nvme1n1, nvme2n1, etc.)
|
||||
let mut temps = std::collections::HashMap::new();
|
||||
for (idx, temp) in composite_temps.iter().enumerate() {
|
||||
let key = format!("nvme{}n1", idx);
|
||||
#[cfg(feature = "logging")]
|
||||
tracing::debug!("Mapping {} -> {}°C", key, temp);
|
||||
temps.insert(key, *temp);
|
||||
}
|
||||
#[cfg(feature = "logging")]
|
||||
tracing::debug!("Final disk_temps map: {:?}", temps);
|
||||
temps
|
||||
};
|
||||
|
||||
// First collect all partitions from sysinfo, deduplicating by device name
|
||||
// (same partition can be mounted at multiple mount points)
|
||||
let mut seen_partitions = std::collections::HashSet::new();
|
||||
let partitions: Vec<DiskInfo> = disks_list
|
||||
.iter()
|
||||
.map(|d| DiskInfo {
|
||||
name: d.name().to_string_lossy().into_owned(),
|
||||
total: d.total_space(),
|
||||
available: d.available_space(),
|
||||
.filter_map(|d| {
|
||||
let name = d.name().to_string_lossy().into_owned();
|
||||
|
||||
// Skip if we've already seen this partition/device
|
||||
if !seen_partitions.insert(name.clone()) {
|
||||
return None;
|
||||
}
|
||||
|
||||
// Determine if this is a partition
|
||||
let is_partition = name.contains("p1")
|
||||
|| name.contains("p2")
|
||||
|| name.contains("p3")
|
||||
|| name.ends_with('1')
|
||||
|| name.ends_with('2')
|
||||
|| name.ends_with('3')
|
||||
|| name.ends_with('4')
|
||||
|| name.ends_with('5')
|
||||
|| name.ends_with('6')
|
||||
|| name.ends_with('7')
|
||||
|| name.ends_with('8')
|
||||
|| name.ends_with('9');
|
||||
|
||||
// Try to find temperature for this disk
|
||||
let temperature = disk_temps.iter().find_map(|(key, &temp)| {
|
||||
if name.starts_with(key) {
|
||||
#[cfg(feature = "logging")]
|
||||
tracing::debug!("Matched {} with key {} -> {}°C", name, key, temp);
|
||||
Some(temp)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
|
||||
if temperature.is_none() && !name.starts_with("loop") && !name.starts_with("ram") {
|
||||
#[cfg(feature = "logging")]
|
||||
tracing::debug!("No temperature found for disk: {}", name);
|
||||
}
|
||||
|
||||
Some(DiskInfo {
|
||||
name,
|
||||
total: d.total_space(),
|
||||
available: d.available_space(),
|
||||
temperature,
|
||||
is_partition,
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Now create parent disk entries by aggregating partition data
|
||||
let mut parent_disks: std::collections::HashMap<String, (u64, u64, Option<f32>)> =
|
||||
std::collections::HashMap::new();
|
||||
|
||||
for partition in &partitions {
|
||||
if partition.is_partition {
|
||||
// Extract parent disk name
|
||||
// nvme0n1p1 -> nvme0n1, sda1 -> sda, mmcblk0p1 -> mmcblk0
|
||||
let parent_name = if let Some(pos) = partition.name.rfind('p') {
|
||||
// Check if character after 'p' is a digit
|
||||
if partition
|
||||
.name
|
||||
.chars()
|
||||
.nth(pos + 1)
|
||||
.is_some_and(|c| c.is_ascii_digit())
|
||||
{
|
||||
&partition.name[..pos]
|
||||
} else {
|
||||
// Handle sda1, sdb2, etc (just trim trailing digit)
|
||||
partition.name.trim_end_matches(char::is_numeric)
|
||||
}
|
||||
} else {
|
||||
// Handle sda1, sdb2, etc (just trim trailing digit)
|
||||
partition.name.trim_end_matches(char::is_numeric)
|
||||
};
|
||||
|
||||
// Look up temperature for the PARENT disk, not the partition
|
||||
// Strip /dev/ prefix if present for matching
|
||||
let parent_name_for_match = parent_name.strip_prefix("/dev/").unwrap_or(parent_name);
|
||||
let parent_temp = disk_temps.iter().find_map(|(key, &temp)| {
|
||||
if parent_name_for_match.starts_with(key) {
|
||||
Some(temp)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
|
||||
// Aggregate partition stats into parent
|
||||
let entry = parent_disks
|
||||
.entry(parent_name.to_string())
|
||||
.or_insert((0, 0, parent_temp));
|
||||
entry.0 += partition.total;
|
||||
entry.1 += partition.available;
|
||||
// Keep temperature if any partition has it (or if we just found one)
|
||||
if entry.2.is_none() {
|
||||
entry.2 = parent_temp;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create parent disk entries
|
||||
let mut disks: Vec<DiskInfo> = parent_disks
|
||||
.into_iter()
|
||||
.map(|(name, (total, available, temperature))| DiskInfo {
|
||||
name,
|
||||
total,
|
||||
available,
|
||||
temperature,
|
||||
is_partition: false,
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Sort parent disks by name
|
||||
disks.sort_by(|a, b| a.name.cmp(&b.name));
|
||||
|
||||
// Add partitions after their parent disk
|
||||
for partition in partitions {
|
||||
if partition.is_partition {
|
||||
// Find parent disk index
|
||||
let parent_name = if let Some(pos) = partition.name.rfind('p') {
|
||||
if partition
|
||||
.name
|
||||
.chars()
|
||||
.nth(pos + 1)
|
||||
.is_some_and(|c| c.is_ascii_digit())
|
||||
{
|
||||
&partition.name[..pos]
|
||||
} else {
|
||||
partition.name.trim_end_matches(char::is_numeric)
|
||||
}
|
||||
} else {
|
||||
partition.name.trim_end_matches(char::is_numeric)
|
||||
};
|
||||
|
||||
// Find where to insert this partition (after its parent)
|
||||
if let Some(parent_idx) = disks.iter().position(|d| d.name == parent_name) {
|
||||
// Insert after parent and any existing partitions of that parent
|
||||
let mut insert_idx = parent_idx + 1;
|
||||
while insert_idx < disks.len()
|
||||
&& disks[insert_idx].is_partition
|
||||
&& disks[insert_idx].name.starts_with(parent_name)
|
||||
{
|
||||
insert_idx += 1;
|
||||
}
|
||||
disks.insert(insert_idx, partition);
|
||||
} else {
|
||||
// Parent not found (shouldn't happen), just add at end
|
||||
disks.push(partition);
|
||||
}
|
||||
} else {
|
||||
// Not a partition (e.g., zram0), add at end
|
||||
disks.push(partition);
|
||||
}
|
||||
}
|
||||
{
|
||||
let mut cache = state.cache_disks.lock().await;
|
||||
cache.set(disks.clone());
|
||||
@ -527,6 +760,7 @@ pub async fn collect_processes_all(state: &AppState) -> ProcessesPayload {
|
||||
proc_cache
|
||||
.names
|
||||
.retain(|pid, _| sys.processes().contains_key(&sysinfo::Pid::from_u32(*pid)));
|
||||
#[cfg(feature = "logging")]
|
||||
tracing::debug!(
|
||||
"Cleaned up {} stale process names in {}ms",
|
||||
proc_cache.names.capacity() - proc_cache.names.len(),
|
||||
@ -549,3 +783,626 @@ pub async fn collect_processes_all(state: &AppState) -> ProcessesPayload {
|
||||
}
|
||||
payload
|
||||
}
|
||||
|
||||
/// Lightweight child process enumeration using direct /proc access
|
||||
/// This avoids the expensive refresh_processes_specifics(All) call
|
||||
#[cfg(target_os = "linux")]
|
||||
fn enumerate_child_processes_lightweight(
|
||||
parent_pid: u32,
|
||||
system: &sysinfo::System,
|
||||
) -> Vec<DetailedProcessInfo> {
|
||||
let mut children = Vec::new();
|
||||
|
||||
// Read /proc to find all child processes
|
||||
// This is much faster than refresh_processes_specifics(All)
|
||||
if let Ok(entries) = fs::read_dir("/proc") {
|
||||
for entry in entries.flatten() {
|
||||
if let Ok(file_name) = entry.file_name().into_string()
|
||||
&& let Ok(pid) = file_name.parse::<u32>()
|
||||
&& let Some(child_parent_pid) = read_parent_pid_from_proc(pid)
|
||||
&& child_parent_pid == parent_pid
|
||||
&& let Some(child_info) = collect_process_info_from_proc(pid, system)
|
||||
{
|
||||
children.push(child_info);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
children
|
||||
}
|
||||
|
||||
/// Read the parent PID from `/proc/{pid}/stat`.
///
/// The stat line is `pid (comm) state ppid ...`; since `comm` may itself
/// contain spaces or parentheses, parsing starts after the LAST `)` so the
/// field offsets cannot be shifted by an unusual process name.
///
/// Returns `None` if the file is unreadable (process exited, permissions)
/// or the line does not parse.
#[cfg(target_os = "linux")]
fn read_parent_pid_from_proc(pid: u32) -> Option<u32> {
    let stat = fs::read_to_string(format!("/proc/{pid}/stat")).ok()?;
    // Everything after the closing paren of the comm field.
    let after_comm = &stat[stat.rfind(')')? + 1..];
    // Relative to the paren: field 0 is state, field 1 is ppid.
    after_comm.split_whitespace().nth(1)?.parse().ok()
}
|
||||
|
||||
/// Collect process information from /proc files.
///
/// Builds a [`DetailedProcessInfo`] for `pid`, preferring data already
/// cached in `system` (cheap map lookup) and falling back to reading
/// `/proc/{pid}/comm` and `/proc/{pid}/status` directly when the process is
/// not in the sysinfo cache. Returns `None` if the process disappears while
/// its files are being read (the `?` on each `fs::read_to_string` short-circuits).
///
/// NOTE(review): intended for child-process listings — `parent_pid` is left
/// `None` and child processes / threads are not recursed into.
#[cfg(target_os = "linux")]
fn collect_process_info_from_proc(
    pid: u32,
    system: &sysinfo::System,
) -> Option<DetailedProcessInfo> {
    // Try to get basic info from sysinfo if it's already loaded (cheap lookup)
    // Otherwise read from /proc directly
    let (name, cpu_usage, mem_bytes, virtual_mem_bytes) =
        if let Some(proc) = system.process(sysinfo::Pid::from_u32(pid)) {
            (
                proc.name().to_string_lossy().to_string(),
                proc.cpu_usage(),
                proc.memory(),
                proc.virtual_memory(),
            )
        } else {
            // Process not in sysinfo cache, read minimal info from /proc
            let name = fs::read_to_string(format!("/proc/{pid}/comm"))
                .ok()?
                .trim()
                .to_string();

            // Read memory from /proc/{pid}/status (VmRSS = resident, VmSize =
            // virtual; both reported by the kernel in kB, hence the * 1024).
            let status_content = fs::read_to_string(format!("/proc/{pid}/status")).ok()?;
            let mut mem_bytes = 0u64;
            let mut virtual_mem_bytes = 0u64;

            for line in status_content.lines() {
                if let Some(value) = line.strip_prefix("VmRSS:") {
                    if let Some(kb) = value.split_whitespace().next() {
                        mem_bytes = kb.parse::<u64>().unwrap_or(0) * 1024;
                    }
                } else if let Some(value) = line.strip_prefix("VmSize:")
                    && let Some(kb) = value.split_whitespace().next()
                {
                    virtual_mem_bytes = kb.parse::<u64>().unwrap_or(0) * 1024;
                }
            }

            // CPU usage is 0.0 here: a single snapshot cannot compute a rate.
            (name, 0.0, mem_bytes, virtual_mem_bytes)
        };

    // Read command line; cmdline is NUL-separated, so join args with spaces.
    let command = fs::read_to_string(format!("/proc/{pid}/cmdline"))
        .ok()
        .map(|s| s.replace('\0', " ").trim().to_string())
        .unwrap_or_default();

    // Read status information (uid/gid/thread count/state).
    // NOTE(review): /proc/{pid}/status may be read a second time here (the
    // fallback branch above also reads it) — a second small read, not a bug.
    let status_content = fs::read_to_string(format!("/proc/{pid}/status")).ok()?;
    let mut uid = 0u32;
    let mut gid = 0u32;
    let mut thread_count = 0u32;
    let mut status = "Unknown".to_string();

    for line in status_content.lines() {
        if let Some(value) = line.strip_prefix("Uid:") {
            // First value on the Uid: line is the real UID.
            if let Some(uid_str) = value.split_whitespace().next() {
                uid = uid_str.parse().unwrap_or(0);
            }
        } else if let Some(value) = line.strip_prefix("Gid:") {
            // First value on the Gid: line is the real GID.
            if let Some(gid_str) = value.split_whitespace().next() {
                gid = gid_str.parse().unwrap_or(0);
            }
        } else if let Some(value) = line.strip_prefix("Threads:") {
            thread_count = value.trim().parse().unwrap_or(0);
        } else if let Some(value) = line.strip_prefix("State:") {
            // State: line looks like "S (sleeping)"; map the letter code.
            status = value
                .trim()
                .chars()
                .next()
                .map(|c| match c {
                    'R' => "Running",
                    'S' => "Sleeping",
                    'D' => "Disk Sleep",
                    'Z' => "Zombie",
                    'T' => "Stopped",
                    't' => "Tracing Stop",
                    'X' | 'x' => "Dead",
                    'K' => "Wakekill",
                    'W' => "Waking",
                    'P' => "Parked",
                    'I' => "Idle",
                    _ => "Unknown",
                })
                .unwrap_or("Unknown")
                .to_string();
        }
    }

    // Read start time from stat. Parsing starts after the last ')' so a comm
    // with spaces/parens cannot shift the field offsets.
    let start_time = if let Ok(stat) = fs::read_to_string(format!("/proc/{pid}/stat")) {
        let stat_end = stat.rfind(')')?;
        let fields: Vec<&str> = stat[stat_end + 1..].split_whitespace().collect();
        // Field 19 (0-indexed) is starttime in clock ticks since boot
        fields.get(19)?.parse::<u64>().ok()?
    } else {
        0
    };

    // Read I/O stats if available (reading /proc/{pid}/io often requires
    // privileges; None means "unavailable", not zero).
    let (read_bytes, write_bytes) =
        if let Ok(io_content) = fs::read_to_string(format!("/proc/{pid}/io")) {
            let mut read_bytes = None;
            let mut write_bytes = None;

            for line in io_content.lines() {
                if let Some(value) = line.strip_prefix("read_bytes:") {
                    read_bytes = value.trim().parse().ok();
                } else if let Some(value) = line.strip_prefix("write_bytes:") {
                    write_bytes = value.trim().parse().ok();
                }
            }

            (read_bytes, write_bytes)
        } else {
            (None, None)
        };

    // Read working directory (symlink target of /proc/{pid}/cwd).
    let working_directory = fs::read_link(format!("/proc/{pid}/cwd"))
        .ok()
        .map(|p| p.to_string_lossy().to_string());

    // Read executable path (symlink target of /proc/{pid}/exe).
    let executable_path = fs::read_link(format!("/proc/{pid}/exe"))
        .ok()
        .map(|p| p.to_string_lossy().to_string());

    Some(DetailedProcessInfo {
        pid,
        name,
        command,
        cpu_usage,
        mem_bytes,
        virtual_mem_bytes,
        shared_mem_bytes: None, // Would need to parse /proc/{pid}/statm for this
        thread_count,
        fd_count: None, // Would need to count entries in /proc/{pid}/fd
        status,
        parent_pid: None, // We already know the parent
        user_id: uid,
        group_id: gid,
        start_time,
        cpu_time_user: get_cpu_time_user(pid),
        cpu_time_system: get_cpu_time_system(pid),
        read_bytes,
        write_bytes,
        working_directory,
        executable_path,
        child_processes: Vec::new(), // Don't recurse
        threads: Vec::new(),         // Not collected for child processes
    })
}
|
||||
|
||||
/// Fallback for non-Linux platforms: enumerate children via sysinfo.
///
/// Without `/proc` we must scan sysinfo's full process table, which is less
/// efficient but keeps the feature working cross-platform. Fields that only
/// `/proc` can provide (uid/gid, per-process CPU times, fd count, threads)
/// are filled with placeholders.
#[cfg(not(target_os = "linux"))]
fn enumerate_child_processes_lightweight(
    parent_pid: u32,
    system: &sysinfo::System,
) -> Vec<DetailedProcessInfo> {
    system
        .processes()
        .iter()
        // Keep only direct children of `parent_pid`.
        .filter(|(_, p)| p.parent().map(|pp| pp.as_u32()) == Some(parent_pid))
        .map(|(child_pid, child_process)| {
            let disk = child_process.disk_usage();
            DetailedProcessInfo {
                pid: child_pid.as_u32(),
                name: child_process.name().to_string_lossy().to_string(),
                command: child_process
                    .cmd()
                    .iter()
                    .map(|s| s.to_string_lossy().to_string())
                    .collect::<Vec<_>>()
                    .join(" "),
                cpu_usage: child_process.cpu_usage(),
                mem_bytes: child_process.memory(),
                virtual_mem_bytes: child_process.virtual_memory(),
                shared_mem_bytes: None,
                thread_count: child_process
                    .tasks()
                    .map(|tasks| tasks.len() as u32)
                    .unwrap_or(0),
                fd_count: None,
                status: format!("{:?}", child_process.status()),
                parent_pid: Some(parent_pid),
                // No /proc to read accurate ids from; 0 is a placeholder.
                user_id: 0,
                group_id: 0,
                start_time: child_process.start_time(),
                // Per-process CPU times are only implemented via /proc.
                cpu_time_user: 0,
                cpu_time_system: 0,
                read_bytes: Some(disk.read_bytes),
                write_bytes: Some(disk.written_bytes),
                working_directory: child_process.cwd().map(|p| p.to_string_lossy().to_string()),
                executable_path: child_process.exe().map(|p| p.to_string_lossy().to_string()),
                child_processes: Vec::new(),
                threads: Vec::new(),
            }
        })
        .collect()
}
|
||||
|
||||
/// Collect thread information for a specific process (Linux only)
|
||||
#[cfg(target_os = "linux")]
|
||||
fn collect_thread_info(pid: u32) -> Vec<crate::types::ThreadInfo> {
|
||||
let mut threads = Vec::new();
|
||||
|
||||
// Read /proc/{pid}/task directory
|
||||
let task_dir = format!("/proc/{pid}/task");
|
||||
let Ok(entries) = fs::read_dir(&task_dir) else {
|
||||
return threads;
|
||||
};
|
||||
|
||||
for entry in entries.flatten() {
|
||||
let file_name = entry.file_name();
|
||||
let tid_str = file_name.to_string_lossy();
|
||||
let Ok(tid) = tid_str.parse::<u32>() else {
|
||||
continue;
|
||||
};
|
||||
|
||||
// Read thread name from comm
|
||||
let name = fs::read_to_string(format!("/proc/{pid}/task/{tid}/comm"))
|
||||
.unwrap_or_else(|_| format!("Thread-{tid}"))
|
||||
.trim()
|
||||
.to_string();
|
||||
|
||||
// Read thread stat for CPU times and status
|
||||
let stat_path = format!("/proc/{pid}/task/{tid}/stat");
|
||||
let Ok(stat_content) = fs::read_to_string(&stat_path) else {
|
||||
continue;
|
||||
};
|
||||
|
||||
// Parse stat file (similar format to process stat)
|
||||
// Fields: pid comm state ... utime stime ...
|
||||
let fields: Vec<&str> = stat_content.split_whitespace().collect();
|
||||
if fields.len() < 15 {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Field 2 is state (R, S, D, Z, T, etc.)
|
||||
let status = fields
|
||||
.get(2)
|
||||
.and_then(|s| s.chars().next())
|
||||
.map(|c| match c {
|
||||
'R' => "Running",
|
||||
'S' => "Sleeping",
|
||||
'D' => "Disk Sleep",
|
||||
'Z' => "Zombie",
|
||||
'T' => "Stopped",
|
||||
't' => "Tracing Stop",
|
||||
'X' | 'x' => "Dead",
|
||||
_ => "Unknown",
|
||||
})
|
||||
.unwrap_or("Unknown")
|
||||
.to_string();
|
||||
|
||||
// Field 13 is utime (user CPU time in clock ticks)
|
||||
// Field 14 is stime (system CPU time in clock ticks)
|
||||
let utime = fields
|
||||
.get(13)
|
||||
.and_then(|s| s.parse::<u64>().ok())
|
||||
.unwrap_or(0);
|
||||
let stime = fields
|
||||
.get(14)
|
||||
.and_then(|s| s.parse::<u64>().ok())
|
||||
.unwrap_or(0);
|
||||
|
||||
// Convert clock ticks to microseconds (assuming 100 Hz)
|
||||
// 1 tick = 10ms = 10,000 microseconds
|
||||
let cpu_time_user = utime * 10_000;
|
||||
let cpu_time_system = stime * 10_000;
|
||||
|
||||
threads.push(crate::types::ThreadInfo {
|
||||
tid,
|
||||
name,
|
||||
cpu_time_user,
|
||||
cpu_time_system,
|
||||
status,
|
||||
});
|
||||
}
|
||||
|
||||
threads
|
||||
}
|
||||
|
||||
/// Fallback for platforms without `/proc`: thread details are unavailable,
/// so always report an empty thread list.
#[cfg(not(target_os = "linux"))]
fn collect_thread_info(_pid: u32) -> Vec<crate::types::ThreadInfo> {
    vec![]
}
|
||||
|
||||
/// Collect detailed metrics for a specific process.
///
/// Refreshes ONLY the requested PID in the shared sysinfo state, snapshots
/// everything needed while holding the lock, then releases the lock before
/// the (slower) per-thread `/proc` walk.
///
/// # Errors
/// Returns a human-readable error string when the PID does not exist or the
/// system clock is before the Unix epoch.
pub async fn collect_process_metrics(
    pid: u32,
    state: &AppState,
) -> Result<ProcessMetricsResponse, String> {
    let mut system = state.sys.lock().await;

    // OPTIMIZED: Only refresh the specific process we care about
    // This avoids polluting the main process list with threads and prevents race conditions
    system.refresh_processes_specifics(
        ProcessesToUpdate::Some(&[sysinfo::Pid::from_u32(pid)]),
        false,
        ProcessRefreshKind::nothing()
            .with_memory()
            .with_cpu()
            .with_disk_usage(),
    );

    let process = system
        .process(sysinfo::Pid::from_u32(pid))
        .ok_or_else(|| format!("Process {pid} not found"))?;

    // Get current timestamp (used by clients to judge cache freshness).
    let cached_at = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map_err(|e| format!("Time error: {e}"))?
        .as_secs();

    // Extract all needed data from process while we have the lock
    let name = process.name().to_string_lossy().to_string();
    let command = process
        .cmd()
        .iter()
        .map(|s| s.to_string_lossy().to_string())
        .collect::<Vec<_>>()
        .join(" ");
    let cpu_usage = process.cpu_usage();
    let mem_bytes = process.memory();
    let virtual_mem_bytes = process.virtual_memory();
    let thread_count = process.tasks().map(|tasks| tasks.len() as u32).unwrap_or(0);
    let status = format!("{:?}", process.status());
    let parent_pid = process.parent().map(|p| p.as_u32());
    let start_time = process.start_time();

    // Read UID and GID directly from /proc/{pid}/status for accuracy
    #[cfg(target_os = "linux")]
    let (user_id, group_id) =
        if let Ok(status_content) = std::fs::read_to_string(format!("/proc/{pid}/status")) {
            let mut uid = 0u32;
            let mut gid = 0u32;

            for line in status_content.lines() {
                if let Some(value) = line.strip_prefix("Uid:") {
                    // Uid line format: "Uid: 1000 1000 1000 1000" (real, effective, saved, filesystem)
                    // We want the real UID (first value)
                    if let Some(uid_str) = value.split_whitespace().next() {
                        uid = uid_str.parse().unwrap_or(0);
                    }
                } else if let Some(value) = line.strip_prefix("Gid:") {
                    // Gid line format: "Gid: 1000 1000 1000 1000" (real, effective, saved, filesystem)
                    // We want the real GID (first value)
                    if let Some(gid_str) = value.split_whitespace().next() {
                        gid = gid_str.parse().unwrap_or(0);
                    }
                }
            }

            (uid, gid)
        } else {
            // Fallback if /proc read fails (permission issue)
            (0, 0)
        };

    #[cfg(not(target_os = "linux"))]
    let (user_id, group_id) = (0, 0);

    // Read I/O stats directly from /proc/{pid}/io
    // Use rchar/wchar to capture ALL I/O including cached reads (like htop/btop do)
    // sysinfo's total_read_bytes/total_written_bytes only count actual disk I/O
    #[cfg(target_os = "linux")]
    let (read_bytes, write_bytes) =
        if let Ok(io_content) = std::fs::read_to_string(format!("/proc/{pid}/io")) {
            let mut rchar = 0u64;
            let mut wchar = 0u64;

            for line in io_content.lines() {
                if let Some(value) = line.strip_prefix("rchar: ") {
                    rchar = value.trim().parse().unwrap_or(0);
                } else if let Some(value) = line.strip_prefix("wchar: ") {
                    wchar = value.trim().parse().unwrap_or(0);
                }
            }

            (Some(rchar), Some(wchar))
        } else {
            // Fallback to sysinfo if we can't read /proc (permissions)
            let disk_usage = process.disk_usage();
            (
                Some(disk_usage.total_read_bytes),
                Some(disk_usage.total_written_bytes),
            )
        };

    #[cfg(not(target_os = "linux"))]
    let (read_bytes, write_bytes) = {
        let disk_usage = process.disk_usage();
        (
            Some(disk_usage.total_read_bytes),
            Some(disk_usage.total_written_bytes),
        )
    };

    let working_directory = process.cwd().map(|p| p.to_string_lossy().to_string());
    let executable_path = process.exe().map(|p| p.to_string_lossy().to_string());

    // Collect child processes using lightweight /proc access
    // This avoids the expensive system.refresh_processes_specifics(All) call
    let child_processes = enumerate_child_processes_lightweight(pid, &system);

    // Release the system lock early (automatic when system goes out of scope)
    drop(system);

    // Collect thread information (Linux only) — done WITHOUT the lock held.
    let threads = collect_thread_info(pid);

    // Now construct the detailed info without holding the lock
    let detailed_info = DetailedProcessInfo {
        pid,
        name,
        command,
        cpu_usage,
        mem_bytes,
        virtual_mem_bytes,
        shared_mem_bytes: None, // Not available from sysinfo
        thread_count,
        fd_count: None, // Not available from sysinfo on all platforms
        status,
        parent_pid,
        user_id,
        group_id,
        start_time,
        cpu_time_user: get_cpu_time_user(pid),
        cpu_time_system: get_cpu_time_system(pid),
        read_bytes,
        write_bytes,
        working_directory,
        executable_path,
        child_processes,
        threads,
    };

    Ok(ProcessMetricsResponse {
        process: detailed_info,
        cached_at,
    })
}
|
||||
|
||||
/// Collect journal entries for a specific process
|
||||
pub fn collect_journal_entries(pid: u32) -> Result<JournalResponse, String> {
|
||||
let output = Command::new("journalctl")
|
||||
.args([
|
||||
&format!("_PID={pid}"),
|
||||
"--output=json",
|
||||
"--lines=100",
|
||||
"--no-pager",
|
||||
])
|
||||
.output()
|
||||
.map_err(|e| format!("Failed to execute journalctl: {e}"))?;
|
||||
|
||||
if !output.status.success() {
|
||||
return Err(format!(
|
||||
"journalctl failed: {}",
|
||||
String::from_utf8_lossy(&output.stderr)
|
||||
));
|
||||
}
|
||||
|
||||
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||
let mut entries = Vec::new();
|
||||
|
||||
// Parse each line as JSON (journalctl outputs one JSON object per line)
|
||||
for line in stdout.lines() {
|
||||
if line.trim().is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let json: serde_json::Value =
|
||||
serde_json::from_str(line).map_err(|e| format!("Failed to parse journal JSON: {e}"))?;
|
||||
|
||||
// Extract relevant fields
|
||||
let timestamp_str = json
|
||||
.get("__REALTIME_TIMESTAMP")
|
||||
.and_then(|v| v.as_str())
|
||||
.unwrap_or("0");
|
||||
|
||||
// Convert timestamp to ISO 8601 format
|
||||
let timestamp = if let Ok(ts_micros) = timestamp_str.parse::<u64>() {
|
||||
let ts_secs = ts_micros / 1_000_000;
|
||||
let ts_nanos = (ts_micros % 1_000_000) * 1000;
|
||||
let time = SystemTime::UNIX_EPOCH
|
||||
+ Duration::from_secs(ts_secs)
|
||||
+ Duration::from_nanos(ts_nanos);
|
||||
// Simple ISO 8601 format - we can improve this if needed
|
||||
format!("{time:?}")
|
||||
.replace("SystemTime { tv_sec: ", "")
|
||||
.replace(", tv_nsec: ", ".")
|
||||
.replace(" }", "")
|
||||
} else {
|
||||
timestamp_str.to_string()
|
||||
};
|
||||
|
||||
let priority = match json.get("PRIORITY").and_then(|v| v.as_str()) {
|
||||
Some("0") => LogLevel::Emergency,
|
||||
Some("1") => LogLevel::Alert,
|
||||
Some("2") => LogLevel::Critical,
|
||||
Some("3") => LogLevel::Error,
|
||||
Some("4") => LogLevel::Warning,
|
||||
Some("5") => LogLevel::Notice,
|
||||
Some("6") => LogLevel::Info,
|
||||
Some("7") => LogLevel::Debug,
|
||||
_ => LogLevel::Info,
|
||||
};
|
||||
|
||||
let message = json
|
||||
.get("MESSAGE")
|
||||
.and_then(|v| v.as_str())
|
||||
.unwrap_or("")
|
||||
.to_string();
|
||||
|
||||
let unit = json
|
||||
.get("_SYSTEMD_UNIT")
|
||||
.and_then(|v| v.as_str())
|
||||
.map(|s| s.to_string());
|
||||
|
||||
let entry_pid = json
|
||||
.get("_PID")
|
||||
.and_then(|v| v.as_str())
|
||||
.and_then(|s| s.parse::<u32>().ok());
|
||||
|
||||
let comm = json
|
||||
.get("_COMM")
|
||||
.and_then(|v| v.as_str())
|
||||
.map(|s| s.to_string());
|
||||
|
||||
let uid = json
|
||||
.get("_UID")
|
||||
.and_then(|v| v.as_str())
|
||||
.and_then(|s| s.parse::<u32>().ok());
|
||||
|
||||
let gid = json
|
||||
.get("_GID")
|
||||
.and_then(|v| v.as_str())
|
||||
.and_then(|s| s.parse::<u32>().ok());
|
||||
|
||||
entries.push(JournalEntry {
|
||||
timestamp,
|
||||
priority,
|
||||
message,
|
||||
unit,
|
||||
pid: entry_pid,
|
||||
comm,
|
||||
uid,
|
||||
gid,
|
||||
});
|
||||
}
|
||||
|
||||
// Sort by timestamp (newest first)
|
||||
entries.sort_by(|a, b| b.timestamp.cmp(&a.timestamp));
|
||||
|
||||
let response_timestamp = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.map_err(|e| format!("Time error: {e}"))?
|
||||
.as_secs();
|
||||
|
||||
let total_count = entries.len() as u32;
|
||||
let truncated = entries.len() >= 100; // We requested 100 lines, so if we got 100, there might be more
|
||||
|
||||
Ok(JournalResponse {
|
||||
entries,
|
||||
total_count,
|
||||
truncated,
|
||||
cached_at: response_timestamp,
|
||||
})
|
||||
}
|
||||
|
||||
@ -63,6 +63,11 @@ pub struct AppState {
|
||||
pub cache_metrics: Arc<Mutex<CacheEntry<crate::types::Metrics>>>,
|
||||
pub cache_disks: Arc<Mutex<CacheEntry<Vec<crate::types::DiskInfo>>>>,
|
||||
pub cache_processes: Arc<Mutex<CacheEntry<crate::types::ProcessesPayload>>>,
|
||||
|
||||
// Process detail caches (per-PID)
|
||||
pub cache_process_metrics:
|
||||
Arc<Mutex<HashMap<u32, CacheEntry<crate::types::ProcessMetricsResponse>>>>,
|
||||
pub cache_journal_entries: Arc<Mutex<HashMap<u32, CacheEntry<crate::types::JournalResponse>>>>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
@ -71,6 +76,12 @@ pub struct CacheEntry<T> {
|
||||
pub value: Option<T>,
|
||||
}
|
||||
|
||||
/// Delegates to [`CacheEntry::new`] so `CacheEntry` works with
/// `Default`-based construction (e.g. `entry(..).or_default()`).
impl<T> Default for CacheEntry<T> {
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
impl<T> CacheEntry<T> {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
@ -90,6 +101,12 @@ impl<T> CacheEntry<T> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Delegates to [`AppState::new`] so the agent state can be built wherever a
/// `Default` bound is expected.
impl Default for AppState {
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
impl AppState {
|
||||
pub fn new() -> Self {
|
||||
let sys = System::new();
|
||||
@ -116,6 +133,8 @@ impl AppState {
|
||||
cache_metrics: Arc::new(Mutex::new(CacheEntry::new())),
|
||||
cache_disks: Arc::new(Mutex::new(CacheEntry::new())),
|
||||
cache_processes: Arc::new(Mutex::new(CacheEntry::new())),
|
||||
cache_process_metrics: Arc::new(Mutex::new(HashMap::new())),
|
||||
cache_journal_entries: Arc::new(Mutex::new(HashMap::new())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -9,6 +9,8 @@ pub struct DiskInfo {
|
||||
pub name: String,
|
||||
pub total: u64,
|
||||
pub available: u64,
|
||||
pub temperature: Option<f32>,
|
||||
pub is_partition: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
@ -47,3 +49,76 @@ pub struct ProcessesPayload {
|
||||
pub process_count: usize,
|
||||
pub top_processes: Vec<ProcessInfo>,
|
||||
}
|
||||
|
||||
/// Per-thread details for one process, serialized to clients as part of
/// [`DetailedProcessInfo`]. Populated from `/proc/{pid}/task` on Linux;
/// empty on other platforms.
#[derive(Debug, Clone, Serialize)]
pub struct ThreadInfo {
    /// Thread ID (TID).
    pub tid: u32,
    /// Thread name (from /proc/{pid}/task/{tid}/comm).
    pub name: String,
    /// User CPU time in microseconds.
    pub cpu_time_user: u64,
    /// System CPU time in microseconds.
    pub cpu_time_system: u64,
    /// Thread status label (Running, Sleeping, etc.).
    pub status: String,
}
|
||||
|
||||
/// Full per-process detail payload sent to clients on request.
///
/// Fields that cannot be determined on every platform (or without extra
/// privileges) are `Option`s; `None` means "unavailable", not zero.
#[derive(Debug, Clone, Serialize)]
pub struct DetailedProcessInfo {
    pub pid: u32,
    /// Short process name.
    pub name: String,
    /// Full command line, arguments joined with spaces.
    pub command: String,
    /// CPU usage percentage as reported by the collector.
    pub cpu_usage: f32,
    /// Resident memory in bytes.
    pub mem_bytes: u64,
    /// Virtual memory in bytes.
    pub virtual_mem_bytes: u64,
    /// Shared memory in bytes; currently never populated (would require
    /// parsing /proc/{pid}/statm).
    pub shared_mem_bytes: Option<u64>,
    pub thread_count: u32,
    /// Open file-descriptor count; currently never populated.
    pub fd_count: Option<u32>,
    /// Human-readable state (Running, Sleeping, Zombie, ...).
    pub status: String,
    /// Parent PID; `None` for child entries where the parent is implied.
    pub parent_pid: Option<u32>,
    pub user_id: u32,
    pub group_id: u32,
    pub start_time: u64, // Unix timestamp
    pub cpu_time_user: u64, // Microseconds
    pub cpu_time_system: u64, // Microseconds
    pub read_bytes: Option<u64>,
    pub write_bytes: Option<u64>,
    pub working_directory: Option<String>,
    pub executable_path: Option<String>,
    /// Direct children only (collection does not recurse).
    pub child_processes: Vec<DetailedProcessInfo>,
    /// Per-thread details; Linux only, empty elsewhere.
    pub threads: Vec<ThreadInfo>,
}
|
||||
|
||||
/// Response wrapper for a `get_process_metrics` request, carrying the
/// collection timestamp so clients can judge staleness.
#[derive(Debug, Clone, Serialize)]
pub struct ProcessMetricsResponse {
    pub process: DetailedProcessInfo,
    pub cached_at: u64, // Unix timestamp when this data was cached
}
|
||||
|
||||
/// One systemd journal record, parsed from `journalctl --output=json`.
#[derive(Debug, Clone, Serialize)]
pub struct JournalEntry {
    /// Formatted timestamp string.
    /// NOTE(review): currently "seconds.nanoseconds" since the Unix epoch,
    /// not ISO 8601 as previously documented.
    pub timestamp: String,
    pub priority: LogLevel,
    pub message: String,
    pub unit: Option<String>, // systemd unit name
    pub pid: Option<u32>,
    pub comm: Option<String>, // process command name
    pub uid: Option<u32>,
    pub gid: Option<u32>,
}
|
||||
|
||||
/// Journal/syslog priority levels; discriminants mirror the numeric syslog
/// priorities (0 = most severe). NOTE(review): serde serializes unit
/// variants by NAME, not by the numeric discriminant.
#[derive(Debug, Clone, Serialize)]
pub enum LogLevel {
    Emergency = 0,
    Alert = 1,
    Critical = 2,
    Error = 3,
    Warning = 4,
    Notice = 5,
    Info = 6,
    Debug = 7,
}
|
||||
|
||||
/// Response wrapper for a `get_journal_entries` request.
#[derive(Debug, Clone, Serialize)]
pub struct JournalResponse {
    /// Entries sorted newest-first.
    pub entries: Vec<JournalEntry>,
    pub total_count: u32,
    /// True when the line limit was hit, i.e. older entries may exist.
    pub truncated: bool,
    pub cached_at: u64, // Unix timestamp when this data was cached
}
|
||||
|
||||
@ -17,6 +17,8 @@ use crate::proto::pb;
|
||||
use crate::state::AppState;
|
||||
|
||||
// Compression threshold based on typical payload size: payloads of at least
// this many bytes are compressed before being sent. 768 is the production
// value; the commented-out 50_000 below was a temporary testing override.
//const COMPRESSION_THRESHOLD: usize = 50_000;
|
||||
const COMPRESSION_THRESHOLD: usize = 768;
|
||||
|
||||
// Reusable buffer for compression to avoid allocations
|
||||
@ -111,6 +113,90 @@ async fn handle_socket(mut socket: WebSocket, state: AppState) {
|
||||
}
|
||||
drop(cache); // Explicit drop to release mutex early
|
||||
}
|
||||
Message::Text(ref text) if text.starts_with("get_process_metrics:") => {
|
||||
if let Some(pid_str) = text.strip_prefix("get_process_metrics:")
|
||||
&& let Ok(pid) = pid_str.parse::<u32>()
|
||||
{
|
||||
let ttl = std::time::Duration::from_millis(250); // 250ms TTL
|
||||
|
||||
// Check cache first
|
||||
{
|
||||
let cache = state.cache_process_metrics.lock().await;
|
||||
if let Some(entry) = cache.get(&pid)
|
||||
&& entry.is_fresh(ttl)
|
||||
&& let Some(cached_response) = entry.get()
|
||||
{
|
||||
let _ = send_json(&mut socket, cached_response).await;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// Collect fresh data
|
||||
match crate::metrics::collect_process_metrics(pid, &state).await {
|
||||
Ok(response) => {
|
||||
// Cache the response
|
||||
{
|
||||
let mut cache = state.cache_process_metrics.lock().await;
|
||||
cache
|
||||
.entry(pid)
|
||||
.or_insert_with(crate::state::CacheEntry::new)
|
||||
.set(response.clone());
|
||||
}
|
||||
let _ = send_json(&mut socket, &response).await;
|
||||
}
|
||||
Err(err) => {
|
||||
let error_response = serde_json::json!({
|
||||
"error": err,
|
||||
"request": "get_process_metrics",
|
||||
"pid": pid
|
||||
});
|
||||
let _ = send_json(&mut socket, &error_response).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Message::Text(ref text) if text.starts_with("get_journal_entries:") => {
|
||||
if let Some(pid_str) = text.strip_prefix("get_journal_entries:")
|
||||
&& let Ok(pid) = pid_str.parse::<u32>()
|
||||
{
|
||||
let ttl = std::time::Duration::from_secs(1); // 1s TTL
|
||||
|
||||
// Check cache first
|
||||
{
|
||||
let cache = state.cache_journal_entries.lock().await;
|
||||
if let Some(entry) = cache.get(&pid)
|
||||
&& entry.is_fresh(ttl)
|
||||
&& let Some(cached_response) = entry.get()
|
||||
{
|
||||
let _ = send_json(&mut socket, cached_response).await;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// Collect fresh data
|
||||
match crate::metrics::collect_journal_entries(pid) {
|
||||
Ok(response) => {
|
||||
// Cache the response
|
||||
{
|
||||
let mut cache = state.cache_journal_entries.lock().await;
|
||||
cache
|
||||
.entry(pid)
|
||||
.or_insert_with(crate::state::CacheEntry::new)
|
||||
.set(response.clone());
|
||||
}
|
||||
let _ = send_json(&mut socket, &response).await;
|
||||
}
|
||||
Err(err) => {
|
||||
let error_response = serde_json::json!({
|
||||
"error": err,
|
||||
"request": "get_journal_entries",
|
||||
"pid": pid
|
||||
});
|
||||
let _ = send_json(&mut socket, &error_response).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Message::Close(_) => break,
|
||||
_ => {}
|
||||
}
|
||||
|
||||
132
socktop_agent/tests/cache_tests.rs
Normal file
132
socktop_agent/tests/cache_tests.rs
Normal file
@ -0,0 +1,132 @@
|
||||
//! Tests for the process cache functionality
|
||||
|
||||
use socktop_agent::state::{AppState, CacheEntry};
|
||||
use socktop_agent::types::{DetailedProcessInfo, JournalResponse, ProcessMetricsResponse};
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_process_cache_ttl() {
|
||||
let state = AppState::new();
|
||||
let pid = 12345;
|
||||
|
||||
// Create mock data
|
||||
let process_info = DetailedProcessInfo {
|
||||
pid,
|
||||
name: "test_process".to_string(),
|
||||
command: "test command".to_string(),
|
||||
cpu_usage: 50.0,
|
||||
mem_bytes: 1024 * 1024,
|
||||
virtual_mem_bytes: 2048 * 1024,
|
||||
shared_mem_bytes: Some(512 * 1024),
|
||||
thread_count: 4,
|
||||
fd_count: Some(10),
|
||||
status: "Running".to_string(),
|
||||
parent_pid: Some(1),
|
||||
user_id: 1000,
|
||||
group_id: 1000,
|
||||
start_time: 1234567890,
|
||||
cpu_time_user: 100000,
|
||||
cpu_time_system: 50000,
|
||||
read_bytes: Some(1024),
|
||||
write_bytes: Some(2048),
|
||||
working_directory: Some("/tmp".to_string()),
|
||||
executable_path: Some("/usr/bin/test".to_string()),
|
||||
child_processes: vec![],
|
||||
threads: vec![],
|
||||
};
|
||||
|
||||
let metrics_response = ProcessMetricsResponse {
|
||||
process: process_info,
|
||||
cached_at: 1234567890,
|
||||
};
|
||||
|
||||
let journal_response = JournalResponse {
|
||||
entries: vec![],
|
||||
total_count: 0,
|
||||
truncated: false,
|
||||
cached_at: 1234567890,
|
||||
};
|
||||
|
||||
// Test process metrics caching
|
||||
{
|
||||
let mut cache = state.cache_process_metrics.lock().await;
|
||||
cache
|
||||
.entry(pid)
|
||||
.or_insert_with(CacheEntry::new)
|
||||
.set(metrics_response.clone());
|
||||
}
|
||||
|
||||
// Should get cached value immediately
|
||||
{
|
||||
let cache = state.cache_process_metrics.lock().await;
|
||||
let ttl = Duration::from_millis(250);
|
||||
if let Some(entry) = cache.get(&pid) {
|
||||
assert!(entry.is_fresh(ttl));
|
||||
assert!(entry.get().is_some());
|
||||
assert_eq!(entry.get().unwrap().process.pid, pid);
|
||||
} else {
|
||||
panic!("Expected cached entry");
|
||||
}
|
||||
}
|
||||
println!("✓ Process metrics cached and retrieved successfully");
|
||||
|
||||
// Test journal entries caching
|
||||
{
|
||||
let mut cache = state.cache_journal_entries.lock().await;
|
||||
cache
|
||||
.entry(pid)
|
||||
.or_insert_with(CacheEntry::new)
|
||||
.set(journal_response.clone());
|
||||
}
|
||||
|
||||
// Should get cached value immediately
|
||||
{
|
||||
let cache = state.cache_journal_entries.lock().await;
|
||||
let ttl = Duration::from_secs(1);
|
||||
if let Some(entry) = cache.get(&pid) {
|
||||
assert!(entry.is_fresh(ttl));
|
||||
assert!(entry.get().is_some());
|
||||
assert_eq!(entry.get().unwrap().total_count, 0);
|
||||
} else {
|
||||
panic!("Expected cached entry");
|
||||
}
|
||||
}
|
||||
println!("✓ Journal entries cached and retrieved successfully");
|
||||
|
||||
// Wait for process metrics to expire (250ms + buffer)
|
||||
sleep(Duration::from_millis(300)).await;
|
||||
|
||||
// Process metrics should be expired now
|
||||
{
|
||||
let cache = state.cache_process_metrics.lock().await;
|
||||
let ttl = Duration::from_millis(250);
|
||||
if let Some(entry) = cache.get(&pid) {
|
||||
assert!(!entry.is_fresh(ttl));
|
||||
}
|
||||
}
|
||||
println!("✓ Process metrics correctly expired after TTL");
|
||||
|
||||
// Journal entries should still be valid (1s TTL)
|
||||
{
|
||||
let cache = state.cache_journal_entries.lock().await;
|
||||
let ttl = Duration::from_secs(1);
|
||||
if let Some(entry) = cache.get(&pid) {
|
||||
assert!(entry.is_fresh(ttl));
|
||||
}
|
||||
}
|
||||
println!("✓ Journal entries still valid within TTL");
|
||||
|
||||
// Wait for journal entries to expire (additional 800ms to reach 1s total)
|
||||
sleep(Duration::from_millis(800)).await;
|
||||
|
||||
// Journal entries should be expired now
|
||||
{
|
||||
let cache = state.cache_journal_entries.lock().await;
|
||||
let ttl = Duration::from_secs(1);
|
||||
if let Some(entry) = cache.get(&pid) {
|
||||
assert!(!entry.is_fresh(ttl));
|
||||
}
|
||||
}
|
||||
println!("✓ Journal entries correctly expired after TTL");
|
||||
}
|
||||
89
socktop_agent/tests/process_details.rs
Normal file
89
socktop_agent/tests/process_details.rs
Normal file
@ -0,0 +1,89 @@
|
||||
//! Tests for process detail collection functionality
|
||||
|
||||
use socktop_agent::metrics::{collect_journal_entries, collect_process_metrics};
|
||||
use socktop_agent::state::AppState;
|
||||
use std::process;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_collect_process_metrics_self() {
|
||||
// Test collecting metrics for our own process
|
||||
let pid = process::id();
|
||||
let state = AppState::new();
|
||||
|
||||
match collect_process_metrics(pid, &state).await {
|
||||
Ok(response) => {
|
||||
assert_eq!(response.process.pid, pid);
|
||||
assert!(!response.process.name.is_empty());
|
||||
// Command might be empty on some systems, so don't assert on it
|
||||
assert!(response.cached_at > 0);
|
||||
println!(
|
||||
"✓ Process metrics collected for PID {}: {} ({})",
|
||||
pid, response.process.name, response.process.command
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
// This might fail if sysinfo can't find the process, which is possible
|
||||
println!("⚠ Warning: Failed to collect process metrics for self: {e}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_collect_journal_entries_self() {
|
||||
// Test collecting journal entries for our own process
|
||||
let pid = process::id();
|
||||
|
||||
match collect_journal_entries(pid) {
|
||||
Ok(response) => {
|
||||
assert!(response.cached_at > 0);
|
||||
println!(
|
||||
"✓ Journal entries collected for PID {}: {} entries",
|
||||
pid, response.total_count
|
||||
);
|
||||
if !response.entries.is_empty() {
|
||||
let entry = &response.entries[0];
|
||||
println!(" Latest entry: {}", entry.message);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
// This might fail if journalctl is not available or restricted
|
||||
println!("⚠ Warning: Failed to collect journal entries for self: {e}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_collect_process_metrics_invalid_pid() {
|
||||
// Test with an invalid PID
|
||||
let invalid_pid = 999999;
|
||||
let state = AppState::new();
|
||||
|
||||
match collect_process_metrics(invalid_pid, &state).await {
|
||||
Ok(_) => {
|
||||
println!("⚠ Warning: Unexpectedly found process for invalid PID {invalid_pid}");
|
||||
}
|
||||
Err(e) => {
|
||||
println!("✓ Correctly failed for invalid PID {invalid_pid}: {e}");
|
||||
assert!(e.contains("not found"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_collect_journal_entries_invalid_pid() {
|
||||
// Test with an invalid PID - journalctl might still return empty results
|
||||
let invalid_pid = 999999;
|
||||
|
||||
match collect_journal_entries(invalid_pid) {
|
||||
Ok(response) => {
|
||||
println!(
|
||||
"✓ Journal query completed for invalid PID {} (empty result expected): {} entries",
|
||||
invalid_pid, response.total_count
|
||||
);
|
||||
// Should be empty or very few entries
|
||||
}
|
||||
Err(e) => {
|
||||
println!("✓ Journal query failed for invalid PID {invalid_pid}: {e}");
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1,4 +1,3 @@
|
||||
use assert_cmd::prelude::*;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::process::Command;
|
||||
@ -17,7 +16,7 @@ fn generates_self_signed_cert_and_key_in_xdg_path() {
|
||||
let xdg = tmpdir.path().to_path_buf();
|
||||
|
||||
// Run the agent once with --enableSSL, short timeout so it exits quickly when killed
|
||||
let mut cmd = Command::cargo_bin("socktop_agent").expect("binary exists");
|
||||
let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("socktop_agent"));
|
||||
// Bind to an ephemeral port (-p 0) to avoid conflicts/flakes
|
||||
cmd.env("XDG_CONFIG_HOME", &xdg)
|
||||
.arg("--enableSSL")
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "socktop_connector"
|
||||
version = "0.1.6"
|
||||
version = "1.50.0"
|
||||
edition = "2024"
|
||||
license = "MIT"
|
||||
description = "WebSocket connector library for socktop agent communication"
|
||||
|
||||
@ -66,7 +66,7 @@ use tokio_tungstenite::{Connector, connect_async_tls_with_config};
|
||||
use crate::error::{ConnectorError, Result};
|
||||
use crate::types::{AgentRequest, AgentResponse};
|
||||
#[cfg(any(feature = "networking", feature = "wasm"))]
|
||||
use crate::types::{DiskInfo, Metrics, ProcessInfo, ProcessesPayload};
|
||||
use crate::types::{DiskInfo, Metrics, ProcessInfo, ProcessesPayload, ProcessMetricsResponse, JournalResponse};
|
||||
#[cfg(feature = "tls")]
|
||||
fn ensure_crypto_provider() {
|
||||
use std::sync::Once;
|
||||
@ -186,6 +186,18 @@ impl SocktopConnector {
|
||||
.ok_or_else(|| ConnectorError::invalid_response("Failed to get processes"))?;
|
||||
Ok(AgentResponse::Processes(processes))
|
||||
}
|
||||
AgentRequest::ProcessMetrics { pid } => {
|
||||
let process_metrics = request_process_metrics(stream, pid)
|
||||
.await
|
||||
.ok_or_else(|| ConnectorError::invalid_response("Failed to get process metrics"))?;
|
||||
Ok(AgentResponse::ProcessMetrics(process_metrics))
|
||||
}
|
||||
AgentRequest::JournalEntries { pid } => {
|
||||
let journal_entries = request_journal_entries(stream, pid)
|
||||
.await
|
||||
.ok_or_else(|| ConnectorError::invalid_response("Failed to get journal entries"))?;
|
||||
Ok(AgentResponse::JournalEntries(journal_entries))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -333,9 +345,7 @@ async fn connect_with_ca_and_config(
|
||||
}
|
||||
}
|
||||
cfg.dangerous().set_certificate_verifier(Arc::new(NoVerify));
|
||||
eprintln!(
|
||||
"socktop_connector: hostname verification disabled (default). Set SOCKTOP_VERIFY_NAME=1 to enable strict SAN checking."
|
||||
);
|
||||
// Note: hostname verification disabled (default). Set SOCKTOP_VERIFY_NAME=1 to enable strict SAN checking.
|
||||
}
|
||||
let cfg = Arc::new(cfg);
|
||||
let (ws, _) = connect_async_tls_with_config(
|
||||
@ -439,6 +449,38 @@ async fn request_processes(ws: &mut WsStream) -> Option<ProcessesPayload> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Send a `get_process_metrics:{pid}` request and await a JSON
/// `ProcessMetricsResponse`.
///
/// The agent may reply with either a gzip-compressed binary frame or a plain
/// text frame; both are decoded as JSON. Returns `None` on any send,
/// receive, decompression, or parse failure.
#[cfg(feature = "networking")]
async fn request_process_metrics(ws: &mut WsStream, pid: u32) -> Option<ProcessMetricsResponse> {
    // Inline format-arg capture, for consistency with the handlers module.
    let request = format!("get_process_metrics:{pid}");
    if ws.send(Message::Text(request)).await.is_err() {
        return None;
    }
    match ws.next().await {
        // Compressed reply: gunzip first, then parse.
        Some(Ok(Message::Binary(b))) => gunzip_to_string(&b)
            .ok()
            .and_then(|s| serde_json::from_str::<ProcessMetricsResponse>(&s).ok()),
        // Uncompressed reply: parse the text frame directly.
        Some(Ok(Message::Text(json))) => serde_json::from_str::<ProcessMetricsResponse>(&json).ok(),
        _ => None,
    }
}
|
||||
|
||||
/// Send a `get_journal_entries:{pid}` request and await a JSON
/// `JournalResponse`.
///
/// The agent may reply with either a gzip-compressed binary frame or a plain
/// text frame; both are decoded as JSON. Returns `None` on any send,
/// receive, decompression, or parse failure.
#[cfg(feature = "networking")]
async fn request_journal_entries(ws: &mut WsStream, pid: u32) -> Option<JournalResponse> {
    // Inline format-arg capture, for consistency with the handlers module.
    let request = format!("get_journal_entries:{pid}");
    if ws.send(Message::Text(request)).await.is_err() {
        return None;
    }
    match ws.next().await {
        // Compressed reply: gunzip first, then parse.
        Some(Ok(Message::Binary(b))) => gunzip_to_string(&b)
            .ok()
            .and_then(|s| serde_json::from_str::<JournalResponse>(&s).ok()),
        // Uncompressed reply: parse the text frame directly.
        Some(Ok(Message::Text(json))) => serde_json::from_str::<JournalResponse>(&json).ok(),
        _ => None,
    }
}
|
||||
|
||||
// Decompress a gzip-compressed binary frame into a String.
|
||||
/// Unified gzip decompression to string for both networking and WASM
|
||||
#[cfg(any(feature = "networking", feature = "wasm"))]
|
||||
@ -807,6 +849,20 @@ impl SocktopConnector {
|
||||
Ok(AgentResponse::Processes(processes))
|
||||
}
|
||||
}
|
||||
AgentRequest::ProcessMetrics { pid: _ } => {
|
||||
// Parse JSON response for process metrics
|
||||
let process_metrics: ProcessMetricsResponse = serde_json::from_str(&response).map_err(|e| {
|
||||
ConnectorError::serialization_error(format!("Failed to parse process metrics: {e}"))
|
||||
})?;
|
||||
Ok(AgentResponse::ProcessMetrics(process_metrics))
|
||||
}
|
||||
AgentRequest::JournalEntries { pid: _ } => {
|
||||
// Parse JSON response for journal entries
|
||||
let journal_entries: JournalResponse = serde_json::from_str(&response).map_err(|e| {
|
||||
ConnectorError::serialization_error(format!("Failed to parse journal entries: {e}"))
|
||||
})?;
|
||||
Ok(AgentResponse::JournalEntries(journal_entries))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -6,7 +6,8 @@ use crate::{AgentRequest, AgentResponse};
|
||||
|
||||
#[cfg(feature = "networking")]
|
||||
use crate::networking::{
|
||||
WsStream, connect_to_agent, request_disks, request_metrics, request_processes,
|
||||
WsStream, connect_to_agent, request_disks, request_journal_entries, request_metrics,
|
||||
request_process_metrics, request_processes,
|
||||
};
|
||||
|
||||
#[cfg(all(feature = "wasm", not(feature = "networking")))]
|
||||
@ -72,6 +73,20 @@ impl SocktopConnector {
|
||||
.ok_or_else(|| ConnectorError::invalid_response("Failed to get processes"))?;
|
||||
Ok(AgentResponse::Processes(processes))
|
||||
}
|
||||
AgentRequest::ProcessMetrics { pid } => {
|
||||
let process_metrics =
|
||||
request_process_metrics(stream, pid).await.ok_or_else(|| {
|
||||
ConnectorError::invalid_response("Failed to get process metrics")
|
||||
})?;
|
||||
Ok(AgentResponse::ProcessMetrics(process_metrics))
|
||||
}
|
||||
AgentRequest::JournalEntries { pid } => {
|
||||
let journal_entries =
|
||||
request_journal_entries(stream, pid).await.ok_or_else(|| {
|
||||
ConnectorError::invalid_response("Failed to get journal entries")
|
||||
})?;
|
||||
Ok(AgentResponse::JournalEntries(journal_entries))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -161,7 +161,8 @@ pub use config::ConnectorConfig;
|
||||
pub use connector_impl::SocktopConnector;
|
||||
pub use error::{ConnectorError, Result};
|
||||
pub use types::{
|
||||
AgentRequest, AgentResponse, DiskInfo, GpuInfo, Metrics, NetworkInfo, ProcessInfo,
|
||||
AgentRequest, AgentResponse, DetailedProcessInfo, DiskInfo, GpuInfo, JournalEntry,
|
||||
JournalResponse, LogLevel, Metrics, NetworkInfo, ProcessInfo, ProcessMetricsResponse,
|
||||
ProcessesPayload,
|
||||
};
|
||||
|
||||
|
||||
@ -152,9 +152,7 @@ async fn connect_with_ca_and_config(
|
||||
}
|
||||
}
|
||||
cfg.dangerous().set_certificate_verifier(Arc::new(NoVerify));
|
||||
eprintln!(
|
||||
"socktop_connector: hostname verification disabled (default). Set SOCKTOP_VERIFY_NAME=1 to enable strict SAN checking."
|
||||
);
|
||||
// Note: hostname verification disabled (default). Set SOCKTOP_VERIFY_NAME=1 to enable strict SAN checking.
|
||||
}
|
||||
let cfg = Arc::new(cfg);
|
||||
let (ws, _) = tokio_tungstenite::connect_async_tls_with_config(
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
//! WebSocket request handlers for native (non-WASM) environments.
|
||||
|
||||
use crate::networking::WsStream;
|
||||
use crate::types::{JournalResponse, ProcessMetricsResponse};
|
||||
use crate::utils::{gunzip_to_string, gunzip_to_vec, is_gzip};
|
||||
use crate::{DiskInfo, Metrics, ProcessInfo, ProcessesPayload, pb};
|
||||
|
||||
@ -82,3 +83,36 @@ pub async fn request_processes(ws: &mut WsStream) -> Option<ProcessesPayload> {
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Send a "get_process_metrics:{pid}" request and await a JSON ProcessMetricsResponse
|
||||
pub async fn request_process_metrics(
|
||||
ws: &mut WsStream,
|
||||
pid: u32,
|
||||
) -> Option<ProcessMetricsResponse> {
|
||||
let request = format!("get_process_metrics:{pid}");
|
||||
if ws.send(Message::Text(request)).await.is_err() {
|
||||
return None;
|
||||
}
|
||||
match ws.next().await {
|
||||
Some(Ok(Message::Binary(b))) => gunzip_to_string(&b)
|
||||
.ok()
|
||||
.and_then(|s| serde_json::from_str::<ProcessMetricsResponse>(&s).ok()),
|
||||
Some(Ok(Message::Text(json))) => serde_json::from_str::<ProcessMetricsResponse>(&json).ok(),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Send a "get_journal_entries:{pid}" request and await a JSON JournalResponse
|
||||
pub async fn request_journal_entries(ws: &mut WsStream, pid: u32) -> Option<JournalResponse> {
|
||||
let request = format!("get_journal_entries:{pid}");
|
||||
if ws.send(Message::Text(request)).await.is_err() {
|
||||
return None;
|
||||
}
|
||||
match ws.next().await {
|
||||
Some(Ok(Message::Binary(b))) => gunzip_to_string(&b)
|
||||
.ok()
|
||||
.and_then(|s| serde_json::from_str::<JournalResponse>(&s).ok()),
|
||||
Some(Ok(Message::Text(json))) => serde_json::from_str::<JournalResponse>(&json).ok(),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
@ -15,6 +15,10 @@ pub struct DiskInfo {
|
||||
pub name: String,
|
||||
pub total: u64,
|
||||
pub available: u64,
|
||||
#[serde(default)]
|
||||
pub temperature: Option<f32>,
|
||||
#[serde(default)]
|
||||
pub is_partition: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
@ -73,6 +77,79 @@ pub struct ProcessesPayload {
|
||||
pub top_processes: Vec<ProcessInfo>,
|
||||
}
|
||||
|
||||
/// Per-thread statistics for a single process.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ThreadInfo {
    pub tid: u32,             // Thread ID
    pub name: String,         // Thread name (from /proc/{pid}/task/{tid}/comm)
    pub cpu_time_user: u64,   // User CPU time in microseconds
    pub cpu_time_system: u64, // System CPU time in microseconds
    pub status: String,       // Thread status (Running, Sleeping, etc.)
}
|
||||
|
||||
/// Detailed information about a single process, including a recursive list
/// of its child processes and per-thread statistics.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct DetailedProcessInfo {
    pub pid: u32,
    pub name: String,
    pub command: String,
    pub cpu_usage: f32,
    pub mem_bytes: u64,              // resident memory
    pub virtual_mem_bytes: u64,      // virtual address-space size
    pub shared_mem_bytes: Option<u64>,
    pub thread_count: u32,
    pub fd_count: Option<u32>,       // open file descriptors, when available
    pub status: String,              // e.g. "Running" (see ThreadInfo::status)
    pub parent_pid: Option<u32>,
    pub user_id: u32,
    pub group_id: u32,
    pub start_time: u64,             // Unix timestamp
    pub cpu_time_user: u64,          // Microseconds
    pub cpu_time_system: u64,        // Microseconds
    pub read_bytes: Option<u64>,     // cumulative I/O read, when available
    pub write_bytes: Option<u64>,    // cumulative I/O written, when available
    pub working_directory: Option<String>,
    pub executable_path: Option<String>,
    pub child_processes: Vec<DetailedProcessInfo>, // recursive: each child carries full detail
    pub threads: Vec<ThreadInfo>,
}
|
||||
|
||||
/// Response envelope for a process-metrics request: the detailed process
/// record plus the time it entered the agent's cache.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ProcessMetricsResponse {
    pub process: DetailedProcessInfo,
    pub cached_at: u64, // Unix timestamp when this data was cached
}
|
||||
|
||||
/// A single journal (systemd journal) log record associated with a process.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct JournalEntry {
    pub timestamp: String, // ISO 8601 formatted timestamp
    pub priority: LogLevel,
    pub message: String,
    pub unit: Option<String>, // systemd unit name
    pub pid: Option<u32>,
    pub comm: Option<String>, // process command name
    pub uid: Option<u32>,
    pub gid: Option<u32>,
}
|
||||
|
||||
/// Syslog-style priority of a journal entry (0 = most severe).
///
/// NOTE(review): the explicit discriminants mirror syslog/journald numeric
/// priorities, but the default serde derive (de)serializes this enum by
/// variant *name* ("Info", ...), not by number. Confirm this matches the
/// agent's wire format — numeric encoding would require serde_repr.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub enum LogLevel {
    Emergency = 0,
    Alert = 1,
    Critical = 2,
    Error = 3,
    Warning = 4,
    Notice = 5,
    Info = 6,
    Debug = 7,
}
|
||||
|
||||
/// Response envelope for a journal-entries request.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct JournalResponse {
    pub entries: Vec<JournalEntry>,
    pub total_count: u32, // NOTE(review): presumably the match count before truncation — confirm against the agent
    pub truncated: bool,  // presumably true when `entries` was cut short — confirm against the agent
    pub cached_at: u64,   // Unix timestamp when this data was cached
}
|
||||
|
||||
/// Request types that can be sent to the agent
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
#[serde(tag = "type")]
|
||||
@ -83,6 +160,10 @@ pub enum AgentRequest {
|
||||
Disks,
|
||||
#[serde(rename = "processes")]
|
||||
Processes,
|
||||
#[serde(rename = "process_metrics")]
|
||||
ProcessMetrics { pid: u32 },
|
||||
#[serde(rename = "journal_entries")]
|
||||
JournalEntries { pid: u32 },
|
||||
}
|
||||
|
||||
impl AgentRequest {
|
||||
@ -92,6 +173,8 @@ impl AgentRequest {
|
||||
AgentRequest::Metrics => "get_metrics".to_string(),
|
||||
AgentRequest::Disks => "get_disks".to_string(),
|
||||
AgentRequest::Processes => "get_processes".to_string(),
|
||||
AgentRequest::ProcessMetrics { pid } => format!("get_process_metrics:{pid}"),
|
||||
AgentRequest::JournalEntries { pid } => format!("get_journal_entries:{pid}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -106,4 +189,8 @@ pub enum AgentResponse {
|
||||
Disks(Vec<DiskInfo>),
|
||||
#[serde(rename = "processes")]
|
||||
Processes(ProcessesPayload),
|
||||
#[serde(rename = "process_metrics")]
|
||||
ProcessMetrics(ProcessMetricsResponse),
|
||||
#[serde(rename = "journal_entries")]
|
||||
JournalEntries(JournalResponse),
|
||||
}
|
||||
|
||||
@ -3,7 +3,10 @@
|
||||
use crate::error::{ConnectorError, Result};
|
||||
use crate::pb::Processes;
|
||||
use crate::utils::{gunzip_to_string, gunzip_to_vec, is_gzip, log_debug};
|
||||
use crate::{AgentRequest, AgentResponse, DiskInfo, Metrics, ProcessInfo, ProcessesPayload};
|
||||
use crate::{
|
||||
AgentRequest, AgentResponse, DiskInfo, JournalResponse, Metrics, ProcessInfo,
|
||||
ProcessMetricsResponse, ProcessesPayload,
|
||||
};
|
||||
|
||||
use prost::Message as ProstMessage;
|
||||
use std::cell::RefCell;
|
||||
@ -206,6 +209,26 @@ pub async fn send_request_and_wait(
|
||||
Ok(AgentResponse::Processes(processes))
|
||||
}
|
||||
}
|
||||
AgentRequest::ProcessMetrics { pid: _ } => {
|
||||
// Parse JSON response for process metrics
|
||||
let process_metrics: ProcessMetricsResponse =
|
||||
serde_json::from_str(&response).map_err(|e| {
|
||||
ConnectorError::serialization_error(format!(
|
||||
"Failed to parse process metrics: {e}"
|
||||
))
|
||||
})?;
|
||||
Ok(AgentResponse::ProcessMetrics(process_metrics))
|
||||
}
|
||||
AgentRequest::JournalEntries { pid: _ } => {
|
||||
// Parse JSON response for journal entries
|
||||
let journal_entries: JournalResponse =
|
||||
serde_json::from_str(&response).map_err(|e| {
|
||||
ConnectorError::serialization_error(format!(
|
||||
"Failed to parse journal entries: {e}"
|
||||
))
|
||||
})?;
|
||||
Ok(AgentResponse::JournalEntries(journal_entries))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Loading…
Reference in New Issue
Block a user