Compare commits
No commits in common. "master" and "feature/connection-error-modal" have entirely different histories.
master
...
feature/co
4
.gitignore
vendored
4
.gitignore
vendored
@ -1,7 +1,3 @@
|
||||
/target
|
||||
.vscode/
|
||||
/socktop-wasm-test/target
|
||||
|
||||
# Documentation files from development sessions (context-specific, not for public repo)
|
||||
/OPTIMIZATION_PROCESS_DETAILS.md
|
||||
/THREAD_SUPPORT.md
|
||||
|
||||
999
Cargo.lock
generated
999
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
21
LICENSE
21
LICENSE
@ -1,21 +0,0 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2025 Witty One Off
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
8
nohup.out
Normal file
8
nohup.out
Normal file
@ -0,0 +1,8 @@
|
||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8433/ws
|
||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8433/ws
|
||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8433/ws
|
||||
Error: Address already in use (os error 98)
|
||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8433/ws
|
||||
Error: Address already in use (os error 98)
|
||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8443/ws
|
||||
socktop_agent: TLS enabled. Listening on wss://0.0.0.0:8443/ws
|
||||
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "socktop"
|
||||
version = "1.50.0"
|
||||
version = "1.40.0"
|
||||
authors = ["Jason Witty <jasonpwitty+socktop@proton.me>"]
|
||||
description = "Remote system monitor over WebSocket, TUI like top"
|
||||
edition = "2024"
|
||||
@ -9,7 +9,7 @@ readme = "README.md"
|
||||
|
||||
[dependencies]
|
||||
# socktop connector for agent communication
|
||||
socktop_connector = "1.50.0"
|
||||
socktop_connector = { path = "../socktop_connector" }
|
||||
|
||||
tokio = { workspace = true }
|
||||
futures-util = { workspace = true }
|
||||
|
||||
@ -17,7 +17,7 @@ use ratatui::{
|
||||
backend::CrosstermBackend,
|
||||
layout::{Constraint, Direction, Rect},
|
||||
};
|
||||
use tokio::time::{sleep, timeout};
|
||||
use tokio::time::sleep;
|
||||
|
||||
use crate::history::{PerCoreHistory, push_capped};
|
||||
use crate::retry::{RetryTiming, compute_retry_timing};
|
||||
@ -28,15 +28,11 @@ use crate::ui::cpu::{
|
||||
per_core_handle_scrollbar_mouse,
|
||||
};
|
||||
use crate::ui::modal::{ModalAction, ModalManager, ModalType};
|
||||
use crate::ui::processes::{
|
||||
ProcSortBy, ProcessKeyParams, get_filtered_sorted_indices, processes_handle_key_with_selection,
|
||||
processes_handle_mouse_with_selection,
|
||||
};
|
||||
use crate::ui::processes::{ProcSortBy, processes_handle_key, processes_handle_mouse};
|
||||
use crate::ui::{
|
||||
disks::draw_disks, gpu::draw_gpu, header::draw_header, mem::draw_mem, net::draw_net_spark,
|
||||
swap::draw_swap,
|
||||
};
|
||||
|
||||
use socktop_connector::{
|
||||
AgentRequest, AgentResponse, SocktopConnector, connect_to_socktop_agent,
|
||||
connect_to_socktop_agent_with_tls,
|
||||
@ -80,37 +76,12 @@ pub struct App {
|
||||
pub procs_sort_by: ProcSortBy,
|
||||
last_procs_area: Option<ratatui::layout::Rect>,
|
||||
|
||||
// Process selection state
|
||||
pub selected_process_pid: Option<u32>,
|
||||
pub selected_process_index: Option<usize>, // Index in the visible/sorted list
|
||||
prev_selected_process_pid: Option<u32>, // Track previous selection to detect changes
|
||||
|
||||
// Process search state
|
||||
pub process_search_active: bool,
|
||||
pub process_search_query: String,
|
||||
|
||||
last_procs_poll: Instant,
|
||||
last_disks_poll: Instant,
|
||||
procs_interval: Duration,
|
||||
disks_interval: Duration,
|
||||
metrics_interval: Duration,
|
||||
|
||||
// Process details polling
|
||||
pub process_details: Option<socktop_connector::ProcessMetricsResponse>,
|
||||
pub journal_entries: Option<socktop_connector::JournalResponse>,
|
||||
pub process_cpu_history: VecDeque<f32>, // CPU history for sparkline (last 60 samples)
|
||||
pub process_mem_history: VecDeque<u64>, // Memory usage history in bytes (last 60 samples)
|
||||
pub process_io_read_history: VecDeque<u64>, // Disk read DELTA history in bytes (last 60 samples)
|
||||
pub process_io_write_history: VecDeque<u64>, // Disk write DELTA history in bytes (last 60 samples)
|
||||
last_io_read_bytes: Option<u64>, // Previous read bytes for delta calculation
|
||||
last_io_write_bytes: Option<u64>, // Previous write bytes for delta calculation
|
||||
pub max_process_mem_bytes: u64, // Maximum memory usage observed for current process
|
||||
pub process_details_unsupported: bool, // Track if agent doesn't support process details
|
||||
last_process_details_poll: Instant,
|
||||
last_journal_poll: Instant,
|
||||
process_details_interval: Duration,
|
||||
journal_interval: Duration,
|
||||
|
||||
// For reconnects
|
||||
ws_url: String,
|
||||
tls_ca: Option<String>,
|
||||
@ -149,11 +120,6 @@ impl App {
|
||||
procs_drag: None,
|
||||
procs_sort_by: ProcSortBy::CpuDesc,
|
||||
last_procs_area: None,
|
||||
selected_process_pid: None,
|
||||
selected_process_index: None,
|
||||
prev_selected_process_pid: None,
|
||||
process_search_active: false,
|
||||
process_search_query: String::new(),
|
||||
last_procs_poll: Instant::now()
|
||||
.checked_sub(Duration::from_secs(2))
|
||||
.unwrap_or_else(Instant::now), // trigger immediately on first loop
|
||||
@ -163,24 +129,6 @@ impl App {
|
||||
procs_interval: Duration::from_secs(2),
|
||||
disks_interval: Duration::from_secs(5),
|
||||
metrics_interval: Duration::from_millis(500),
|
||||
process_details: None,
|
||||
journal_entries: None,
|
||||
process_cpu_history: VecDeque::with_capacity(600),
|
||||
process_mem_history: VecDeque::with_capacity(600),
|
||||
process_io_read_history: VecDeque::with_capacity(600),
|
||||
process_io_write_history: VecDeque::with_capacity(600),
|
||||
last_io_read_bytes: None,
|
||||
last_io_write_bytes: None,
|
||||
max_process_mem_bytes: 0,
|
||||
process_details_unsupported: false,
|
||||
last_process_details_poll: Instant::now()
|
||||
.checked_sub(Duration::from_secs(10))
|
||||
.unwrap_or_else(Instant::now),
|
||||
last_journal_poll: Instant::now()
|
||||
.checked_sub(Duration::from_secs(10))
|
||||
.unwrap_or_else(Instant::now),
|
||||
process_details_interval: Duration::from_millis(500),
|
||||
journal_interval: Duration::from_secs(5),
|
||||
ws_url: String::new(),
|
||||
tls_ca: None,
|
||||
verify_hostname: false,
|
||||
@ -617,90 +565,14 @@ impl App {
|
||||
continue; // Skip normal key processing
|
||||
}
|
||||
ModalAction::Cancel | ModalAction::Dismiss => {
|
||||
// If ProcessDetails modal was dismissed, clear the data to save resources
|
||||
if let Some(crate::ui::modal::ModalType::ProcessDetails {
|
||||
..
|
||||
}) = self.modal_manager.current_modal()
|
||||
{
|
||||
self.clear_process_details();
|
||||
}
|
||||
// Modal was dismissed, skip normal key processing
|
||||
continue;
|
||||
// Modal was dismissed, continue to normal processing
|
||||
}
|
||||
ModalAction::Confirm => {
|
||||
// Handle confirmation action here if needed in the future
|
||||
}
|
||||
ModalAction::SwitchToParentProcess(_current_pid) => {
|
||||
// Get parent PID from current process details
|
||||
if let Some(details) = &self.process_details
|
||||
&& let Some(parent_pid) = details.process.parent_pid
|
||||
{
|
||||
// Clear current process details
|
||||
self.clear_process_details();
|
||||
// Update selected process to parent
|
||||
self.selected_process_pid = Some(parent_pid);
|
||||
// Open modal for parent process
|
||||
self.modal_manager.push_modal(
|
||||
crate::ui::modal::ModalType::ProcessDetails {
|
||||
pid: parent_pid,
|
||||
},
|
||||
);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
ModalAction::Handled => {
|
||||
// Modal consumed the key, don't pass to main window
|
||||
continue;
|
||||
}
|
||||
ModalAction::None => {
|
||||
// Modal didn't handle the key, pass through to normal handling
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle search mode
|
||||
if self.process_search_active {
|
||||
match k.code {
|
||||
KeyCode::Esc => {
|
||||
// Exit search mode
|
||||
self.process_search_active = false;
|
||||
self.process_search_query.clear();
|
||||
continue;
|
||||
}
|
||||
KeyCode::Enter => {
|
||||
// Exit search mode, keep filter active, and auto-select first result
|
||||
self.process_search_active = false;
|
||||
|
||||
// Auto-select first filtered result
|
||||
if let Some(m) = self.last_metrics.as_ref() {
|
||||
let idxs = get_filtered_sorted_indices(
|
||||
m,
|
||||
&self.process_search_query,
|
||||
self.procs_sort_by,
|
||||
);
|
||||
if !idxs.is_empty() {
|
||||
let first_idx = idxs[0];
|
||||
self.selected_process_index = Some(first_idx);
|
||||
self.selected_process_pid =
|
||||
Some(m.top_processes[first_idx].pid);
|
||||
}
|
||||
}
|
||||
continue;
|
||||
}
|
||||
KeyCode::Backspace => {
|
||||
self.process_search_query.pop();
|
||||
continue;
|
||||
}
|
||||
KeyCode::Char(c) => {
|
||||
self.process_search_query.push(c);
|
||||
continue;
|
||||
}
|
||||
KeyCode::Up | KeyCode::Down => {
|
||||
// Allow arrow keys to navigate even while in search mode
|
||||
// Fall through to normal navigation handling
|
||||
}
|
||||
_ => {
|
||||
continue; // Block other keys in search mode
|
||||
// Modal is still active but didn't consume the key
|
||||
continue; // Skip normal key processing
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -712,35 +584,6 @@ impl App {
|
||||
) {
|
||||
self.should_quit = true;
|
||||
}
|
||||
|
||||
// Activate search mode on '/' (clears query if starting new search, or edits existing)
|
||||
if matches!(k.code, KeyCode::Char('/')) {
|
||||
self.process_search_active = true;
|
||||
// Don't clear query - allow editing existing search
|
||||
continue;
|
||||
}
|
||||
|
||||
// Clear search filter on 'c' or 'C' (when not in search mode)
|
||||
if matches!(k.code, KeyCode::Char('c') | KeyCode::Char('C'))
|
||||
&& !self.process_search_query.is_empty()
|
||||
&& !self.process_search_active
|
||||
{
|
||||
self.process_search_query.clear();
|
||||
self.selected_process_pid = None;
|
||||
self.selected_process_index = None;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Show About modal on 'a' or 'A'
|
||||
if matches!(k.code, KeyCode::Char('a') | KeyCode::Char('A')) {
|
||||
self.modal_manager.push_modal(ModalType::About);
|
||||
}
|
||||
|
||||
// Show Help modal on 'h' or 'H'
|
||||
if matches!(k.code, KeyCode::Char('h') | KeyCode::Char('H')) {
|
||||
self.modal_manager.push_modal(ModalType::Help);
|
||||
}
|
||||
|
||||
// Per-core scroll via keys (Up/Down/PageUp/PageDown/Home/End)
|
||||
let sz = terminal.size()?;
|
||||
let area = Rect::new(0, 0, sz.width, sz.height);
|
||||
@ -760,83 +603,7 @@ impl App {
|
||||
.split(rows[1]);
|
||||
let content = per_core_content_area(top[1]);
|
||||
|
||||
// First try process selection (only handles arrows if a process is selected)
|
||||
let process_handled = if self.last_procs_area.is_some() {
|
||||
processes_handle_key_with_selection(ProcessKeyParams {
|
||||
selected_process_pid: &mut self.selected_process_pid,
|
||||
selected_process_index: &mut self.selected_process_index,
|
||||
key: k,
|
||||
metrics: self.last_metrics.as_ref(),
|
||||
sort_by: self.procs_sort_by,
|
||||
search_query: &self.process_search_query,
|
||||
})
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
// If process selection didn't handle it, use CPU scrolling
|
||||
if !process_handled {
|
||||
per_core_handle_key(
|
||||
&mut self.per_core_scroll,
|
||||
k,
|
||||
content.height as usize,
|
||||
);
|
||||
}
|
||||
|
||||
// Auto-scroll to keep selected process visible
|
||||
if let (Some(selected_idx), Some(p_area)) =
|
||||
(self.selected_process_index, self.last_procs_area)
|
||||
&& let Some(m) = self.last_metrics.as_ref()
|
||||
{
|
||||
// Get filtered and sorted indices (same as display)
|
||||
let idxs = get_filtered_sorted_indices(
|
||||
m,
|
||||
&self.process_search_query,
|
||||
self.procs_sort_by,
|
||||
);
|
||||
|
||||
// Find the display position of the selected process in filtered list
|
||||
if let Some(display_pos) =
|
||||
idxs.iter().position(|&idx| idx == selected_idx)
|
||||
{
|
||||
// Calculate viewport size
|
||||
// Account for: borders (2) + header (1) + search box if active (3)
|
||||
let extra_rows = if self.process_search_active
|
||||
|| !self.process_search_query.is_empty()
|
||||
{
|
||||
3 // search box with border
|
||||
} else {
|
||||
0
|
||||
};
|
||||
let viewport_rows =
|
||||
p_area.height.saturating_sub(3 + extra_rows) as usize;
|
||||
|
||||
// Adjust scroll offset to keep selection visible
|
||||
if display_pos < self.procs_scroll_offset {
|
||||
// Selection is above viewport, scroll up
|
||||
self.procs_scroll_offset = display_pos;
|
||||
} else if display_pos >= self.procs_scroll_offset + viewport_rows {
|
||||
// Selection is below viewport, scroll down
|
||||
self.procs_scroll_offset =
|
||||
display_pos.saturating_sub(viewport_rows - 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check if process selection changed and clear details if so
|
||||
if self.selected_process_pid != self.prev_selected_process_pid {
|
||||
self.clear_process_details();
|
||||
self.prev_selected_process_pid = self.selected_process_pid;
|
||||
}
|
||||
|
||||
// Check if Enter was pressed with a process selected
|
||||
if process_handled
|
||||
&& k.code == KeyCode::Enter
|
||||
&& let Some(selected_pid) = self.selected_process_pid
|
||||
{
|
||||
self.modal_manager
|
||||
.push_modal(ModalType::ProcessDetails { pid: selected_pid });
|
||||
}
|
||||
per_core_handle_key(&mut self.per_core_scroll, k, content.height as usize);
|
||||
|
||||
let total_rows = self
|
||||
.last_metrics
|
||||
@ -848,13 +615,14 @@ impl App {
|
||||
total_rows,
|
||||
content.height as usize,
|
||||
);
|
||||
|
||||
if let Some(p_area) = self.last_procs_area {
|
||||
// page size = visible rows (inner height minus header = 1)
|
||||
let page = p_area.height.saturating_sub(3).max(1) as usize; // borders (2) + header (1)
|
||||
processes_handle_key(&mut self.procs_scroll_offset, k, page);
|
||||
}
|
||||
}
|
||||
Event::Mouse(m) => {
|
||||
// If modal is active, don't handle mouse events on the main window
|
||||
if self.modal_manager.is_active() {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Layout to get areas
|
||||
let sz = terminal.size()?;
|
||||
let area = Rect::new(0, 0, sz.width, sz.height);
|
||||
@ -903,33 +671,18 @@ impl App {
|
||||
content.height as usize,
|
||||
);
|
||||
|
||||
// Processes table: sort by column on header click and handle row selection
|
||||
// Processes table: sort by column on header click
|
||||
if let (Some(mm), Some(p_area)) =
|
||||
(self.last_metrics.as_ref(), self.last_procs_area)
|
||||
&& let Some(new_sort) = processes_handle_mouse(
|
||||
&mut self.procs_scroll_offset,
|
||||
&mut self.procs_drag,
|
||||
m,
|
||||
p_area,
|
||||
mm.top_processes.len(),
|
||||
)
|
||||
{
|
||||
use crate::ui::processes::ProcessMouseParams;
|
||||
if let Some(new_sort) =
|
||||
processes_handle_mouse_with_selection(ProcessMouseParams {
|
||||
scroll_offset: &mut self.procs_scroll_offset,
|
||||
selected_process_pid: &mut self.selected_process_pid,
|
||||
selected_process_index: &mut self.selected_process_index,
|
||||
drag: &mut self.procs_drag,
|
||||
mouse: m,
|
||||
area: p_area,
|
||||
total_rows: mm.top_processes.len(),
|
||||
metrics: self.last_metrics.as_ref(),
|
||||
sort_by: self.procs_sort_by,
|
||||
search_query: &self.process_search_query,
|
||||
})
|
||||
{
|
||||
self.procs_sort_by = new_sort;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if process selection changed via mouse and clear details if so
|
||||
if self.selected_process_pid != self.prev_selected_process_pid {
|
||||
self.clear_process_details();
|
||||
self.prev_selected_process_pid = self.selected_process_pid;
|
||||
self.procs_sort_by = new_sort;
|
||||
}
|
||||
}
|
||||
Event::Resize(_, _) => {}
|
||||
@ -979,104 +732,6 @@ impl App {
|
||||
}
|
||||
self.last_disks_poll = Instant::now();
|
||||
}
|
||||
|
||||
// Poll process details when modal is active and process is selected
|
||||
if let Some(pid) = self.selected_process_pid {
|
||||
// Check if ProcessDetails modal is currently active
|
||||
if let Some(crate::ui::modal::ModalType::ProcessDetails { .. }) =
|
||||
self.modal_manager.current_modal()
|
||||
{
|
||||
// Poll process details every 500ms when modal is active
|
||||
if self.last_process_details_poll.elapsed()
|
||||
>= self.process_details_interval
|
||||
{
|
||||
// Use timeout to prevent blocking the event loop
|
||||
match timeout(
|
||||
Duration::from_millis(2000),
|
||||
ws.request(AgentRequest::ProcessMetrics { pid }),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(Ok(AgentResponse::ProcessMetrics(details))) => {
|
||||
// Update history for sparklines
|
||||
let cpu_usage = details.process.cpu_usage;
|
||||
push_capped(&mut self.process_cpu_history, cpu_usage, 600);
|
||||
|
||||
let mem_bytes = details.process.mem_bytes;
|
||||
push_capped(&mut self.process_mem_history, mem_bytes, 600);
|
||||
|
||||
// Track maximum memory usage
|
||||
if mem_bytes > self.max_process_mem_bytes {
|
||||
self.max_process_mem_bytes = mem_bytes;
|
||||
}
|
||||
|
||||
// I/O bytes from agent are cumulative, calculate deltas
|
||||
if let Some(read) = details.process.read_bytes {
|
||||
let delta = if let Some(last) = self.last_io_read_bytes
|
||||
{
|
||||
read.saturating_sub(last)
|
||||
} else {
|
||||
0 // First sample, no delta available
|
||||
};
|
||||
push_capped(
|
||||
&mut self.process_io_read_history,
|
||||
delta,
|
||||
600,
|
||||
);
|
||||
self.last_io_read_bytes = Some(read);
|
||||
}
|
||||
if let Some(write) = details.process.write_bytes {
|
||||
let delta = if let Some(last) = self.last_io_write_bytes
|
||||
{
|
||||
write.saturating_sub(last)
|
||||
} else {
|
||||
0 // First sample, no delta available
|
||||
};
|
||||
push_capped(
|
||||
&mut self.process_io_write_history,
|
||||
delta,
|
||||
600,
|
||||
);
|
||||
self.last_io_write_bytes = Some(write);
|
||||
}
|
||||
|
||||
self.process_details = Some(details);
|
||||
self.process_details_unsupported = false;
|
||||
}
|
||||
Ok(Err(_)) | Err(_) => {
|
||||
// Agent doesn't support this feature or timeout occurred
|
||||
// Mark as unsupported so we can show appropriate message
|
||||
self.process_details_unsupported = true;
|
||||
}
|
||||
Ok(Ok(_)) => {
|
||||
// Wrong response type
|
||||
self.process_details_unsupported = true;
|
||||
}
|
||||
}
|
||||
self.last_process_details_poll = Instant::now();
|
||||
}
|
||||
|
||||
// Poll journal entries every 5s when modal is active
|
||||
if self.last_journal_poll.elapsed() >= self.journal_interval {
|
||||
// Use timeout to prevent blocking the event loop
|
||||
match timeout(
|
||||
Duration::from_millis(2000),
|
||||
ws.request(AgentRequest::JournalEntries { pid }),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(Ok(AgentResponse::JournalEntries(journal))) => {
|
||||
self.journal_entries = Some(journal);
|
||||
}
|
||||
Ok(Err(_)) | Err(_) | Ok(Ok(_)) => {
|
||||
// Agent doesn't support this feature, error occurred, or wrong response type
|
||||
// Keep journal_entries as None
|
||||
}
|
||||
}
|
||||
self.last_journal_poll = Instant::now();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
// Connection error - show modal if not already shown
|
||||
@ -1105,20 +760,6 @@ impl App {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Clear process details when modal is closed or selection changes
|
||||
pub fn clear_process_details(&mut self) {
|
||||
self.process_details = None;
|
||||
self.journal_entries = None;
|
||||
self.process_cpu_history.clear();
|
||||
self.process_mem_history.clear();
|
||||
self.process_io_read_history.clear();
|
||||
self.process_io_write_history.clear();
|
||||
self.last_io_read_bytes = None;
|
||||
self.last_io_write_bytes = None;
|
||||
self.max_process_mem_bytes = 0;
|
||||
self.process_details_unsupported = false;
|
||||
}
|
||||
|
||||
fn update_with_metrics(&mut self, mut m: Metrics) {
|
||||
if let Some(prev) = &self.last_metrics {
|
||||
// Preserve slower fields when the fast payload omits them
|
||||
@ -1275,35 +916,14 @@ impl App {
|
||||
crate::ui::processes::draw_top_processes(
|
||||
f,
|
||||
procs_area,
|
||||
crate::ui::processes::ProcessDisplayParams {
|
||||
metrics: self.last_metrics.as_ref(),
|
||||
scroll_offset: self.procs_scroll_offset,
|
||||
sort_by: self.procs_sort_by,
|
||||
selected_process_pid: self.selected_process_pid,
|
||||
selected_process_index: self.selected_process_index,
|
||||
search_query: &self.process_search_query,
|
||||
search_active: self.process_search_active,
|
||||
},
|
||||
self.last_metrics.as_ref(),
|
||||
self.procs_scroll_offset,
|
||||
self.procs_sort_by,
|
||||
);
|
||||
|
||||
// Render modals on top of everything else
|
||||
if self.modal_manager.is_active() {
|
||||
use crate::ui::modal::{ProcessHistoryData, ProcessModalData};
|
||||
self.modal_manager.render(
|
||||
f,
|
||||
ProcessModalData {
|
||||
details: self.process_details.as_ref(),
|
||||
journal: self.journal_entries.as_ref(),
|
||||
history: ProcessHistoryData {
|
||||
cpu: &self.process_cpu_history,
|
||||
mem: &self.process_mem_history,
|
||||
io_read: &self.process_io_read_history,
|
||||
io_write: &self.process_io_write_history,
|
||||
},
|
||||
max_mem_bytes: self.max_process_mem_bytes,
|
||||
unsupported: self.process_details_unsupported,
|
||||
},
|
||||
);
|
||||
self.modal_manager.render(f);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1326,11 +946,6 @@ impl Default for App {
|
||||
procs_drag: None,
|
||||
procs_sort_by: ProcSortBy::CpuDesc,
|
||||
last_procs_area: None,
|
||||
selected_process_pid: None,
|
||||
selected_process_index: None,
|
||||
prev_selected_process_pid: None,
|
||||
process_search_active: false,
|
||||
process_search_query: String::new(),
|
||||
last_procs_poll: Instant::now()
|
||||
.checked_sub(Duration::from_secs(2))
|
||||
.unwrap_or_else(Instant::now), // trigger immediately on first loop
|
||||
@ -1340,24 +955,6 @@ impl Default for App {
|
||||
procs_interval: Duration::from_secs(2),
|
||||
disks_interval: Duration::from_secs(5),
|
||||
metrics_interval: Duration::from_millis(500),
|
||||
process_details: None,
|
||||
journal_entries: None,
|
||||
process_cpu_history: VecDeque::with_capacity(600),
|
||||
process_mem_history: VecDeque::with_capacity(600),
|
||||
process_io_read_history: VecDeque::with_capacity(600),
|
||||
process_io_write_history: VecDeque::with_capacity(600),
|
||||
last_io_read_bytes: None,
|
||||
last_io_write_bytes: None,
|
||||
max_process_mem_bytes: 0,
|
||||
process_details_unsupported: false,
|
||||
last_process_details_poll: Instant::now()
|
||||
.checked_sub(Duration::from_secs(10))
|
||||
.unwrap_or_else(Instant::now),
|
||||
last_journal_poll: Instant::now()
|
||||
.checked_sub(Duration::from_secs(10))
|
||||
.unwrap_or_else(Instant::now),
|
||||
process_details_interval: Duration::from_millis(500),
|
||||
journal_interval: Duration::from_secs(5),
|
||||
ws_url: String::new(),
|
||||
tls_ca: None,
|
||||
verify_hostname: false,
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@ -42,8 +42,8 @@ pub fn per_core_content_area(area: Rect) -> Rect {
|
||||
/// Handles key events for per-core CPU bars.
|
||||
pub fn per_core_handle_key(scroll_offset: &mut usize, key: KeyEvent, page_size: usize) {
|
||||
match key.code {
|
||||
KeyCode::Left => *scroll_offset = scroll_offset.saturating_sub(1),
|
||||
KeyCode::Right => *scroll_offset = scroll_offset.saturating_add(1),
|
||||
KeyCode::Up => *scroll_offset = scroll_offset.saturating_sub(1),
|
||||
KeyCode::Down => *scroll_offset = scroll_offset.saturating_add(1),
|
||||
KeyCode::PageUp => {
|
||||
let step = page_size.max(1);
|
||||
*scroll_offset = scroll_offset.saturating_sub(step);
|
||||
@ -240,61 +240,20 @@ pub fn draw_cpu_avg_graph(
|
||||
hist: &std::collections::VecDeque<u64>,
|
||||
m: Option<&Metrics>,
|
||||
) {
|
||||
// Calculate average CPU over the monitoring period
|
||||
let avg_cpu = if !hist.is_empty() {
|
||||
let sum: u64 = hist.iter().sum();
|
||||
sum as f64 / hist.len() as f64
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
|
||||
let title = if let Some(mm) = m {
|
||||
format!("CPU (now: {:>5.1}% | avg: {:>5.1}%)", mm.cpu_total, avg_cpu)
|
||||
format!("CPU avg (now: {:>5.1}%)", mm.cpu_total)
|
||||
} else {
|
||||
"CPU avg".into()
|
||||
};
|
||||
|
||||
// Build the top-right info (CPU temp and polling intervals)
|
||||
let top_right_info = if let Some(mm) = m {
|
||||
mm.cpu_temp_c
|
||||
.map(|t| {
|
||||
let icon = if t < 50.0 {
|
||||
"😎"
|
||||
} else if t < 85.0 {
|
||||
"⚠️"
|
||||
} else {
|
||||
"🔥"
|
||||
};
|
||||
format!("CPU Temp: {t:.1}°C {icon}")
|
||||
})
|
||||
.unwrap_or_else(|| "CPU Temp: N/A".into())
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
|
||||
let max_points = area.width.saturating_sub(2) as usize;
|
||||
let start = hist.len().saturating_sub(max_points);
|
||||
let data: Vec<u64> = hist.iter().skip(start).cloned().collect();
|
||||
|
||||
// Render the sparkline with title on left
|
||||
let spark = Sparkline::default()
|
||||
.block(Block::default().borders(Borders::ALL).title(title))
|
||||
.data(&data)
|
||||
.max(100)
|
||||
.style(Style::default().fg(Color::Cyan));
|
||||
f.render_widget(spark, area);
|
||||
|
||||
// Render the top-right info as text overlay in the top-right corner
|
||||
if !top_right_info.is_empty() {
|
||||
let info_area = Rect {
|
||||
x: area.x + area.width.saturating_sub(top_right_info.len() as u16 + 2),
|
||||
y: area.y,
|
||||
width: top_right_info.len() as u16 + 1,
|
||||
height: 1,
|
||||
};
|
||||
let info_line = Line::from(Span::raw(top_right_info));
|
||||
f.render_widget(Paragraph::new(info_line), info_area);
|
||||
}
|
||||
}
|
||||
|
||||
/// Draws the per-core CPU bars with sparklines and trends.
|
||||
|
||||
@ -24,16 +24,8 @@ pub fn draw_disks(f: &mut ratatui::Frame<'_>, area: Rect, m: Option<&Metrics>) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Filter duplicates by keeping first occurrence of each unique name
|
||||
let mut seen_names = std::collections::HashSet::new();
|
||||
let unique_disks: Vec<_> = mm
|
||||
.disks
|
||||
.iter()
|
||||
.filter(|d| seen_names.insert(d.name.clone()))
|
||||
.collect();
|
||||
|
||||
let per_disk_h = 3u16;
|
||||
let max_cards = (inner.height / per_disk_h).min(unique_disks.len() as u16) as usize;
|
||||
let max_cards = (inner.height / per_disk_h).min(mm.disks.len() as u16) as usize;
|
||||
|
||||
let constraints: Vec<Constraint> = (0..max_cards)
|
||||
.map(|_| Constraint::Length(per_disk_h))
|
||||
@ -44,7 +36,7 @@ pub fn draw_disks(f: &mut ratatui::Frame<'_>, area: Rect, m: Option<&Metrics>) {
|
||||
.split(inner);
|
||||
|
||||
for (i, slot) in rows.iter().enumerate() {
|
||||
let d = unique_disks[i];
|
||||
let d = &mm.disks[i];
|
||||
let used = d.total.saturating_sub(d.available);
|
||||
let ratio = if d.total > 0 {
|
||||
used as f64 / d.total as f64
|
||||
@ -61,43 +53,23 @@ pub fn draw_disks(f: &mut ratatui::Frame<'_>, area: Rect, m: Option<&Metrics>) {
|
||||
ratatui::style::Color::Red
|
||||
};
|
||||
|
||||
// Add indentation for partitions
|
||||
let indent = if d.is_partition { "└─" } else { "" };
|
||||
|
||||
// Add temperature if available
|
||||
let temp_str = d
|
||||
.temperature
|
||||
.map(|t| format!(" {}°C", t.round() as i32))
|
||||
.unwrap_or_default();
|
||||
|
||||
let title = format!(
|
||||
"{}{}{}{} {} / {} ({}%)",
|
||||
indent,
|
||||
"{} {} {} / {} ({}%)",
|
||||
disk_icon(&d.name),
|
||||
truncate_middle(&d.name, (slot.width.saturating_sub(6)) as usize / 2),
|
||||
temp_str,
|
||||
human(used),
|
||||
human(d.total),
|
||||
pct
|
||||
);
|
||||
|
||||
// Indent the entire card (block) for partitions to align with └─ prefix (4 chars)
|
||||
let card_indent = if d.is_partition { 4 } else { 0 };
|
||||
let card_rect = Rect {
|
||||
x: slot.x + card_indent,
|
||||
y: slot.y,
|
||||
width: slot.width.saturating_sub(card_indent),
|
||||
height: slot.height,
|
||||
};
|
||||
|
||||
let card = Block::default().borders(Borders::ALL).title(title);
|
||||
f.render_widget(card, card_rect);
|
||||
f.render_widget(card, *slot);
|
||||
|
||||
let inner_card = Rect {
|
||||
x: card_rect.x + 1,
|
||||
y: card_rect.y + 1,
|
||||
width: card_rect.width.saturating_sub(2),
|
||||
height: card_rect.height.saturating_sub(2),
|
||||
x: slot.x + 1,
|
||||
y: slot.y + 1,
|
||||
width: slot.width.saturating_sub(2),
|
||||
height: slot.height.saturating_sub(2),
|
||||
};
|
||||
if inner_card.height == 0 {
|
||||
continue;
|
||||
|
||||
@ -3,8 +3,7 @@
|
||||
use crate::types::Metrics;
|
||||
use ratatui::{
|
||||
layout::Rect,
|
||||
text::{Line, Span},
|
||||
widgets::{Block, Borders, Paragraph},
|
||||
widgets::{Block, Borders},
|
||||
};
|
||||
use std::time::Duration;
|
||||
|
||||
@ -18,7 +17,20 @@ pub fn draw_header(
|
||||
procs_interval: Duration,
|
||||
) {
|
||||
let base = if let Some(mm) = m {
|
||||
format!("socktop — host: {}", mm.hostname)
|
||||
let temp = mm
|
||||
.cpu_temp_c
|
||||
.map(|t| {
|
||||
let icon = if t < 50.0 {
|
||||
"😎"
|
||||
} else if t < 85.0 {
|
||||
"⚠️"
|
||||
} else {
|
||||
"🔥"
|
||||
};
|
||||
format!("CPU Temp: {t:.1}°C {icon}")
|
||||
})
|
||||
.unwrap_or_else(|| "CPU Temp: N/A".into());
|
||||
format!("socktop — host: {} | {}", mm.hostname, temp)
|
||||
} else {
|
||||
"socktop — connecting...".into()
|
||||
};
|
||||
@ -26,30 +38,15 @@ pub fn draw_header(
|
||||
let tls_txt = if is_tls { "🔒 TLS" } else { "🔒✗ TLS" };
|
||||
// Token indicator
|
||||
let tok_txt = if has_token { "🔑 token" } else { "" };
|
||||
let mi = metrics_interval.as_millis();
|
||||
let pi = procs_interval.as_millis();
|
||||
let intervals = format!("⏱ {mi}ms metrics | {pi}ms procs");
|
||||
let mut parts = vec![base, tls_txt.into()];
|
||||
if !tok_txt.is_empty() {
|
||||
parts.push(tok_txt.into());
|
||||
}
|
||||
parts.push("(a: about, h: help, q: quit)".into());
|
||||
parts.push(intervals);
|
||||
parts.push("(q to quit)".into());
|
||||
let title = parts.join(" | ");
|
||||
|
||||
// Render the block with left-aligned title
|
||||
f.render_widget(Block::default().title(title).borders(Borders::BOTTOM), area);
|
||||
|
||||
// Render polling intervals on the right side
|
||||
let mi = metrics_interval.as_millis();
|
||||
let pi = procs_interval.as_millis();
|
||||
let intervals = format!("⏱ {mi}ms metrics | {pi}ms procs");
|
||||
let intervals_width = intervals.len() as u16;
|
||||
|
||||
if area.width > intervals_width + 2 {
|
||||
let right_area = Rect {
|
||||
x: area.x + area.width.saturating_sub(intervals_width + 1),
|
||||
y: area.y,
|
||||
width: intervals_width,
|
||||
height: 1,
|
||||
};
|
||||
let intervals_line = Line::from(Span::raw(intervals));
|
||||
f.render_widget(Paragraph::new(intervals_line), right_area);
|
||||
}
|
||||
}
|
||||
|
||||
@ -6,10 +6,6 @@ pub mod gpu;
|
||||
pub mod header;
|
||||
pub mod mem;
|
||||
pub mod modal;
|
||||
pub mod modal_connection;
|
||||
pub mod modal_format;
|
||||
pub mod modal_process;
|
||||
pub mod modal_types;
|
||||
pub mod net;
|
||||
pub mod processes;
|
||||
pub mod swap;
|
||||
|
||||
@ -1,29 +1,66 @@
|
||||
//! Modal window system for socktop TUI application
|
||||
|
||||
use super::theme::MODAL_DIM_BG;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use super::theme::{
|
||||
BTN_EXIT_BG_ACTIVE, BTN_EXIT_FG_ACTIVE, BTN_EXIT_FG_INACTIVE, BTN_EXIT_TEXT,
|
||||
BTN_RETRY_BG_ACTIVE, BTN_RETRY_FG_ACTIVE, BTN_RETRY_FG_INACTIVE, BTN_RETRY_TEXT, ICON_CLUSTER,
|
||||
ICON_COUNTDOWN_LABEL, ICON_MESSAGE, ICON_OFFLINE_LABEL, ICON_RETRY_LABEL, ICON_WARNING_TITLE,
|
||||
LARGE_ERROR_ICON, MODAL_AGENT_FG, MODAL_BG, MODAL_BORDER_FG, MODAL_COUNTDOWN_LABEL_FG,
|
||||
MODAL_DIM_BG, MODAL_FG, MODAL_HINT_FG, MODAL_ICON_PINK, MODAL_OFFLINE_LABEL_FG,
|
||||
MODAL_RETRY_LABEL_FG, MODAL_TITLE_FG,
|
||||
};
|
||||
use crossterm::event::KeyCode;
|
||||
use ratatui::{
|
||||
Frame,
|
||||
layout::{Alignment, Constraint, Direction, Layout, Rect},
|
||||
style::{Color, Modifier, Style},
|
||||
text::Line,
|
||||
text::{Line, Span, Text},
|
||||
widgets::{Block, Borders, Clear, Paragraph, Wrap},
|
||||
};
|
||||
|
||||
// Re-export types from modal_types
|
||||
pub use super::modal_types::{
|
||||
ModalAction, ModalButton, ModalType, ProcessHistoryData, ProcessModalData,
|
||||
};
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum ModalType {
|
||||
ConnectionError {
|
||||
message: String,
|
||||
disconnected_at: Instant,
|
||||
retry_count: u32,
|
||||
auto_retry_countdown: Option<u64>,
|
||||
},
|
||||
#[allow(dead_code)]
|
||||
Confirmation {
|
||||
title: String,
|
||||
message: String,
|
||||
confirm_text: String,
|
||||
cancel_text: String,
|
||||
},
|
||||
#[allow(dead_code)]
|
||||
Info { title: String, message: String },
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum ModalAction {
|
||||
None,
|
||||
RetryConnection,
|
||||
ExitApp,
|
||||
Confirm,
|
||||
Cancel,
|
||||
Dismiss,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum ModalButton {
|
||||
Retry,
|
||||
Exit,
|
||||
Confirm,
|
||||
Cancel,
|
||||
Ok,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ModalManager {
|
||||
stack: Vec<ModalType>,
|
||||
pub(super) active_button: ModalButton,
|
||||
pub thread_scroll_offset: usize,
|
||||
pub journal_scroll_offset: usize,
|
||||
pub thread_scroll_max: usize,
|
||||
pub journal_scroll_max: usize,
|
||||
pub help_scroll_offset: usize,
|
||||
active_button: ModalButton,
|
||||
}
|
||||
|
||||
impl ModalManager {
|
||||
@ -31,39 +68,16 @@ impl ModalManager {
|
||||
Self {
|
||||
stack: Vec::new(),
|
||||
active_button: ModalButton::Retry,
|
||||
thread_scroll_offset: 0,
|
||||
journal_scroll_offset: 0,
|
||||
thread_scroll_max: 0,
|
||||
journal_scroll_max: 0,
|
||||
help_scroll_offset: 0,
|
||||
}
|
||||
}
|
||||
pub fn is_active(&self) -> bool {
|
||||
!self.stack.is_empty()
|
||||
}
|
||||
|
||||
pub fn current_modal(&self) -> Option<&ModalType> {
|
||||
self.stack.last()
|
||||
}
|
||||
|
||||
pub fn push_modal(&mut self, modal: ModalType) {
|
||||
self.stack.push(modal);
|
||||
self.active_button = match self.stack.last() {
|
||||
Some(ModalType::ConnectionError { .. }) => ModalButton::Retry,
|
||||
Some(ModalType::ProcessDetails { .. }) => {
|
||||
// Reset scroll state for new process details
|
||||
self.thread_scroll_offset = 0;
|
||||
self.journal_scroll_offset = 0;
|
||||
self.thread_scroll_max = 0;
|
||||
self.journal_scroll_max = 0;
|
||||
ModalButton::Ok
|
||||
}
|
||||
Some(ModalType::About) => ModalButton::Ok,
|
||||
Some(ModalType::Help) => {
|
||||
// Reset scroll state for help modal
|
||||
self.help_scroll_offset = 0;
|
||||
ModalButton::Ok
|
||||
}
|
||||
Some(ModalType::Confirmation { .. }) => ModalButton::Confirm,
|
||||
Some(ModalType::Info { .. }) => ModalButton::Ok,
|
||||
None => ModalButton::Ok,
|
||||
@ -74,9 +88,6 @@ impl ModalManager {
|
||||
if let Some(next) = self.stack.last() {
|
||||
self.active_button = match next {
|
||||
ModalType::ConnectionError { .. } => ModalButton::Retry,
|
||||
ModalType::ProcessDetails { .. } => ModalButton::Ok,
|
||||
ModalType::About => ModalButton::Ok,
|
||||
ModalType::Help => ModalButton::Ok,
|
||||
ModalType::Confirmation { .. } => ModalButton::Confirm,
|
||||
ModalType::Info { .. } => ModalButton::Ok,
|
||||
};
|
||||
@ -124,101 +135,6 @@ impl ModalManager {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Char('x') | KeyCode::Char('X') => {
|
||||
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||
// Close all ProcessDetails modals at once (handles parent navigation chain)
|
||||
while matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||
self.pop_modal();
|
||||
}
|
||||
ModalAction::Dismiss
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Char('j') | KeyCode::Char('J') => {
|
||||
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||
self.thread_scroll_offset = self
|
||||
.thread_scroll_offset
|
||||
.saturating_add(1)
|
||||
.min(self.thread_scroll_max);
|
||||
ModalAction::Handled
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Char('k') | KeyCode::Char('K') => {
|
||||
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||
self.thread_scroll_offset = self.thread_scroll_offset.saturating_sub(1);
|
||||
ModalAction::Handled
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Char('d') | KeyCode::Char('D') => {
|
||||
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||
self.thread_scroll_offset = self
|
||||
.thread_scroll_offset
|
||||
.saturating_add(10)
|
||||
.min(self.thread_scroll_max);
|
||||
ModalAction::Handled
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Char('u') | KeyCode::Char('U') => {
|
||||
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||
self.thread_scroll_offset = self.thread_scroll_offset.saturating_sub(10);
|
||||
ModalAction::Handled
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Char('[') => {
|
||||
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||
self.journal_scroll_offset = self.journal_scroll_offset.saturating_sub(1);
|
||||
ModalAction::Handled
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Char(']') => {
|
||||
if matches!(self.stack.last(), Some(ModalType::ProcessDetails { .. })) {
|
||||
self.journal_scroll_offset = self
|
||||
.journal_scroll_offset
|
||||
.saturating_add(1)
|
||||
.min(self.journal_scroll_max);
|
||||
ModalAction::Handled
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Char('p') | KeyCode::Char('P') => {
|
||||
// Switch to parent process if it exists
|
||||
if let Some(ModalType::ProcessDetails { pid }) = self.stack.last() {
|
||||
// We need to get the parent PID from the process details
|
||||
// For now, return a special action that the app can handle
|
||||
// The app has access to the process details and can extract parent_pid
|
||||
ModalAction::SwitchToParentProcess(*pid)
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Up => {
|
||||
if matches!(self.stack.last(), Some(ModalType::Help)) {
|
||||
self.help_scroll_offset = self.help_scroll_offset.saturating_sub(1);
|
||||
ModalAction::Handled
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
KeyCode::Down => {
|
||||
if matches!(self.stack.last(), Some(ModalType::Help)) {
|
||||
self.help_scroll_offset = self.help_scroll_offset.saturating_add(1);
|
||||
ModalAction::Handled
|
||||
} else {
|
||||
ModalAction::None
|
||||
}
|
||||
}
|
||||
_ => ModalAction::None,
|
||||
}
|
||||
}
|
||||
@ -228,18 +144,6 @@ impl ModalManager {
|
||||
ModalAction::RetryConnection
|
||||
}
|
||||
(Some(ModalType::ConnectionError { .. }), ModalButton::Exit) => ModalAction::ExitApp,
|
||||
(Some(ModalType::ProcessDetails { .. }), ModalButton::Ok) => {
|
||||
self.pop_modal();
|
||||
ModalAction::Dismiss
|
||||
}
|
||||
(Some(ModalType::About), ModalButton::Ok) => {
|
||||
self.pop_modal();
|
||||
ModalAction::Dismiss
|
||||
}
|
||||
(Some(ModalType::Help), ModalButton::Ok) => {
|
||||
self.pop_modal();
|
||||
ModalAction::Dismiss
|
||||
}
|
||||
(Some(ModalType::Confirmation { .. }), ModalButton::Confirm) => ModalAction::Confirm,
|
||||
(Some(ModalType::Confirmation { .. }), ModalButton::Cancel) => ModalAction::Cancel,
|
||||
(Some(ModalType::Info { .. }), ModalButton::Ok) => {
|
||||
@ -262,10 +166,10 @@ impl ModalManager {
|
||||
self.next_button();
|
||||
}
|
||||
|
||||
pub fn render(&mut self, f: &mut Frame, data: ProcessModalData) {
|
||||
if let Some(m) = self.stack.last().cloned() {
|
||||
pub fn render(&self, f: &mut Frame) {
|
||||
if let Some(m) = self.stack.last() {
|
||||
self.render_background_dim(f);
|
||||
self.render_modal_content(f, &m, data);
|
||||
self.render_modal_content(f, m);
|
||||
}
|
||||
}
|
||||
|
||||
@ -280,27 +184,9 @@ impl ModalManager {
|
||||
);
|
||||
}
|
||||
|
||||
fn render_modal_content(&mut self, f: &mut Frame, modal: &ModalType, data: ProcessModalData) {
|
||||
fn render_modal_content(&self, f: &mut Frame, modal: &ModalType) {
|
||||
let area = f.area();
|
||||
// Different sizes for different modal types
|
||||
let modal_area = match modal {
|
||||
ModalType::ProcessDetails { .. } => {
|
||||
// Process details modal uses almost full screen (95% width, 90% height)
|
||||
self.centered_rect(95, 90, area)
|
||||
}
|
||||
ModalType::About => {
|
||||
// About modal uses medium size
|
||||
self.centered_rect(90, 90, area)
|
||||
}
|
||||
ModalType::Help => {
|
||||
// Help modal uses medium size
|
||||
self.centered_rect(70, 80, area)
|
||||
}
|
||||
_ => {
|
||||
// Other modals use smaller size
|
||||
self.centered_rect(70, 50, area)
|
||||
}
|
||||
};
|
||||
let modal_area = self.centered_rect(70, 50, area);
|
||||
f.render_widget(Clear, modal_area);
|
||||
match modal {
|
||||
ModalType::ConnectionError {
|
||||
@ -316,11 +202,6 @@ impl ModalManager {
|
||||
*retry_count,
|
||||
*auto_retry_countdown,
|
||||
),
|
||||
ModalType::ProcessDetails { pid } => {
|
||||
self.render_process_details(f, modal_area, *pid, data)
|
||||
}
|
||||
ModalType::About => self.render_about(f, modal_area),
|
||||
ModalType::Help => self.render_help(f, modal_area),
|
||||
ModalType::Confirmation {
|
||||
title,
|
||||
message,
|
||||
@ -331,6 +212,279 @@ impl ModalManager {
|
||||
}
|
||||
}
|
||||
|
||||
fn render_connection_error(
|
||||
&self,
|
||||
f: &mut Frame,
|
||||
area: Rect,
|
||||
message: &str,
|
||||
disconnected_at: Instant,
|
||||
retry_count: u32,
|
||||
auto_retry_countdown: Option<u64>,
|
||||
) {
|
||||
let duration_text = format_duration(disconnected_at.elapsed());
|
||||
let chunks = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
.constraints([
|
||||
Constraint::Length(3),
|
||||
Constraint::Min(4),
|
||||
Constraint::Length(4),
|
||||
])
|
||||
.split(area);
|
||||
let block = Block::default()
|
||||
.title(ICON_WARNING_TITLE)
|
||||
.title_style(
|
||||
Style::default()
|
||||
.fg(MODAL_TITLE_FG)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)
|
||||
.borders(Borders::ALL)
|
||||
.border_style(Style::default().fg(MODAL_BORDER_FG))
|
||||
.style(Style::default().bg(MODAL_BG).fg(MODAL_FG));
|
||||
f.render_widget(block, area);
|
||||
|
||||
let content_area = chunks[1];
|
||||
let max_w = content_area.width.saturating_sub(15) as usize;
|
||||
let clean_message = if message.to_lowercase().contains("hostname verification")
|
||||
|| message.contains("socktop_connector")
|
||||
{
|
||||
"Connection failed - hostname verification disabled".to_string()
|
||||
} else if message.contains("Failed to fetch metrics:") {
|
||||
if let Some(p) = message.find(':') {
|
||||
let ess = message[p + 1..].trim();
|
||||
if ess.len() > max_w {
|
||||
format!("{}...", &ess[..max_w.saturating_sub(3)])
|
||||
} else {
|
||||
ess.to_string()
|
||||
}
|
||||
} else {
|
||||
"Connection error".to_string()
|
||||
}
|
||||
} else if message.starts_with("Retry failed:") {
|
||||
if let Some(p) = message.find(':') {
|
||||
let ess = message[p + 1..].trim();
|
||||
if ess.len() > max_w {
|
||||
format!("{}...", &ess[..max_w.saturating_sub(3)])
|
||||
} else {
|
||||
ess.to_string()
|
||||
}
|
||||
} else {
|
||||
"Retry failed".to_string()
|
||||
}
|
||||
} else if message.len() > max_w {
|
||||
format!("{}...", &message[..max_w.saturating_sub(3)])
|
||||
} else {
|
||||
message.to_string()
|
||||
};
|
||||
let truncate = |s: &str| {
|
||||
if s.len() > max_w {
|
||||
format!("{}...", &s[..max_w.saturating_sub(3)])
|
||||
} else {
|
||||
s.to_string()
|
||||
}
|
||||
};
|
||||
let agent_text = truncate("📡 Cannot connect to socktop agent");
|
||||
let message_text = truncate(&clean_message);
|
||||
let duration_display = truncate(&duration_text);
|
||||
let retry_display = truncate(&retry_count.to_string());
|
||||
let countdown_text = auto_retry_countdown.map(|c| {
|
||||
if c == 0 {
|
||||
"Auto retry now...".to_string()
|
||||
} else {
|
||||
format!("{c}s")
|
||||
}
|
||||
});
|
||||
|
||||
// Determine if we have enough space (height + width) to show large centered icon
|
||||
let icon_max_width = LARGE_ERROR_ICON
|
||||
.iter()
|
||||
.map(|l| l.trim().chars().count())
|
||||
.max()
|
||||
.unwrap_or(0) as u16;
|
||||
let large_allowed = content_area.height >= (LARGE_ERROR_ICON.len() as u16 + 8)
|
||||
&& content_area.width >= icon_max_width + 6; // small margin for borders/padding
|
||||
let mut icon_lines: Vec<Line> = Vec::new();
|
||||
if large_allowed {
|
||||
for &raw in LARGE_ERROR_ICON.iter() {
|
||||
let trimmed = raw.trim();
|
||||
icon_lines.push(Line::from(
|
||||
trimmed
|
||||
.chars()
|
||||
.map(|ch| {
|
||||
if ch == '!' {
|
||||
Span::styled(
|
||||
ch.to_string(),
|
||||
Style::default()
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)
|
||||
} else if ch == '/' || ch == '\\' || ch == '_' {
|
||||
// keep outline in pink
|
||||
Span::styled(
|
||||
ch.to_string(),
|
||||
Style::default()
|
||||
.fg(MODAL_ICON_PINK)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)
|
||||
} else if ch == ' ' {
|
||||
Span::raw(" ")
|
||||
} else {
|
||||
Span::styled(ch.to_string(), Style::default().fg(MODAL_ICON_PINK))
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>(),
|
||||
));
|
||||
}
|
||||
icon_lines.push(Line::from("")); // blank spacer line below icon
|
||||
}
|
||||
|
||||
let mut info_lines: Vec<Line> = Vec::new();
|
||||
if !large_allowed {
|
||||
info_lines.push(Line::from(vec![Span::styled(
|
||||
ICON_CLUSTER,
|
||||
Style::default().fg(MODAL_ICON_PINK),
|
||||
)]));
|
||||
info_lines.push(Line::from(""));
|
||||
}
|
||||
info_lines.push(Line::from(vec![Span::styled(
|
||||
&agent_text,
|
||||
Style::default().fg(MODAL_AGENT_FG),
|
||||
)]));
|
||||
info_lines.push(Line::from(""));
|
||||
info_lines.push(Line::from(vec![
|
||||
Span::styled(ICON_MESSAGE, Style::default().fg(MODAL_HINT_FG)),
|
||||
Span::styled(&message_text, Style::default().fg(MODAL_AGENT_FG)),
|
||||
]));
|
||||
info_lines.push(Line::from(""));
|
||||
info_lines.push(Line::from(vec![
|
||||
Span::styled(
|
||||
ICON_OFFLINE_LABEL,
|
||||
Style::default().fg(MODAL_OFFLINE_LABEL_FG),
|
||||
),
|
||||
Span::styled(
|
||||
&duration_display,
|
||||
Style::default()
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
]));
|
||||
info_lines.push(Line::from(vec![
|
||||
Span::styled(ICON_RETRY_LABEL, Style::default().fg(MODAL_RETRY_LABEL_FG)),
|
||||
Span::styled(
|
||||
&retry_display,
|
||||
Style::default()
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
]));
|
||||
if let Some(cd) = &countdown_text {
|
||||
info_lines.push(Line::from(vec![
|
||||
Span::styled(
|
||||
ICON_COUNTDOWN_LABEL,
|
||||
Style::default().fg(MODAL_COUNTDOWN_LABEL_FG),
|
||||
),
|
||||
Span::styled(
|
||||
cd,
|
||||
Style::default()
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
]));
|
||||
}
|
||||
|
||||
let constrained = Rect {
|
||||
x: content_area.x + 2,
|
||||
y: content_area.y,
|
||||
width: content_area.width.saturating_sub(4),
|
||||
height: content_area.height,
|
||||
};
|
||||
if large_allowed {
|
||||
let split = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
.constraints([
|
||||
Constraint::Length(icon_lines.len() as u16),
|
||||
Constraint::Min(0),
|
||||
])
|
||||
.split(constrained);
|
||||
// Center the icon block; each line already trimmed so per-line centering keeps shape
|
||||
f.render_widget(
|
||||
Paragraph::new(Text::from(icon_lines))
|
||||
.alignment(Alignment::Center)
|
||||
.wrap(Wrap { trim: false }),
|
||||
split[0],
|
||||
);
|
||||
f.render_widget(
|
||||
Paragraph::new(Text::from(info_lines))
|
||||
.alignment(Alignment::Center)
|
||||
.wrap(Wrap { trim: true }),
|
||||
split[1],
|
||||
);
|
||||
} else {
|
||||
f.render_widget(
|
||||
Paragraph::new(Text::from(info_lines))
|
||||
.alignment(Alignment::Center)
|
||||
.wrap(Wrap { trim: true }),
|
||||
constrained,
|
||||
);
|
||||
}
|
||||
|
||||
let button_area = Rect {
|
||||
x: chunks[2].x,
|
||||
y: chunks[2].y,
|
||||
width: chunks[2].width,
|
||||
height: chunks[2].height.saturating_sub(1),
|
||||
};
|
||||
self.render_connection_error_buttons(f, button_area);
|
||||
}
|
||||
|
||||
fn render_connection_error_buttons(&self, f: &mut Frame, area: Rect) {
|
||||
let button_chunks = Layout::default()
|
||||
.direction(Direction::Horizontal)
|
||||
.constraints([
|
||||
Constraint::Percentage(30),
|
||||
Constraint::Percentage(15),
|
||||
Constraint::Percentage(10),
|
||||
Constraint::Percentage(15),
|
||||
Constraint::Percentage(30),
|
||||
])
|
||||
.split(area);
|
||||
let retry_style = if self.active_button == ModalButton::Retry {
|
||||
Style::default()
|
||||
.bg(BTN_RETRY_BG_ACTIVE)
|
||||
.fg(BTN_RETRY_FG_ACTIVE)
|
||||
.add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
Style::default()
|
||||
.fg(BTN_RETRY_FG_INACTIVE)
|
||||
.add_modifier(Modifier::DIM)
|
||||
};
|
||||
let exit_style = if self.active_button == ModalButton::Exit {
|
||||
Style::default()
|
||||
.bg(BTN_EXIT_BG_ACTIVE)
|
||||
.fg(BTN_EXIT_FG_ACTIVE)
|
||||
.add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
Style::default()
|
||||
.fg(BTN_EXIT_FG_INACTIVE)
|
||||
.add_modifier(Modifier::DIM)
|
||||
};
|
||||
f.render_widget(
|
||||
Paragraph::new(Text::from(Line::from(vec![Span::styled(
|
||||
BTN_RETRY_TEXT,
|
||||
retry_style,
|
||||
)])))
|
||||
.alignment(Alignment::Center),
|
||||
button_chunks[1],
|
||||
);
|
||||
f.render_widget(
|
||||
Paragraph::new(Text::from(Line::from(vec![Span::styled(
|
||||
BTN_EXIT_TEXT,
|
||||
exit_style,
|
||||
)])))
|
||||
.alignment(Alignment::Center),
|
||||
button_chunks[3],
|
||||
);
|
||||
}
|
||||
|
||||
fn render_confirmation(
|
||||
&self,
|
||||
f: &mut Frame,
|
||||
@ -423,196 +577,6 @@ impl ModalManager {
|
||||
);
|
||||
}
|
||||
|
||||
fn render_about(&self, f: &mut Frame, area: Rect) {
|
||||
//get ASCII art from a constant stored in theme.rs
|
||||
use super::theme::ASCII_ART;
|
||||
|
||||
let version = env!("CARGO_PKG_VERSION");
|
||||
|
||||
let about_text = format!(
|
||||
"{}\n\
|
||||
Version {}\n\
|
||||
\n\
|
||||
A terminal first remote monitoring tool\n\
|
||||
\n\
|
||||
Website: https://socktop.io\n\
|
||||
GitHub: https://github.com/jasonwitty/socktop\n\
|
||||
\n\
|
||||
License: MIT License\n\
|
||||
\n\
|
||||
Created by Jason Witty\n\
|
||||
jasonpwitty+socktop@proton.me",
|
||||
ASCII_ART, version
|
||||
);
|
||||
|
||||
// Render the border block
|
||||
let block = Block::default()
|
||||
.title(" About socktop ")
|
||||
.borders(Borders::ALL)
|
||||
.style(Style::default().bg(Color::Black).fg(Color::DarkGray));
|
||||
f.render_widget(block, area);
|
||||
|
||||
// Calculate inner area manually to avoid any parent styling
|
||||
let inner_area = Rect {
|
||||
x: area.x + 1,
|
||||
y: area.y + 1,
|
||||
width: area.width.saturating_sub(2),
|
||||
height: area.height.saturating_sub(2), // Leave room for button at bottom
|
||||
};
|
||||
|
||||
// Render content area with explicit black background
|
||||
f.render_widget(
|
||||
Paragraph::new(about_text)
|
||||
.style(Style::default().fg(Color::Cyan).bg(Color::Black))
|
||||
.alignment(Alignment::Center)
|
||||
.wrap(Wrap { trim: false }),
|
||||
inner_area,
|
||||
);
|
||||
|
||||
// Button area
|
||||
let button_area = Rect {
|
||||
x: area.x + 1,
|
||||
y: area.y + area.height.saturating_sub(2),
|
||||
width: area.width.saturating_sub(2),
|
||||
height: 1,
|
||||
};
|
||||
|
||||
let ok_style = if self.active_button == ModalButton::Ok {
|
||||
Style::default()
|
||||
.bg(Color::Blue)
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
Style::default().fg(Color::Blue).bg(Color::Black)
|
||||
};
|
||||
|
||||
f.render_widget(
|
||||
Paragraph::new("[ Enter ] Close")
|
||||
.style(ok_style)
|
||||
.alignment(Alignment::Center),
|
||||
button_area,
|
||||
);
|
||||
}
|
||||
|
||||
fn render_help(&self, f: &mut Frame, area: Rect) {
|
||||
let help_lines = vec![
|
||||
"GLOBAL",
|
||||
" q/Q/Esc ........ Quit │ a/A ....... About │ h/H ....... Help",
|
||||
"",
|
||||
"PROCESS LIST",
|
||||
" / .............. Start/edit fuzzy search",
|
||||
" c/C ............ Clear search filter",
|
||||
" ↑/↓ ............ Select/navigate processes",
|
||||
" Enter .......... Open Process Details",
|
||||
" x/X ............ Clear selection",
|
||||
" Click header ... Sort by column (CPU/Mem)",
|
||||
" Click row ...... Select process",
|
||||
"",
|
||||
"SEARCH MODE (after pressing /)",
|
||||
" Type ........... Enter search query (fuzzy match)",
|
||||
" ↑/↓ ............ Navigate results while typing",
|
||||
" Esc ............ Cancel search and clear filter",
|
||||
" Enter .......... Apply filter and select first result",
|
||||
"",
|
||||
"CPU PER-CORE",
|
||||
" ←/→ ............ Scroll cores │ PgUp/PgDn ... Page up/down",
|
||||
" Home/End ....... Jump to first/last core",
|
||||
"",
|
||||
"PROCESS DETAILS MODAL",
|
||||
" x/X ............ Close modal (all parent modals)",
|
||||
" p/P ............ Navigate to parent process",
|
||||
" j/k ............ Scroll threads ↓/↑ (1 line)",
|
||||
" d/u ............ Scroll threads ↓/↑ (10 lines)",
|
||||
" [ / ] .......... Scroll journal ↑/↓",
|
||||
" Esc/Enter ...... Close modal",
|
||||
"",
|
||||
"MODAL NAVIGATION",
|
||||
" Tab/→ .......... Next button │ Shift+Tab/← ... Previous button",
|
||||
" Enter .......... Confirm/OK │ Esc ............ Cancel/Close",
|
||||
];
|
||||
|
||||
// Render the border block
|
||||
let block = Block::default()
|
||||
.title(" Hotkey Help (use ↑/↓ to scroll) ")
|
||||
.borders(Borders::ALL)
|
||||
.style(Style::default().bg(Color::Black).fg(Color::DarkGray));
|
||||
f.render_widget(block, area);
|
||||
|
||||
// Split into content area and button area
|
||||
let chunks = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
.constraints([Constraint::Min(1), Constraint::Length(1)])
|
||||
.split(Rect {
|
||||
x: area.x + 1,
|
||||
y: area.y + 1,
|
||||
width: area.width.saturating_sub(2),
|
||||
height: area.height.saturating_sub(2),
|
||||
});
|
||||
|
||||
let content_area = chunks[0];
|
||||
let button_area = chunks[1];
|
||||
|
||||
// Calculate visible window
|
||||
let visible_height = content_area.height as usize;
|
||||
let total_lines = help_lines.len();
|
||||
let max_scroll = total_lines.saturating_sub(visible_height);
|
||||
let scroll_offset = self.help_scroll_offset.min(max_scroll);
|
||||
|
||||
// Get visible lines
|
||||
let visible_lines: Vec<Line> = help_lines
|
||||
.iter()
|
||||
.skip(scroll_offset)
|
||||
.take(visible_height)
|
||||
.map(|s| Line::from(*s))
|
||||
.collect();
|
||||
|
||||
// Render scrollable content
|
||||
f.render_widget(
|
||||
Paragraph::new(visible_lines)
|
||||
.style(Style::default().fg(Color::Cyan).bg(Color::Black))
|
||||
.alignment(Alignment::Left),
|
||||
content_area,
|
||||
);
|
||||
|
||||
// Render scrollbar if needed
|
||||
if total_lines > visible_height {
|
||||
use ratatui::widgets::{Scrollbar, ScrollbarOrientation, ScrollbarState};
|
||||
|
||||
let scrollbar_area = Rect {
|
||||
x: area.x + area.width.saturating_sub(2),
|
||||
y: area.y + 1,
|
||||
width: 1,
|
||||
height: area.height.saturating_sub(2),
|
||||
};
|
||||
|
||||
let mut scrollbar_state = ScrollbarState::new(max_scroll).position(scroll_offset);
|
||||
|
||||
let scrollbar = Scrollbar::new(ScrollbarOrientation::VerticalRight)
|
||||
.begin_symbol(Some("↑"))
|
||||
.end_symbol(Some("↓"))
|
||||
.style(Style::default().fg(Color::DarkGray));
|
||||
|
||||
f.render_stateful_widget(scrollbar, scrollbar_area, &mut scrollbar_state);
|
||||
}
|
||||
|
||||
// Button area
|
||||
let ok_style = if self.active_button == ModalButton::Ok {
|
||||
Style::default()
|
||||
.bg(Color::Blue)
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
Style::default().fg(Color::Blue).bg(Color::Black)
|
||||
};
|
||||
|
||||
f.render_widget(
|
||||
Paragraph::new("[ Enter ] Close")
|
||||
.style(ok_style)
|
||||
.alignment(Alignment::Center),
|
||||
button_area,
|
||||
);
|
||||
}
|
||||
|
||||
fn centered_rect(&self, percent_x: u16, percent_y: u16, r: Rect) -> Rect {
|
||||
let vert = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
@ -632,3 +596,17 @@ impl ModalManager {
|
||||
.split(vert[1])[1]
|
||||
}
|
||||
}
|
||||
|
||||
fn format_duration(duration: Duration) -> String {
|
||||
let total = duration.as_secs();
|
||||
let h = total / 3600;
|
||||
let m = (total % 3600) / 60;
|
||||
let s = total % 60;
|
||||
if h > 0 {
|
||||
format!("{h}h {m}m {s}s")
|
||||
} else if m > 0 {
|
||||
format!("{m}m {s}s")
|
||||
} else {
|
||||
format!("{s}s")
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,297 +0,0 @@
|
||||
//! Connection error modal rendering
|
||||
|
||||
use std::time::Instant;
|
||||
|
||||
use super::modal_format::format_duration;
|
||||
use super::theme::{
|
||||
BTN_EXIT_BG_ACTIVE, BTN_EXIT_FG_ACTIVE, BTN_EXIT_FG_INACTIVE, BTN_EXIT_TEXT,
|
||||
BTN_RETRY_BG_ACTIVE, BTN_RETRY_FG_ACTIVE, BTN_RETRY_FG_INACTIVE, BTN_RETRY_TEXT, ICON_CLUSTER,
|
||||
ICON_COUNTDOWN_LABEL, ICON_MESSAGE, ICON_OFFLINE_LABEL, ICON_RETRY_LABEL, ICON_WARNING_TITLE,
|
||||
LARGE_ERROR_ICON, MODAL_AGENT_FG, MODAL_BG, MODAL_BORDER_FG, MODAL_COUNTDOWN_LABEL_FG,
|
||||
MODAL_FG, MODAL_HINT_FG, MODAL_ICON_PINK, MODAL_OFFLINE_LABEL_FG, MODAL_RETRY_LABEL_FG,
|
||||
MODAL_TITLE_FG,
|
||||
};
|
||||
use ratatui::{
|
||||
Frame,
|
||||
layout::{Alignment, Constraint, Direction, Layout, Rect},
|
||||
style::{Color, Modifier, Style},
|
||||
text::{Line, Span, Text},
|
||||
widgets::{Block, Borders, Paragraph, Wrap},
|
||||
};
|
||||
|
||||
use super::modal::{ModalButton, ModalManager};
|
||||
|
||||
impl ModalManager {
|
||||
pub(super) fn render_connection_error(
|
||||
&self,
|
||||
f: &mut Frame,
|
||||
area: Rect,
|
||||
message: &str,
|
||||
disconnected_at: Instant,
|
||||
retry_count: u32,
|
||||
auto_retry_countdown: Option<u64>,
|
||||
) {
|
||||
let duration_text = format_duration(disconnected_at.elapsed());
|
||||
let chunks = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
.constraints([
|
||||
Constraint::Length(3),
|
||||
Constraint::Min(4),
|
||||
Constraint::Length(4),
|
||||
])
|
||||
.split(area);
|
||||
let block = Block::default()
|
||||
.title(ICON_WARNING_TITLE)
|
||||
.title_style(
|
||||
Style::default()
|
||||
.fg(MODAL_TITLE_FG)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)
|
||||
.borders(Borders::ALL)
|
||||
.border_style(Style::default().fg(MODAL_BORDER_FG))
|
||||
.style(Style::default().bg(MODAL_BG).fg(MODAL_FG));
|
||||
f.render_widget(block, area);
|
||||
|
||||
let content_area = chunks[1];
|
||||
let max_w = content_area.width.saturating_sub(15) as usize;
|
||||
let clean_message = if message.to_lowercase().contains("hostname verification")
|
||||
|| message.contains("socktop_connector")
|
||||
{
|
||||
"Connection failed - hostname verification disabled".to_string()
|
||||
} else if message.contains("Failed to fetch metrics:") {
|
||||
if let Some(p) = message.find(':') {
|
||||
let ess = message[p + 1..].trim();
|
||||
if ess.len() > max_w {
|
||||
format!("{}...", &ess[..max_w.saturating_sub(3)])
|
||||
} else {
|
||||
ess.to_string()
|
||||
}
|
||||
} else {
|
||||
"Connection error".to_string()
|
||||
}
|
||||
} else if message.starts_with("Retry failed:") {
|
||||
if let Some(p) = message.find(':') {
|
||||
let ess = message[p + 1..].trim();
|
||||
if ess.len() > max_w {
|
||||
format!("{}...", &ess[..max_w.saturating_sub(3)])
|
||||
} else {
|
||||
ess.to_string()
|
||||
}
|
||||
} else {
|
||||
"Retry failed".to_string()
|
||||
}
|
||||
} else if message.len() > max_w {
|
||||
format!("{}...", &message[..max_w.saturating_sub(3)])
|
||||
} else {
|
||||
message.to_string()
|
||||
};
|
||||
let truncate = |s: &str| {
|
||||
if s.len() > max_w {
|
||||
format!("{}...", &s[..max_w.saturating_sub(3)])
|
||||
} else {
|
||||
s.to_string()
|
||||
}
|
||||
};
|
||||
let agent_text = truncate("📡 Cannot connect to socktop agent");
|
||||
let message_text = truncate(&clean_message);
|
||||
let duration_display = truncate(&duration_text);
|
||||
let retry_display = truncate(&retry_count.to_string());
|
||||
let countdown_text = auto_retry_countdown.map(|c| {
|
||||
if c == 0 {
|
||||
"Auto retry now...".to_string()
|
||||
} else {
|
||||
format!("{c}s")
|
||||
}
|
||||
});
|
||||
|
||||
// Determine if we have enough space (height + width) to show large centered icon
|
||||
let icon_max_width = LARGE_ERROR_ICON
|
||||
.iter()
|
||||
.map(|l| l.trim().chars().count())
|
||||
.max()
|
||||
.unwrap_or(0) as u16;
|
||||
let large_allowed = content_area.height >= (LARGE_ERROR_ICON.len() as u16 + 8)
|
||||
&& content_area.width >= icon_max_width + 6; // small margin for borders/padding
|
||||
let mut icon_lines: Vec<Line> = Vec::new();
|
||||
if large_allowed {
|
||||
for &raw in LARGE_ERROR_ICON.iter() {
|
||||
let trimmed = raw.trim();
|
||||
icon_lines.push(Line::from(
|
||||
trimmed
|
||||
.chars()
|
||||
.map(|ch| {
|
||||
if ch == '!' {
|
||||
Span::styled(
|
||||
ch.to_string(),
|
||||
Style::default()
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)
|
||||
} else if ch == '/' || ch == '\\' || ch == '_' {
|
||||
// keep outline in pink
|
||||
Span::styled(
|
||||
ch.to_string(),
|
||||
Style::default()
|
||||
.fg(MODAL_ICON_PINK)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)
|
||||
} else if ch == ' ' {
|
||||
Span::raw(" ")
|
||||
} else {
|
||||
Span::styled(ch.to_string(), Style::default().fg(MODAL_ICON_PINK))
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>(),
|
||||
));
|
||||
}
|
||||
icon_lines.push(Line::from("")); // blank spacer line below icon
|
||||
}
|
||||
|
||||
let mut info_lines: Vec<Line> = Vec::new();
|
||||
if !large_allowed {
|
||||
info_lines.push(Line::from(vec![Span::styled(
|
||||
ICON_CLUSTER,
|
||||
Style::default().fg(MODAL_ICON_PINK),
|
||||
)]));
|
||||
info_lines.push(Line::from(""));
|
||||
}
|
||||
info_lines.push(Line::from(vec![Span::styled(
|
||||
&agent_text,
|
||||
Style::default().fg(MODAL_AGENT_FG),
|
||||
)]));
|
||||
info_lines.push(Line::from(""));
|
||||
info_lines.push(Line::from(vec![
|
||||
Span::styled(ICON_MESSAGE, Style::default().fg(MODAL_HINT_FG)),
|
||||
Span::styled(&message_text, Style::default().fg(MODAL_AGENT_FG)),
|
||||
]));
|
||||
info_lines.push(Line::from(""));
|
||||
info_lines.push(Line::from(vec![
|
||||
Span::styled(
|
||||
ICON_OFFLINE_LABEL,
|
||||
Style::default().fg(MODAL_OFFLINE_LABEL_FG),
|
||||
),
|
||||
Span::styled(
|
||||
&duration_display,
|
||||
Style::default()
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
]));
|
||||
info_lines.push(Line::from(vec![
|
||||
Span::styled(ICON_RETRY_LABEL, Style::default().fg(MODAL_RETRY_LABEL_FG)),
|
||||
Span::styled(
|
||||
&retry_display,
|
||||
Style::default()
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
]));
|
||||
if let Some(cd) = &countdown_text {
|
||||
info_lines.push(Line::from(vec![
|
||||
Span::styled(
|
||||
ICON_COUNTDOWN_LABEL,
|
||||
Style::default().fg(MODAL_COUNTDOWN_LABEL_FG),
|
||||
),
|
||||
Span::styled(
|
||||
cd,
|
||||
Style::default()
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
]));
|
||||
}
|
||||
|
||||
let constrained = Rect {
|
||||
x: content_area.x + 2,
|
||||
y: content_area.y,
|
||||
width: content_area.width.saturating_sub(4),
|
||||
height: content_area.height,
|
||||
};
|
||||
if large_allowed {
|
||||
let split = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
.constraints([
|
||||
Constraint::Length(icon_lines.len() as u16),
|
||||
Constraint::Min(0),
|
||||
])
|
||||
.split(constrained);
|
||||
// Center the icon block; each line already trimmed so per-line centering keeps shape
|
||||
f.render_widget(
|
||||
Paragraph::new(Text::from(icon_lines))
|
||||
.alignment(Alignment::Center)
|
||||
.wrap(Wrap { trim: false }),
|
||||
split[0],
|
||||
);
|
||||
f.render_widget(
|
||||
Paragraph::new(Text::from(info_lines))
|
||||
.alignment(Alignment::Center)
|
||||
.wrap(Wrap { trim: true }),
|
||||
split[1],
|
||||
);
|
||||
} else {
|
||||
f.render_widget(
|
||||
Paragraph::new(Text::from(info_lines))
|
||||
.alignment(Alignment::Center)
|
||||
.wrap(Wrap { trim: true }),
|
||||
constrained,
|
||||
);
|
||||
}
|
||||
|
||||
let button_area = Rect {
|
||||
x: chunks[2].x,
|
||||
y: chunks[2].y,
|
||||
width: chunks[2].width,
|
||||
height: chunks[2].height.saturating_sub(1),
|
||||
};
|
||||
self.render_connection_error_buttons(f, button_area);
|
||||
}
|
||||
|
||||
fn render_connection_error_buttons(&self, f: &mut Frame, area: Rect) {
|
||||
let button_chunks = Layout::default()
|
||||
.direction(Direction::Horizontal)
|
||||
.constraints([
|
||||
Constraint::Percentage(30),
|
||||
Constraint::Percentage(15),
|
||||
Constraint::Percentage(10),
|
||||
Constraint::Percentage(15),
|
||||
Constraint::Percentage(30),
|
||||
])
|
||||
.split(area);
|
||||
let retry_style = if self.active_button == ModalButton::Retry {
|
||||
Style::default()
|
||||
.bg(BTN_RETRY_BG_ACTIVE)
|
||||
.fg(BTN_RETRY_FG_ACTIVE)
|
||||
.add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
Style::default()
|
||||
.fg(BTN_RETRY_FG_INACTIVE)
|
||||
.add_modifier(Modifier::DIM)
|
||||
};
|
||||
let exit_style = if self.active_button == ModalButton::Exit {
|
||||
Style::default()
|
||||
.bg(BTN_EXIT_BG_ACTIVE)
|
||||
.fg(BTN_EXIT_FG_ACTIVE)
|
||||
.add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
Style::default()
|
||||
.fg(BTN_EXIT_FG_INACTIVE)
|
||||
.add_modifier(Modifier::DIM)
|
||||
};
|
||||
f.render_widget(
|
||||
Paragraph::new(Text::from(Line::from(vec![Span::styled(
|
||||
BTN_RETRY_TEXT,
|
||||
retry_style,
|
||||
)])))
|
||||
.alignment(Alignment::Center),
|
||||
button_chunks[1],
|
||||
);
|
||||
f.render_widget(
|
||||
Paragraph::new(Text::from(Line::from(vec![Span::styled(
|
||||
BTN_EXIT_TEXT,
|
||||
exit_style,
|
||||
)])))
|
||||
.alignment(Alignment::Center),
|
||||
button_chunks[3],
|
||||
);
|
||||
}
|
||||
}
|
||||
@ -1,112 +0,0 @@
|
||||
//! Formatting utilities for process details modal
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
/// Format an uptime given in whole seconds as a short human-readable string.
///
/// At most three units are shown, dropping seconds once days are involved:
/// "1d 2h 3m", "2h 3m 4s", "3m 4s", or "4s".
pub fn format_uptime(secs: u64) -> String {
    const DAY: u64 = 86_400;
    const HOUR: u64 = 3_600;
    const MINUTE: u64 = 60;

    let days = secs / DAY;
    let hours = (secs % DAY) / HOUR;
    let minutes = (secs % HOUR) / MINUTE;
    let seconds = secs % MINUTE;

    match (days, hours, minutes) {
        (d, h, m) if d > 0 => format!("{d}d {h}h {m}m"),
        (_, h, m) if h > 0 => format!("{h}h {m}m {seconds}s"),
        (_, _, m) if m > 0 => format!("{m}m {seconds}s"),
        _ => format!("{seconds}s"),
    }
}
||||
|
||||
/// Render a `Duration` as "Hh Mm Ss", dropping leading zero units
/// ("1h 1m 5s", "2m 5s", or "45s").
pub fn format_duration(duration: Duration) -> String {
    let secs = duration.as_secs();
    let (hours, mins, rest) = (secs / 3600, (secs % 3600) / 60, secs % 60);
    match (hours, mins) {
        (h, m) if h > 0 => format!("{h}h {m}m {rest}s"),
        (_, m) if m > 0 => format!("{m}m {rest}s"),
        _ => format!("{rest}s"),
    }
}
|
||||
|
||||
/// Normalize a per-process CPU reading to at most 100% by dividing by the
/// thread count (readings can exceed 100% when summed across threads).
/// A count of zero is treated as one thread to avoid dividing by zero.
pub fn normalize_cpu_usage(cpu_usage: f32, thread_count: u32) -> f32 {
    let divisor = if thread_count == 0 { 1.0 } else { thread_count as f32 };
    f32::min(cpu_usage / divisor, 100.0)
}
|
||||
|
||||
/// Round `max_value` up to the next multiple of 10, clamped to [10, 100],
/// for use as a chart Y-axis maximum.
pub fn calculate_dynamic_y_max(max_value: f64) -> f64 {
    let next_multiple_of_ten = (max_value / 10.0).ceil() * 10.0;
    next_multiple_of_ten.clamp(10.0, 100.0)
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // --- format_uptime: one test per magnitude bucket ---

    #[test]
    fn test_format_uptime_seconds() {
        assert_eq!(format_uptime(45), "45s");
    }

    #[test]
    fn test_format_uptime_minutes() {
        assert_eq!(format_uptime(125), "2m 5s");
    }

    #[test]
    fn test_format_uptime_hours() {
        assert_eq!(format_uptime(3665), "1h 1m 5s");
    }

    #[test]
    fn test_format_uptime_days() {
        // Seconds are dropped once the uptime reaches a full day.
        assert_eq!(format_uptime(90061), "1d 1h 1m");
    }

    // --- normalize_cpu_usage: thread-count division and clamping ---

    #[test]
    fn test_normalize_cpu_single_thread() {
        assert_eq!(normalize_cpu_usage(50.0, 1), 50.0);
    }

    #[test]
    fn test_normalize_cpu_multi_thread() {
        assert_eq!(normalize_cpu_usage(400.0, 4), 100.0);
    }

    #[test]
    fn test_normalize_cpu_zero_threads() {
        // Should default to 1 thread to avoid division by zero
        assert_eq!(normalize_cpu_usage(100.0, 0), 100.0);
    }

    #[test]
    fn test_normalize_cpu_caps_at_100() {
        assert_eq!(normalize_cpu_usage(150.0, 1), 100.0);
    }

    // --- calculate_dynamic_y_max: rounding and the [10, 100] clamp ---

    #[test]
    fn test_dynamic_y_max_rounds_up() {
        assert_eq!(calculate_dynamic_y_max(15.0), 20.0);
        assert_eq!(calculate_dynamic_y_max(25.0), 30.0);
        assert_eq!(calculate_dynamic_y_max(5.0), 10.0);
    }

    #[test]
    fn test_dynamic_y_max_minimum() {
        assert_eq!(calculate_dynamic_y_max(0.0), 10.0);
        assert_eq!(calculate_dynamic_y_max(3.0), 10.0);
    }

    #[test]
    fn test_dynamic_y_max_caps_at_100() {
        assert_eq!(calculate_dynamic_y_max(95.0), 100.0);
        assert_eq!(calculate_dynamic_y_max(100.0), 100.0);
    }
}
|
||||
File diff suppressed because it is too large
Load Diff
@ -1,77 +0,0 @@
|
||||
//! Type definitions for modal system
|
||||
|
||||
use std::time::Instant;
|
||||
|
||||
/// History data for process metrics rendering.
///
/// Borrowed ring buffers of recent per-process samples; borrowing keeps
/// rendering allocation-free. Units below are inferred from the field
/// names — confirm against the code that populates these buffers.
pub struct ProcessHistoryData<'a> {
    // CPU usage samples (presumably percent).
    pub cpu: &'a std::collections::VecDeque<f32>,
    // Memory usage samples (presumably bytes).
    pub mem: &'a std::collections::VecDeque<u64>,
    // Disk I/O read samples.
    pub io_read: &'a std::collections::VecDeque<u64>,
    // Disk I/O write samples.
    pub io_write: &'a std::collections::VecDeque<u64>,
}
|
||||
|
||||
/// Process data for modal rendering.
///
/// Aggregates everything the process-details modal needs for one frame.
pub struct ProcessModalData<'a> {
    // Latest detailed metrics response from the agent, when one has arrived.
    pub details: Option<&'a socktop_connector::ProcessMetricsResponse>,
    // Journal/log entries for the process, when available.
    pub journal: Option<&'a socktop_connector::JournalResponse>,
    // Sampled history series used for the charts.
    pub history: ProcessHistoryData<'a>,
    // Maximum memory in bytes — NOTE(review): presumably used to scale the
    // memory chart axis; confirm against the renderer.
    pub max_mem_bytes: u64,
    // True when per-process details are not supported — NOTE(review): the
    // exact condition is set by the caller; verify there.
    pub unsupported: bool,
}
|
||||
|
||||
/// Parameters for rendering scatter plot.
pub(super) struct ScatterPlotParams<'a> {
    // The process whose data is being plotted.
    pub process: &'a socktop_connector::DetailedProcessInfo,
    // Main thread CPU time in user mode (milliseconds, per the field name).
    pub main_user_ms: f64,
    // Main thread CPU time in system/kernel mode (milliseconds).
    pub main_system_ms: f64,
    // Axis maxima for the user and system dimensions.
    pub max_user: f64,
    pub max_system: f64,
}
|
||||
|
||||
/// The kinds of modal dialog the UI can display, with the state each needs.
#[derive(Debug, Clone)]
pub enum ModalType {
    /// Connection to the agent was lost; carries the error and retry state.
    ConnectionError {
        /// Error text to display (the renderer may truncate it).
        message: String,
        /// When the connection was lost; used to show time offline.
        disconnected_at: Instant,
        /// Number of reconnect attempts made so far.
        retry_count: u32,
        /// Seconds until the next automatic retry; `None` hides the countdown.
        auto_retry_countdown: Option<u64>,
    },
    /// Detailed metrics view for a single process.
    ProcessDetails {
        pid: u32,
    },
    About,
    Help,
    /// Generic yes/no dialog; not constructed anywhere yet, hence the allow.
    #[allow(dead_code)]
    Confirmation {
        title: String,
        message: String,
        confirm_text: String,
        cancel_text: String,
    },
    /// Generic informational dialog; not constructed anywhere yet.
    #[allow(dead_code)]
    Info {
        title: String,
        message: String,
    },
}
|
||||
|
||||
/// Result of a modal handling an input event; tells the caller what to do next.
#[derive(Debug, Clone, PartialEq)]
pub enum ModalAction {
    None,    // Modal didn't handle the key, pass to main window
    Handled, // Modal handled the key, don't pass to main window
    /// Retry the agent connection (from the connection-error modal).
    RetryConnection,
    /// Quit the application.
    ExitApp,
    Confirm,
    Cancel,
    Dismiss,
    SwitchToParentProcess(u32), // Switch to viewing parent process details
}
|
||||
|
||||
/// Buttons that can hold focus inside a modal; the renderer compares against
/// the active button to decide which one is drawn highlighted.
#[derive(Debug, Clone, PartialEq)]
pub enum ModalButton {
    Retry,
    Exit,
    Confirm,
    Cancel,
    Ok,
}
|
||||
@ -12,72 +12,9 @@ use std::cmp::Ordering;
|
||||
|
||||
use crate::types::Metrics;
|
||||
use crate::ui::cpu::{per_core_clamp, per_core_handle_scrollbar_mouse};
|
||||
use crate::ui::theme::{
|
||||
PROCESS_SELECTION_BG, PROCESS_SELECTION_FG, PROCESS_TOOLTIP_BG, PROCESS_TOOLTIP_FG, SB_ARROW,
|
||||
SB_THUMB, SB_TRACK,
|
||||
};
|
||||
use crate::ui::theme::{SB_ARROW, SB_THUMB, SB_TRACK};
|
||||
use crate::ui::util::human;
|
||||
|
||||
/// Simple fuzzy matching: true when every character of `needle` occurs in
/// `haystack` in the same relative order (case-insensitive). An empty
/// needle matches everything.
fn fuzzy_match(haystack: &str, needle: &str) -> bool {
    if needle.is_empty() {
        return true;
    }
    let hay = haystack.to_lowercase();
    let mut remaining = hay.chars();
    // Each needle char consumes haystack chars up to and including its match,
    // so order is enforced; any char with no match left fails the whole test.
    needle
        .to_lowercase()
        .chars()
        .all(|wanted| remaining.any(|c| c == wanted))
}
|
||||
|
||||
/// Get filtered and sorted process indices based on search query and sort order
|
||||
pub fn get_filtered_sorted_indices(
|
||||
metrics: &Metrics,
|
||||
search_query: &str,
|
||||
sort_by: ProcSortBy,
|
||||
) -> Vec<usize> {
|
||||
// Filter processes by search query (fuzzy match)
|
||||
let mut filtered_idxs: Vec<usize> = if search_query.is_empty() {
|
||||
(0..metrics.top_processes.len()).collect()
|
||||
} else {
|
||||
(0..metrics.top_processes.len())
|
||||
.filter(|&i| fuzzy_match(&metrics.top_processes[i].name, search_query))
|
||||
.collect()
|
||||
};
|
||||
|
||||
// Sort filtered rows
|
||||
match sort_by {
|
||||
ProcSortBy::CpuDesc => filtered_idxs.sort_by(|&a, &b| {
|
||||
let aa = metrics.top_processes[a].cpu_usage;
|
||||
let bb = metrics.top_processes[b].cpu_usage;
|
||||
bb.partial_cmp(&aa).unwrap_or(Ordering::Equal)
|
||||
}),
|
||||
ProcSortBy::MemDesc => filtered_idxs.sort_by(|&a, &b| {
|
||||
let aa = metrics.top_processes[a].mem_bytes;
|
||||
let bb = metrics.top_processes[b].mem_bytes;
|
||||
bb.cmp(&aa)
|
||||
}),
|
||||
}
|
||||
|
||||
filtered_idxs
|
||||
}
|
||||
|
||||
/// Parameters for drawing the top processes table.
pub struct ProcessDisplayParams<'a> {
    // Latest metrics snapshot; the table is not drawn when `None`.
    pub metrics: Option<&'a Metrics>,
    // First visible row; the renderer clamps this to the valid range.
    pub scroll_offset: usize,
    // Current sort column (shown with a dot marker in the header).
    pub sort_by: ProcSortBy,
    // Current selection: by PID (takes precedence) or by absolute index
    // into the sorted list.
    pub selected_process_pid: Option<u32>,
    pub selected_process_index: Option<usize>,
    // Fuzzy filter text; an empty string means no filtering.
    pub search_query: &'a str,
    // True while the user is editing the search box (shows the input prompt).
    pub search_active: bool,
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||
pub enum ProcSortBy {
|
||||
#[default]
|
||||
@ -94,61 +31,28 @@ const COLS: [Constraint; 5] = [
|
||||
Constraint::Length(8), // Mem %
|
||||
];
|
||||
|
||||
pub fn draw_top_processes(f: &mut ratatui::Frame<'_>, area: Rect, params: ProcessDisplayParams) {
|
||||
pub fn draw_top_processes(
|
||||
f: &mut ratatui::Frame<'_>,
|
||||
area: Rect,
|
||||
m: Option<&Metrics>,
|
||||
scroll_offset: usize,
|
||||
sort_by: ProcSortBy,
|
||||
) {
|
||||
// Draw outer block and title
|
||||
let Some(mm) = params.metrics else { return };
|
||||
let Some(mm) = m else { return };
|
||||
let total = mm.process_count.unwrap_or(mm.top_processes.len());
|
||||
let block = Block::default()
|
||||
.borders(Borders::ALL)
|
||||
.title(format!("Top Processes ({total} total)"));
|
||||
f.render_widget(block, area);
|
||||
|
||||
// Inner area (reserve space for search box if active)
|
||||
// Inner area and content area (reserve 2 columns for scrollbar)
|
||||
let inner = Rect {
|
||||
x: area.x + 1,
|
||||
y: area.y + 1,
|
||||
width: area.width.saturating_sub(2),
|
||||
height: area.height.saturating_sub(2),
|
||||
};
|
||||
|
||||
// Draw search box if active
|
||||
let content_start_y = if params.search_active || !params.search_query.is_empty() {
|
||||
let search_area = Rect {
|
||||
x: inner.x,
|
||||
y: inner.y,
|
||||
width: inner.width,
|
||||
height: 3, // Height for border + content
|
||||
};
|
||||
|
||||
let search_text = if params.search_active {
|
||||
format!("Search: {}_", params.search_query)
|
||||
} else {
|
||||
format!(
|
||||
"Filter: {} (press / to edit, c to clear)",
|
||||
params.search_query
|
||||
)
|
||||
};
|
||||
|
||||
let search_block = Block::default()
|
||||
.borders(Borders::ALL)
|
||||
.border_style(Style::default().fg(Color::Yellow));
|
||||
let search_paragraph = Paragraph::new(search_text)
|
||||
.block(search_block)
|
||||
.style(Style::default().fg(Color::Yellow));
|
||||
f.render_widget(search_paragraph, search_area);
|
||||
|
||||
inner.y + 3
|
||||
} else {
|
||||
inner.y
|
||||
};
|
||||
|
||||
// Content area (reserve 2 columns for scrollbar)
|
||||
let inner = Rect {
|
||||
x: inner.x,
|
||||
y: content_start_y,
|
||||
width: inner.width,
|
||||
height: inner.height.saturating_sub(content_start_y - (area.y + 1)),
|
||||
};
|
||||
if inner.height < 1 || inner.width < 3 {
|
||||
return;
|
||||
}
|
||||
@ -159,15 +63,27 @@ pub fn draw_top_processes(f: &mut ratatui::Frame<'_>, area: Rect, params: Proces
|
||||
height: inner.height,
|
||||
};
|
||||
|
||||
// Get filtered and sorted indices
|
||||
let idxs = get_filtered_sorted_indices(mm, params.search_query, params.sort_by);
|
||||
// Sort rows (by CPU% or Mem bytes), descending.
|
||||
let mut idxs: Vec<usize> = (0..mm.top_processes.len()).collect();
|
||||
match sort_by {
|
||||
ProcSortBy::CpuDesc => idxs.sort_by(|&a, &b| {
|
||||
let aa = mm.top_processes[a].cpu_usage;
|
||||
let bb = mm.top_processes[b].cpu_usage;
|
||||
bb.partial_cmp(&aa).unwrap_or(Ordering::Equal)
|
||||
}),
|
||||
ProcSortBy::MemDesc => idxs.sort_by(|&a, &b| {
|
||||
let aa = mm.top_processes[a].mem_bytes;
|
||||
let bb = mm.top_processes[b].mem_bytes;
|
||||
bb.cmp(&aa)
|
||||
}),
|
||||
}
|
||||
|
||||
// Scrolling
|
||||
let total_rows = idxs.len();
|
||||
let header_rows = 1usize;
|
||||
let viewport_rows = content.height.saturating_sub(header_rows as u16) as usize;
|
||||
let max_off = total_rows.saturating_sub(viewport_rows);
|
||||
let offset = params.scroll_offset.min(max_off);
|
||||
let offset = scroll_offset.min(max_off);
|
||||
let show_n = total_rows.saturating_sub(offset).min(viewport_rows);
|
||||
|
||||
// Build visible rows
|
||||
@ -194,29 +110,12 @@ pub fn draw_top_processes(f: &mut ratatui::Frame<'_>, area: Rect, params: Proces
|
||||
_ => Color::Red,
|
||||
};
|
||||
|
||||
let mut emphasis = if (cpu_val - peak_cpu).abs() < f32::EPSILON {
|
||||
let emphasis = if (cpu_val - peak_cpu).abs() < f32::EPSILON {
|
||||
Style::default().add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
Style::default()
|
||||
};
|
||||
|
||||
// Check if this process is selected - prioritize PID matching
|
||||
let is_selected = if let Some(selected_pid) = params.selected_process_pid {
|
||||
selected_pid == p.pid
|
||||
} else if let Some(selected_idx) = params.selected_process_index {
|
||||
selected_idx == ix // ix is the absolute index in the sorted list
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
// Apply selection highlighting
|
||||
if is_selected {
|
||||
emphasis = emphasis
|
||||
.bg(PROCESS_SELECTION_BG)
|
||||
.fg(PROCESS_SELECTION_FG)
|
||||
.add_modifier(Modifier::BOLD);
|
||||
}
|
||||
|
||||
let cpu_str = fmt_cpu_pct(cpu_val);
|
||||
|
||||
ratatui::widgets::Row::new(vec![
|
||||
@ -232,11 +131,11 @@ pub fn draw_top_processes(f: &mut ratatui::Frame<'_>, area: Rect, params: Proces
|
||||
});
|
||||
|
||||
// Header with sort indicator
|
||||
let cpu_hdr = match params.sort_by {
|
||||
let cpu_hdr = match sort_by {
|
||||
ProcSortBy::CpuDesc => "CPU % •",
|
||||
_ => "CPU %",
|
||||
};
|
||||
let mem_hdr = match params.sort_by {
|
||||
let mem_hdr = match sort_by {
|
||||
ProcSortBy::MemDesc => "Mem •",
|
||||
_ => "Mem",
|
||||
};
|
||||
@ -252,47 +151,6 @@ pub fn draw_top_processes(f: &mut ratatui::Frame<'_>, area: Rect, params: Proces
|
||||
.column_spacing(1);
|
||||
f.render_widget(table, content);
|
||||
|
||||
// Draw tooltip if a process is selected
|
||||
if let Some(selected_pid) = params.selected_process_pid {
|
||||
// Find the selected process to get its name
|
||||
let process_info = if let Some(metrics) = params.metrics {
|
||||
metrics
|
||||
.top_processes
|
||||
.iter()
|
||||
.find(|p| p.pid == selected_pid)
|
||||
.map(|p| format!("PID {} • {}", p.pid, p.name))
|
||||
.unwrap_or_else(|| format!("PID {selected_pid}"))
|
||||
} else {
|
||||
format!("PID {selected_pid}")
|
||||
};
|
||||
|
||||
let tooltip_text = format!("{process_info} | Enter for details • X to unselect");
|
||||
let tooltip_width = tooltip_text.len() as u16 + 2; // Add padding
|
||||
let tooltip_height = 3;
|
||||
|
||||
// Position tooltip at bottom-right of the processes area
|
||||
if area.width > tooltip_width + 2 && area.height > tooltip_height + 1 {
|
||||
let tooltip_area = Rect {
|
||||
x: area.x + area.width.saturating_sub(tooltip_width + 1),
|
||||
y: area.y + area.height.saturating_sub(tooltip_height + 1),
|
||||
width: tooltip_width,
|
||||
height: tooltip_height,
|
||||
};
|
||||
|
||||
let tooltip_block = Block::default().borders(Borders::ALL).style(
|
||||
Style::default()
|
||||
.bg(PROCESS_TOOLTIP_BG)
|
||||
.fg(PROCESS_TOOLTIP_FG),
|
||||
);
|
||||
|
||||
let tooltip_paragraph = Paragraph::new(tooltip_text)
|
||||
.block(tooltip_block)
|
||||
.wrap(ratatui::widgets::Wrap { trim: true });
|
||||
|
||||
f.render_widget(tooltip_paragraph, tooltip_area);
|
||||
}
|
||||
}
|
||||
|
||||
// Draw scrollbar like CPU pane
|
||||
let scroll_area = Rect {
|
||||
x: inner.x + inner.width.saturating_sub(1),
|
||||
@ -333,18 +191,6 @@ fn fmt_cpu_pct(v: f32) -> String {
|
||||
}
|
||||
|
||||
/// Handle keyboard scrolling (Up/Down/PageUp/PageDown/Home/End)
|
||||
/// Parameters for process key event handling.
pub struct ProcessKeyParams<'a> {
    // Selection outputs: PID and absolute index into the process list;
    // both are updated together whenever the selection moves.
    pub selected_process_pid: &'a mut Option<u32>,
    pub selected_process_index: &'a mut Option<usize>,
    // The key event to handle.
    pub key: crossterm::event::KeyEvent,
    // Latest metrics; Up/Down navigation does nothing when `None`.
    pub metrics: Option<&'a Metrics>,
    // Active sort order — must match what the table is drawn with so
    // navigation moves through rows in visual order.
    pub sort_by: ProcSortBy,
    // Active fuzzy filter; navigation only visits matching rows.
    pub search_query: &'a str,
}
|
||||
|
||||
/// LEGACY: Use processes_handle_key_with_selection for enhanced functionality
|
||||
#[allow(dead_code)]
|
||||
pub fn processes_handle_key(
|
||||
scroll_offset: &mut usize,
|
||||
key: crossterm::event::KeyEvent,
|
||||
@ -353,105 +199,8 @@ pub fn processes_handle_key(
|
||||
crate::ui::cpu::per_core_handle_key(scroll_offset, key, page_size);
|
||||
}
|
||||
|
||||
pub fn processes_handle_key_with_selection(params: ProcessKeyParams) -> bool {
|
||||
use crossterm::event::KeyCode;
|
||||
|
||||
match params.key.code {
|
||||
KeyCode::Up => {
|
||||
// Navigate through filtered and sorted results
|
||||
if let Some(m) = params.metrics {
|
||||
let idxs = get_filtered_sorted_indices(m, params.search_query, params.sort_by);
|
||||
|
||||
if idxs.is_empty() {
|
||||
// No filtered results, clear selection
|
||||
*params.selected_process_index = None;
|
||||
*params.selected_process_pid = None;
|
||||
} else if params.selected_process_index.is_none()
|
||||
|| params.selected_process_pid.is_none()
|
||||
{
|
||||
// No selection - select the first process in filtered/sorted order
|
||||
let first_idx = idxs[0];
|
||||
*params.selected_process_index = Some(first_idx);
|
||||
*params.selected_process_pid = Some(m.top_processes[first_idx].pid);
|
||||
} else if let Some(current_idx) = *params.selected_process_index {
|
||||
// Find current position in filtered/sorted list
|
||||
if let Some(pos) = idxs.iter().position(|&idx| idx == current_idx) {
|
||||
if pos > 0 {
|
||||
// Move up in filtered/sorted list
|
||||
let new_idx = idxs[pos - 1];
|
||||
*params.selected_process_index = Some(new_idx);
|
||||
*params.selected_process_pid = Some(m.top_processes[new_idx].pid);
|
||||
}
|
||||
} else {
|
||||
// Current selection not in filtered list, select first result
|
||||
let first_idx = idxs[0];
|
||||
*params.selected_process_index = Some(first_idx);
|
||||
*params.selected_process_pid = Some(m.top_processes[first_idx].pid);
|
||||
}
|
||||
}
|
||||
}
|
||||
true // Handled
|
||||
}
|
||||
KeyCode::Down => {
|
||||
// Navigate through filtered and sorted results
|
||||
if let Some(m) = params.metrics {
|
||||
let idxs = get_filtered_sorted_indices(m, params.search_query, params.sort_by);
|
||||
|
||||
if idxs.is_empty() {
|
||||
// No filtered results, clear selection
|
||||
*params.selected_process_index = None;
|
||||
*params.selected_process_pid = None;
|
||||
} else if params.selected_process_index.is_none()
|
||||
|| params.selected_process_pid.is_none()
|
||||
{
|
||||
// No selection - select the first process in filtered/sorted order
|
||||
let first_idx = idxs[0];
|
||||
*params.selected_process_index = Some(first_idx);
|
||||
*params.selected_process_pid = Some(m.top_processes[first_idx].pid);
|
||||
} else if let Some(current_idx) = *params.selected_process_index {
|
||||
// Find current position in filtered/sorted list
|
||||
if let Some(pos) = idxs.iter().position(|&idx| idx == current_idx) {
|
||||
if pos + 1 < idxs.len() {
|
||||
// Move down in filtered/sorted list
|
||||
let new_idx = idxs[pos + 1];
|
||||
*params.selected_process_index = Some(new_idx);
|
||||
*params.selected_process_pid = Some(m.top_processes[new_idx].pid);
|
||||
}
|
||||
} else {
|
||||
// Current selection not in filtered list, select first result
|
||||
let first_idx = idxs[0];
|
||||
*params.selected_process_index = Some(first_idx);
|
||||
*params.selected_process_pid = Some(m.top_processes[first_idx].pid);
|
||||
}
|
||||
}
|
||||
}
|
||||
true // Handled
|
||||
}
|
||||
KeyCode::Char('x') | KeyCode::Char('X') => {
|
||||
// Unselect any selected process
|
||||
if params.selected_process_pid.is_some() || params.selected_process_index.is_some() {
|
||||
*params.selected_process_pid = None;
|
||||
*params.selected_process_index = None;
|
||||
true // Handled
|
||||
} else {
|
||||
false // No selection to clear
|
||||
}
|
||||
}
|
||||
KeyCode::Enter => {
|
||||
// Signal that Enter was pressed with a selection
|
||||
params.selected_process_pid.is_some() // Return true if we have a selection to handle
|
||||
}
|
||||
_ => {
|
||||
// No other keys handled - let scrollbar handle all navigation
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle mouse for content scrolling and scrollbar dragging.
|
||||
/// Returns Some(new_sort) if the header "CPU %" or "Mem" was clicked.
|
||||
/// LEGACY: Use processes_handle_mouse_with_selection for enhanced functionality
|
||||
#[allow(dead_code)]
|
||||
pub fn processes_handle_mouse(
|
||||
scroll_offset: &mut usize,
|
||||
drag: &mut Option<crate::ui::cpu::PerCoreScrollDrag>,
|
||||
@ -515,124 +264,3 @@ pub fn processes_handle_mouse(
|
||||
);
|
||||
None
|
||||
}
|
||||
|
||||
/// Parameters for process mouse event handling.
pub struct ProcessMouseParams<'a> {
    // Scroll position, mutated by wheel and scrollbar interaction.
    pub scroll_offset: &'a mut usize,
    // Selection outputs, set when a data row is clicked.
    pub selected_process_pid: &'a mut Option<u32>,
    pub selected_process_index: &'a mut Option<usize>,
    // In-progress scrollbar drag state, shared with the CPU pane helpers.
    pub drag: &'a mut Option<crate::ui::cpu::PerCoreScrollDrag>,
    // The mouse event to handle.
    pub mouse: MouseEvent,
    // Area of the processes pane (including its border).
    pub area: Rect,
    // Total number of rows, used to clamp scrolling.
    pub total_rows: usize,
    // Latest metrics; row clicks are ignored when `None`.
    pub metrics: Option<&'a Metrics>,
    // Sort order and filter — must match what the table was drawn with so
    // a click maps to the correct row.
    pub sort_by: ProcSortBy,
    pub search_query: &'a str,
}
|
||||
|
||||
/// Enhanced mouse handler that also manages process selection.
/// Returns Some(new_sort) if the header was clicked, or handles row selection.
///
/// The geometry computed here must mirror `draw_top_processes` exactly, or
/// clicks will land on the wrong rows.
pub fn processes_handle_mouse_with_selection(params: ProcessMouseParams) -> Option<ProcSortBy> {
    // Inner and content areas (match draw_top_processes)
    let inner = Rect {
        x: params.area.x + 1,
        y: params.area.y + 1,
        width: params.area.width.saturating_sub(2),
        height: params.area.height.saturating_sub(2),
    };
    if inner.height == 0 || inner.width <= 2 {
        return None;
    }

    // Calculate content area - must match draw_top_processes exactly!
    // If search is active or query exists, content starts after search box (3 lines)
    // NOTE(review): this only checks for a non-empty query; if the draw code
    // also reserves the box while the search prompt is open with an empty
    // query, the two diverge — confirm they stay in sync.
    let search_active = !params.search_query.is_empty();
    let content_start_y = if search_active { inner.y + 3 } else { inner.y };

    let content = Rect {
        x: inner.x,
        y: content_start_y,
        width: inner.width.saturating_sub(2),
        height: inner
            .height
            .saturating_sub(if search_active { 3 } else { 0 }),
    };

    // Scrollbar interactions (click arrows/page/drag)
    per_core_handle_scrollbar_mouse(
        params.scroll_offset,
        params.drag,
        params.mouse,
        params.area,
        params.total_rows,
    );

    // Wheel scrolling when inside the content
    crate::ui::cpu::per_core_handle_mouse(
        params.scroll_offset,
        params.mouse,
        content,
        content.height as usize,
    );

    // Header click to change sort
    let header_area = Rect {
        x: content.x,
        y: content.y,
        width: content.width,
        height: 1,
    };
    let inside_header = params.mouse.row == header_area.y
        && params.mouse.column >= header_area.x
        && params.mouse.column < header_area.x + header_area.width;

    if inside_header && matches!(params.mouse.kind, MouseEventKind::Down(MouseButton::Left)) {
        // Split header into the same columns as the table body.
        let cols = Layout::default()
            .direction(Direction::Horizontal)
            .constraints(COLS.to_vec())
            .split(header_area);
        // Column 2 is the CPU header, column 3 the memory header.
        if params.mouse.column >= cols[2].x && params.mouse.column < cols[2].x + cols[2].width {
            return Some(ProcSortBy::CpuDesc);
        }
        if params.mouse.column >= cols[3].x && params.mouse.column < cols[3].x + cols[3].width {
            return Some(ProcSortBy::MemDesc);
        }
    }

    // Row click for process selection
    let data_start_row = content.y + 1; // Skip header
    let data_area_height = content.height.saturating_sub(1); // Exclude header

    if matches!(params.mouse.kind, MouseEventKind::Down(MouseButton::Left))
        && params.mouse.row >= data_start_row
        && params.mouse.row < data_start_row + data_area_height
        && params.mouse.column >= content.x
        && params.mouse.column < content.x + content.width
    {
        let clicked_row = (params.mouse.row - data_start_row) as usize;

        // Find the actual process using the same filtering/sorting logic as the drawing code
        if let Some(m) = params.metrics {
            // Use the same filtered and sorted indices as display
            let idxs = get_filtered_sorted_indices(m, params.search_query, params.sort_by);

            // Calculate which process was actually clicked based on filtered/sorted order
            let visible_process_position = *params.scroll_offset + clicked_row;
            if visible_process_position < idxs.len() {
                let actual_process_index = idxs[visible_process_position];
                let clicked_process = &m.top_processes[actual_process_index];
                *params.selected_process_pid = Some(clicked_process.pid);
                *params.selected_process_index = Some(actual_process_index);
            }
        }
    }

    // Clamp to valid range
    per_core_clamp(
        params.scroll_offset,
        params.total_rows,
        (content.height.saturating_sub(1)) as usize,
    );
    None
}
|
||||
|
||||
@ -30,15 +30,6 @@ pub const BTN_EXIT_BG_ACTIVE: Color = Color::Rgb(255, 255, 255); // modern red
|
||||
pub const BTN_EXIT_FG_ACTIVE: Color = Color::Rgb(26, 26, 46);
|
||||
pub const BTN_EXIT_FG_INACTIVE: Color = Color::Rgb(255, 255, 255);
|
||||
|
||||
// Process selection colors
|
||||
pub const PROCESS_SELECTION_BG: Color = Color::Rgb(147, 112, 219); // Medium slate blue (purple)
|
||||
pub const PROCESS_SELECTION_FG: Color = Color::Rgb(255, 255, 255); // White text for contrast
|
||||
pub const PROCESS_TOOLTIP_BG: Color = Color::Rgb(147, 112, 219); // Same purple as selection
|
||||
pub const PROCESS_TOOLTIP_FG: Color = Color::Rgb(255, 255, 255); // White text for contrast
|
||||
|
||||
// Process details modal colors (matches main UI aesthetic - no custom colors, terminal defaults)
|
||||
pub const PROCESS_DETAILS_ACCENT: Color = Color::Rgb(147, 112, 219); // Purple accent for highlights
|
||||
|
||||
// Emoji / icon strings (centralized so they can be themed/swapped later)
|
||||
pub const ICON_WARNING_TITLE: &str = " 🔌 CONNECTION ERROR ";
|
||||
pub const ICON_CLUSTER: &str = "⚠️";
|
||||
@ -49,7 +40,7 @@ pub const ICON_COUNTDOWN_LABEL: &str = "⏰ Next auto retry: ";
|
||||
pub const BTN_RETRY_TEXT: &str = " 🔄 Retry ";
|
||||
pub const BTN_EXIT_TEXT: &str = " ❌ Exit ";
|
||||
|
||||
// warning icon
|
||||
// Large multi-line warning icon
|
||||
pub const LARGE_ERROR_ICON: &[&str] = &[
|
||||
" /\\ ",
|
||||
" / \\ ",
|
||||
@ -60,29 +51,3 @@ pub const LARGE_ERROR_ICON: &[&str] = &[
|
||||
" / !! \\ ",
|
||||
" /______________\\ ",
|
||||
];
|
||||
|
||||
//about logo
|
||||
pub const ASCII_ART: &str = r#"
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣀⣠⡄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⣀⣤⣶⣾⠿⠿⠛⠃⠀⠀⠀⠀⠀⣀⣀⣠⡄⠀⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠘⠛⢉⣠⣴⣾⣿⠿⠆⢰⣾⡿⠿⠛⠛⠋⠁⠀⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⣿⠟⠋⣁⣤⣤⣶⠀⣠⣤⣶⣾⣿⣿⡿⠀⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣶⣿⣿⣿⣿⣿⡆⠘⠛⢉⣁⣤⣤⣤⡀⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣿⣿⣿⡀⢾⣿⣿⣿⣿⣿⡇⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⣿⣿⣿⣿⣧⠈⢿⣿⣿⣿⣿⣷⠀⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿⣿⣿⣿⣧⠈⢿⣿⣿⣿⣿⡄⠀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣼⣿⣿⣿⣿⣿⠿⠋⣁⠀⢿⣿⣿⣿⣷⡀⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣠⣴⣿⣿⣿⣿⡟⢁⣴⣿⣿⡇⢸⣿⣿⡿⠟⠃⠀⠀
|
||||
⠀⠀⠀⠀⠀⠀⢀⣠⣴⣿⣿⣿⣿⣿⣿⡟⢀⣿⣿⣿⡟⢀⣾⠟⢁⣤⣶⣿⠀⠀
|
||||
⠀⠀⠀⠀⣠⣶⣿⣿⣿⣿⣿⣿⣿⣿⣿⡇⠸⡿⠟⢋⣠⣾⠃⣰⣿⣿⣿⡟⠀⠀
|
||||
⠀⠀⣴⣄⠙⣿⣿⣿⣿⣿⡿⠿⠛⠋⣉⣁⣤⣴⣶⣿⣿⣿⠀⣿⡿⠟⠋⠀⠀⠀
|
||||
⠀⠀⣿⣿⡆⠹⠟⠋⣁⣤⡄⢰⣿⠿⠟⠛⠋⠉⠉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
|
||||
⠀⠀⠈⠉⠁⠀⠀⠀⠙⠛⠃⠈⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
|
||||
|
||||
███████╗ ██████╗ ██████╗████████╗ ██████╗ ██████╗
|
||||
██╔════╝██╔═══██╗██╔════╝╚══██╔══╝██╔═══██╗██╔══██╗
|
||||
███████╗██║ ██║██║ ██║ ██║ ██║██████╔╝
|
||||
╚════██║██║ ██║██║ ██║ ██║ ██║██╔═══╝
|
||||
███████║╚██████╔╝╚██████╗ ██║ ╚██████╔╝██║
|
||||
╚══════╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═════╝ ╚═╝
|
||||
"#;
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "socktop_agent"
|
||||
version = "1.50.2"
|
||||
version = "1.40.70"
|
||||
authors = ["Jason Witty <jasonpwitty+socktop@proton.me>"]
|
||||
description = "Socktop agent daemon. Serves host metrics over WebSocket."
|
||||
edition = "2024"
|
||||
@ -8,39 +8,31 @@ license = "MIT"
|
||||
readme = "README.md"
|
||||
|
||||
[dependencies]
|
||||
# Tokio: Use minimal features instead of "full" to reduce binary size
|
||||
# Only include: rt-multi-thread (async runtime), net (WebSocket), sync (Mutex/RwLock), macros (#[tokio::test])
|
||||
# Excluded: io, fs, process, signal, time (not needed for this workload)
|
||||
# Savings: ~200-300KB binary size, faster compile times
|
||||
tokio = { version = "1", features = ["rt-multi-thread", "net", "sync", "macros"] }
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
axum = { version = "0.7", features = ["ws", "macros"] }
|
||||
sysinfo = { version = "0.37", features = ["network", "disk", "component"] }
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
flate2 = { version = "1", default-features = false, features = ["rust_backend"] }
|
||||
futures-util = "0.3.31"
|
||||
tracing = { version = "0.1", optional = true }
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"], optional = true }
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
# nvml-wrapper removed (unused; GPU metrics via gfxinfo only now)
|
||||
gfxinfo = "0.1.2"
|
||||
once_cell = "1.19"
|
||||
axum-server = { version = "0.7", features = ["tls-rustls"] }
|
||||
rustls = { version = "0.23", features = ["aws-lc-rs"] }
|
||||
axum-server = { version = "0.6", features = ["tls-rustls"] }
|
||||
rustls = "0.23"
|
||||
rustls-pemfile = "2.1"
|
||||
rcgen = "0.13"
|
||||
rcgen = "0.13" # pure-Rust self-signed cert generation (replaces openssl vendored build)
|
||||
anyhow = "1"
|
||||
hostname = "0.3"
|
||||
prost = { workspace = true }
|
||||
time = { version = "0.3", default-features = false, features = ["formatting", "macros", "parsing" ] }
|
||||
|
||||
[features]
|
||||
default = []
|
||||
logging = ["tracing", "tracing-subscriber"]
|
||||
|
||||
[build-dependencies]
|
||||
prost-build = "0.13"
|
||||
tonic-build = { version = "0.12", default-features = false, optional = true }
|
||||
protoc-bin-vendored = "3"
|
||||
|
||||
[dev-dependencies]
|
||||
assert_cmd = "2.0"
|
||||
tempfile = "3.10"
|
||||
|
||||
@ -1,95 +0,0 @@
|
||||
//! Caching for process metrics and journal entries
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::types::{ProcessMetricsResponse, JournalResponse};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct CacheEntry<T> {
|
||||
data: T,
|
||||
cached_at: Instant,
|
||||
ttl: Duration,
|
||||
}
|
||||
|
||||
impl<T> CacheEntry<T> {
|
||||
fn is_expired(&self) -> bool {
|
||||
self.cached_at.elapsed() > self.ttl
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ProcessCache {
|
||||
process_metrics: RwLock<HashMap<u32, CacheEntry<ProcessMetricsResponse>>>,
|
||||
journal_entries: RwLock<HashMap<u32, CacheEntry<JournalResponse>>>,
|
||||
}
|
||||
|
||||
impl ProcessCache {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
process_metrics: RwLock::new(HashMap::new()),
|
||||
journal_entries: RwLock::new(HashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get cached process metrics if available and not expired (250ms TTL)
|
||||
pub async fn get_process_metrics(&self, pid: u32) -> Option<ProcessMetricsResponse> {
|
||||
let cache = self.process_metrics.read().await;
|
||||
if let Some(entry) = cache.get(&pid) {
|
||||
if !entry.is_expired() {
|
||||
return Some(entry.data.clone());
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Cache process metrics with 250ms TTL
|
||||
pub async fn set_process_metrics(&self, pid: u32, data: ProcessMetricsResponse) {
|
||||
let mut cache = self.process_metrics.write().await;
|
||||
cache.insert(pid, CacheEntry {
|
||||
data,
|
||||
cached_at: Instant::now(),
|
||||
ttl: Duration::from_millis(250),
|
||||
});
|
||||
}
|
||||
|
||||
/// Get cached journal entries if available and not expired (1s TTL)
|
||||
pub async fn get_journal_entries(&self, pid: u32) -> Option<JournalResponse> {
|
||||
let cache = self.journal_entries.read().await;
|
||||
if let Some(entry) = cache.get(&pid) {
|
||||
if !entry.is_expired() {
|
||||
return Some(entry.data.clone());
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Cache journal entries with 1s TTL
|
||||
pub async fn set_journal_entries(&self, pid: u32, data: JournalResponse) {
|
||||
let mut cache = self.journal_entries.write().await;
|
||||
cache.insert(pid, CacheEntry {
|
||||
data,
|
||||
cached_at: Instant::now(),
|
||||
ttl: Duration::from_secs(1),
|
||||
});
|
||||
}
|
||||
|
||||
/// Clean up expired entries periodically
|
||||
pub async fn cleanup_expired(&self) {
|
||||
{
|
||||
let mut cache = self.process_metrics.write().await;
|
||||
cache.retain(|_, entry| !entry.is_expired());
|
||||
}
|
||||
{
|
||||
let mut cache = self.journal_entries.write().await;
|
||||
cache.retain(|_, entry| !entry.is_expired());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ProcessCache {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
@ -1,17 +0,0 @@
|
||||
//! Library interface for socktop_agent functionality
|
||||
//! This allows testing of agent functions.
|
||||
|
||||
pub mod gpu;
|
||||
pub mod metrics;
|
||||
pub mod proto;
|
||||
pub mod state;
|
||||
pub mod tls;
|
||||
pub mod types;
|
||||
pub mod ws;
|
||||
|
||||
// Re-export commonly used types and functions for testing
|
||||
pub use metrics::{collect_journal_entries, collect_process_metrics};
|
||||
pub use state::{AppState, CacheEntry};
|
||||
pub use types::{
|
||||
DetailedProcessInfo, JournalEntry, JournalResponse, LogLevel, ProcessMetricsResponse,
|
||||
};
|
||||
@ -29,53 +29,10 @@ fn arg_value(name: &str) -> Option<String> {
|
||||
None
|
||||
}
|
||||
|
||||
fn main() -> anyhow::Result<()> {
|
||||
// Install rustls crypto provider before any TLS operations
|
||||
// This is required when using axum-server's tls-rustls feature
|
||||
rustls::crypto::aws_lc_rs::default_provider()
|
||||
.install_default()
|
||||
.ok(); // Ignore error if already installed
|
||||
|
||||
#[cfg(feature = "logging")]
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::fmt::init();
|
||||
|
||||
// Configure Tokio runtime with optimized thread pool for reduced overhead.
|
||||
//
|
||||
// The agent is primarily I/O-bound (WebSocket, /proc file reads, sysinfo)
|
||||
// with no CPU-intensive or blocking operations, so a smaller thread pool
|
||||
// is beneficial:
|
||||
//
|
||||
// Benefits:
|
||||
// - Lower memory footprint (~1-2MB per thread saved)
|
||||
// - Reduced context switching overhead
|
||||
// - Fewer idle threads consuming resources
|
||||
// - Better for resource-constrained systems
|
||||
//
|
||||
// Trade-offs:
|
||||
// - Slightly reduced throughput under very high concurrent connections
|
||||
// - Could introduce latency if blocking operations are added (don't do this!)
|
||||
//
|
||||
// Default: 2 threads (sufficient for typical workloads with 1-10 clients)
|
||||
// Override: Set SOCKTOP_WORKER_THREADS=4 to use more threads if needed
|
||||
//
|
||||
// Note: Default Tokio uses num_cpus threads which is excessive for this workload.
|
||||
|
||||
let worker_threads = std::env::var("SOCKTOP_WORKER_THREADS")
|
||||
.ok()
|
||||
.and_then(|s| s.parse::<usize>().ok())
|
||||
.unwrap_or(2)
|
||||
.clamp(1, 16); // Ensure 1-16 threads
|
||||
|
||||
let runtime = tokio::runtime::Builder::new_multi_thread()
|
||||
.worker_threads(worker_threads)
|
||||
.thread_name("socktop-agent")
|
||||
.enable_all()
|
||||
.build()?;
|
||||
|
||||
runtime.block_on(async_main())
|
||||
}
|
||||
|
||||
async fn async_main() -> anyhow::Result<()> {
|
||||
// Version flag (print and exit). Keep before heavy initialization.
|
||||
if arg_flag("--version") || arg_flag("-V") {
|
||||
println!("socktop_agent {}", env!("CARGO_PKG_VERSION"));
|
||||
|
||||
@ -2,10 +2,7 @@
|
||||
|
||||
use crate::gpu::collect_all_gpus;
|
||||
use crate::state::AppState;
|
||||
use crate::types::{
|
||||
DetailedProcessInfo, DiskInfo, JournalEntry, JournalResponse, LogLevel, Metrics, NetworkInfo,
|
||||
ProcessInfo, ProcessMetricsResponse, ProcessesPayload,
|
||||
};
|
||||
use crate::types::{DiskInfo, Metrics, NetworkInfo, ProcessInfo, ProcessesPayload};
|
||||
use once_cell::sync::OnceCell;
|
||||
#[cfg(target_os = "linux")]
|
||||
use std::collections::HashMap;
|
||||
@ -13,56 +10,13 @@ use std::collections::HashMap;
|
||||
use std::fs;
|
||||
#[cfg(target_os = "linux")]
|
||||
use std::io;
|
||||
use std::process::Command;
|
||||
use std::sync::Mutex;
|
||||
use std::time::Duration as StdDuration;
|
||||
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
|
||||
use std::time::{Duration, Instant};
|
||||
use sysinfo::{ProcessRefreshKind, ProcessesToUpdate};
|
||||
#[cfg(feature = "logging")]
|
||||
use tracing::warn;
|
||||
|
||||
// NOTE: CPU normalization env removed; non-Linux now always reports per-process share (0..100) as given by sysinfo.
|
||||
|
||||
// Helper functions to get CPU time from /proc/stat on Linux
|
||||
#[cfg(target_os = "linux")]
|
||||
fn get_cpu_time_user(pid: u32) -> u64 {
|
||||
if let Ok(stat) = fs::read_to_string(format!("/proc/{pid}/stat")) {
|
||||
let fields: Vec<&str> = stat.split_whitespace().collect();
|
||||
if fields.len() > 13 {
|
||||
// Field 13 (0-indexed) is utime (user CPU time in clock ticks)
|
||||
if let Ok(utime) = fields[13].parse::<u64>() {
|
||||
// Convert clock ticks to milliseconds (assuming 100 Hz)
|
||||
return utime * 10; // 1 tick = 10ms at 100 Hz
|
||||
}
|
||||
}
|
||||
}
|
||||
0
|
||||
}
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
fn get_cpu_time_system(pid: u32) -> u64 {
|
||||
if let Ok(stat) = fs::read_to_string(format!("/proc/{pid}/stat")) {
|
||||
let fields: Vec<&str> = stat.split_whitespace().collect();
|
||||
if fields.len() > 14 {
|
||||
// Field 14 (0-indexed) is stime (system CPU time in clock ticks)
|
||||
if let Ok(stime) = fields[14].parse::<u64>() {
|
||||
// Convert clock ticks to milliseconds (assuming 100 Hz)
|
||||
return stime * 10; // 1 tick = 10ms at 100 Hz
|
||||
}
|
||||
}
|
||||
}
|
||||
0
|
||||
}
|
||||
|
||||
#[cfg(not(target_os = "linux"))]
|
||||
fn get_cpu_time_user(_pid: u32) -> u64 {
|
||||
0 // Not implemented for non-Linux platforms
|
||||
}
|
||||
|
||||
#[cfg(not(target_os = "linux"))]
|
||||
fn get_cpu_time_system(_pid: u32) -> u64 {
|
||||
0 // Not implemented for non-Linux platforms
|
||||
}
|
||||
// Runtime toggles (read once)
|
||||
fn gpu_enabled() -> bool {
|
||||
static ON: OnceCell<bool> = OnceCell::new();
|
||||
@ -169,12 +123,11 @@ pub async fn collect_fast_metrics(state: &AppState) -> Metrics {
|
||||
}
|
||||
}
|
||||
let mut sys = state.sys.lock().await;
|
||||
if let Err(_e) = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
|
||||
if let Err(e) = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
|
||||
sys.refresh_cpu_usage();
|
||||
sys.refresh_memory();
|
||||
})) {
|
||||
#[cfg(feature = "logging")]
|
||||
warn!("sysinfo selective refresh panicked: {_e:?}");
|
||||
warn!("sysinfo selective refresh panicked: {e:?}");
|
||||
}
|
||||
|
||||
// Get or initialize hostname once
|
||||
@ -268,9 +221,8 @@ pub async fn collect_fast_metrics(state: &AppState) -> Metrics {
|
||||
let v = match collect_all_gpus() {
|
||||
Ok(v) if !v.is_empty() => Some(v),
|
||||
Ok(_) => None,
|
||||
Err(_e) => {
|
||||
#[cfg(feature = "logging")]
|
||||
warn!("gpu collection failed: {_e}");
|
||||
Err(e) => {
|
||||
warn!("gpu collection failed: {e}");
|
||||
None
|
||||
}
|
||||
};
|
||||
@ -334,199 +286,14 @@ pub async fn collect_disks(state: &AppState) -> Vec<DiskInfo> {
|
||||
}
|
||||
let mut disks_list = state.disks.lock().await;
|
||||
disks_list.refresh(false); // don't drop missing disks
|
||||
|
||||
// Collect disk temperatures from components
|
||||
// NVMe temps show up as "Composite" under different chip names
|
||||
let disk_temps = {
|
||||
let mut components = state.components.lock().await;
|
||||
components.refresh(true); // true = refresh values, not just the list
|
||||
|
||||
let mut composite_temps = Vec::new();
|
||||
|
||||
for c in components.iter() {
|
||||
let label = c.label().to_ascii_lowercase();
|
||||
|
||||
// Collect all "Composite" temperatures (these are NVMe drives)
|
||||
// Labels are like "nvme Composite CT1000N7BSS503" or "nvme Composite Sabrent Rocket 4.0"
|
||||
if label.contains("composite")
|
||||
&& let Some(temp) = c.temperature()
|
||||
{
|
||||
#[cfg(feature = "logging")]
|
||||
tracing::debug!("Found Composite temp: {}°C", temp);
|
||||
composite_temps.push(temp);
|
||||
}
|
||||
}
|
||||
|
||||
// Store composite temps indexed by their order (nvme0n1, nvme1n1, nvme2n1, etc.)
|
||||
let mut temps = std::collections::HashMap::new();
|
||||
for (idx, temp) in composite_temps.iter().enumerate() {
|
||||
let key = format!("nvme{}n1", idx);
|
||||
#[cfg(feature = "logging")]
|
||||
tracing::debug!("Mapping {} -> {}°C", key, temp);
|
||||
temps.insert(key, *temp);
|
||||
}
|
||||
#[cfg(feature = "logging")]
|
||||
tracing::debug!("Final disk_temps map: {:?}", temps);
|
||||
temps
|
||||
};
|
||||
|
||||
// First collect all partitions from sysinfo, deduplicating by device name
|
||||
// (same partition can be mounted at multiple mount points)
|
||||
let mut seen_partitions = std::collections::HashSet::new();
|
||||
let partitions: Vec<DiskInfo> = disks_list
|
||||
let disks: Vec<DiskInfo> = disks_list
|
||||
.iter()
|
||||
.filter_map(|d| {
|
||||
let name = d.name().to_string_lossy().into_owned();
|
||||
|
||||
// Skip if we've already seen this partition/device
|
||||
if !seen_partitions.insert(name.clone()) {
|
||||
return None;
|
||||
}
|
||||
|
||||
// Determine if this is a partition
|
||||
let is_partition = name.contains("p1")
|
||||
|| name.contains("p2")
|
||||
|| name.contains("p3")
|
||||
|| name.ends_with('1')
|
||||
|| name.ends_with('2')
|
||||
|| name.ends_with('3')
|
||||
|| name.ends_with('4')
|
||||
|| name.ends_with('5')
|
||||
|| name.ends_with('6')
|
||||
|| name.ends_with('7')
|
||||
|| name.ends_with('8')
|
||||
|| name.ends_with('9');
|
||||
|
||||
// Try to find temperature for this disk
|
||||
let temperature = disk_temps.iter().find_map(|(key, &temp)| {
|
||||
if name.starts_with(key) {
|
||||
#[cfg(feature = "logging")]
|
||||
tracing::debug!("Matched {} with key {} -> {}°C", name, key, temp);
|
||||
Some(temp)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
|
||||
if temperature.is_none() && !name.starts_with("loop") && !name.starts_with("ram") {
|
||||
#[cfg(feature = "logging")]
|
||||
tracing::debug!("No temperature found for disk: {}", name);
|
||||
}
|
||||
|
||||
Some(DiskInfo {
|
||||
name,
|
||||
total: d.total_space(),
|
||||
available: d.available_space(),
|
||||
temperature,
|
||||
is_partition,
|
||||
})
|
||||
.map(|d| DiskInfo {
|
||||
name: d.name().to_string_lossy().into_owned(),
|
||||
total: d.total_space(),
|
||||
available: d.available_space(),
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Now create parent disk entries by aggregating partition data
|
||||
let mut parent_disks: std::collections::HashMap<String, (u64, u64, Option<f32>)> =
|
||||
std::collections::HashMap::new();
|
||||
|
||||
for partition in &partitions {
|
||||
if partition.is_partition {
|
||||
// Extract parent disk name
|
||||
// nvme0n1p1 -> nvme0n1, sda1 -> sda, mmcblk0p1 -> mmcblk0
|
||||
let parent_name = if let Some(pos) = partition.name.rfind('p') {
|
||||
// Check if character after 'p' is a digit
|
||||
if partition
|
||||
.name
|
||||
.chars()
|
||||
.nth(pos + 1)
|
||||
.is_some_and(|c| c.is_ascii_digit())
|
||||
{
|
||||
&partition.name[..pos]
|
||||
} else {
|
||||
// Handle sda1, sdb2, etc (just trim trailing digit)
|
||||
partition.name.trim_end_matches(char::is_numeric)
|
||||
}
|
||||
} else {
|
||||
// Handle sda1, sdb2, etc (just trim trailing digit)
|
||||
partition.name.trim_end_matches(char::is_numeric)
|
||||
};
|
||||
|
||||
// Look up temperature for the PARENT disk, not the partition
|
||||
// Strip /dev/ prefix if present for matching
|
||||
let parent_name_for_match = parent_name.strip_prefix("/dev/").unwrap_or(parent_name);
|
||||
let parent_temp = disk_temps.iter().find_map(|(key, &temp)| {
|
||||
if parent_name_for_match.starts_with(key) {
|
||||
Some(temp)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
|
||||
// Aggregate partition stats into parent
|
||||
let entry = parent_disks
|
||||
.entry(parent_name.to_string())
|
||||
.or_insert((0, 0, parent_temp));
|
||||
entry.0 += partition.total;
|
||||
entry.1 += partition.available;
|
||||
// Keep temperature if any partition has it (or if we just found one)
|
||||
if entry.2.is_none() {
|
||||
entry.2 = parent_temp;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create parent disk entries
|
||||
let mut disks: Vec<DiskInfo> = parent_disks
|
||||
.into_iter()
|
||||
.map(|(name, (total, available, temperature))| DiskInfo {
|
||||
name,
|
||||
total,
|
||||
available,
|
||||
temperature,
|
||||
is_partition: false,
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Sort parent disks by name
|
||||
disks.sort_by(|a, b| a.name.cmp(&b.name));
|
||||
|
||||
// Add partitions after their parent disk
|
||||
for partition in partitions {
|
||||
if partition.is_partition {
|
||||
// Find parent disk index
|
||||
let parent_name = if let Some(pos) = partition.name.rfind('p') {
|
||||
if partition
|
||||
.name
|
||||
.chars()
|
||||
.nth(pos + 1)
|
||||
.is_some_and(|c| c.is_ascii_digit())
|
||||
{
|
||||
&partition.name[..pos]
|
||||
} else {
|
||||
partition.name.trim_end_matches(char::is_numeric)
|
||||
}
|
||||
} else {
|
||||
partition.name.trim_end_matches(char::is_numeric)
|
||||
};
|
||||
|
||||
// Find where to insert this partition (after its parent)
|
||||
if let Some(parent_idx) = disks.iter().position(|d| d.name == parent_name) {
|
||||
// Insert after parent and any existing partitions of that parent
|
||||
let mut insert_idx = parent_idx + 1;
|
||||
while insert_idx < disks.len()
|
||||
&& disks[insert_idx].is_partition
|
||||
&& disks[insert_idx].name.starts_with(parent_name)
|
||||
{
|
||||
insert_idx += 1;
|
||||
}
|
||||
disks.insert(insert_idx, partition);
|
||||
} else {
|
||||
// Parent not found (shouldn't happen), just add at end
|
||||
disks.push(partition);
|
||||
}
|
||||
} else {
|
||||
// Not a partition (e.g., zram0), add at end
|
||||
disks.push(partition);
|
||||
}
|
||||
}
|
||||
{
|
||||
let mut cache = state.cache_disks.lock().await;
|
||||
cache.set(disks.clone());
|
||||
@ -760,7 +527,6 @@ pub async fn collect_processes_all(state: &AppState) -> ProcessesPayload {
|
||||
proc_cache
|
||||
.names
|
||||
.retain(|pid, _| sys.processes().contains_key(&sysinfo::Pid::from_u32(*pid)));
|
||||
#[cfg(feature = "logging")]
|
||||
tracing::debug!(
|
||||
"Cleaned up {} stale process names in {}ms",
|
||||
proc_cache.names.capacity() - proc_cache.names.len(),
|
||||
@ -783,626 +549,3 @@ pub async fn collect_processes_all(state: &AppState) -> ProcessesPayload {
|
||||
}
|
||||
payload
|
||||
}
|
||||
|
||||
/// Lightweight child process enumeration using direct /proc access
|
||||
/// This avoids the expensive refresh_processes_specifics(All) call
|
||||
#[cfg(target_os = "linux")]
|
||||
fn enumerate_child_processes_lightweight(
|
||||
parent_pid: u32,
|
||||
system: &sysinfo::System,
|
||||
) -> Vec<DetailedProcessInfo> {
|
||||
let mut children = Vec::new();
|
||||
|
||||
// Read /proc to find all child processes
|
||||
// This is much faster than refresh_processes_specifics(All)
|
||||
if let Ok(entries) = fs::read_dir("/proc") {
|
||||
for entry in entries.flatten() {
|
||||
if let Ok(file_name) = entry.file_name().into_string()
|
||||
&& let Ok(pid) = file_name.parse::<u32>()
|
||||
&& let Some(child_parent_pid) = read_parent_pid_from_proc(pid)
|
||||
&& child_parent_pid == parent_pid
|
||||
&& let Some(child_info) = collect_process_info_from_proc(pid, system)
|
||||
{
|
||||
children.push(child_info);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
children
|
||||
}
|
||||
|
||||
/// Read parent PID from /proc/{pid}/stat
|
||||
#[cfg(target_os = "linux")]
|
||||
fn read_parent_pid_from_proc(pid: u32) -> Option<u32> {
|
||||
let stat = fs::read_to_string(format!("/proc/{pid}/stat")).ok()?;
|
||||
// Format: pid (comm) state ppid ...
|
||||
// We need to handle process names with spaces/parentheses
|
||||
let ppid_start = stat.rfind(')')?;
|
||||
let fields: Vec<&str> = stat[ppid_start + 1..].split_whitespace().collect();
|
||||
// After the closing paren: state ppid ...
|
||||
// Field 1 (0-indexed) is ppid
|
||||
fields.get(1)?.parse::<u32>().ok()
|
||||
}
|
||||
|
||||
/// Collect process information from /proc files
|
||||
#[cfg(target_os = "linux")]
|
||||
fn collect_process_info_from_proc(
|
||||
pid: u32,
|
||||
system: &sysinfo::System,
|
||||
) -> Option<DetailedProcessInfo> {
|
||||
// Try to get basic info from sysinfo if it's already loaded (cheap lookup)
|
||||
// Otherwise read from /proc directly
|
||||
let (name, cpu_usage, mem_bytes, virtual_mem_bytes) =
|
||||
if let Some(proc) = system.process(sysinfo::Pid::from_u32(pid)) {
|
||||
(
|
||||
proc.name().to_string_lossy().to_string(),
|
||||
proc.cpu_usage(),
|
||||
proc.memory(),
|
||||
proc.virtual_memory(),
|
||||
)
|
||||
} else {
|
||||
// Process not in sysinfo cache, read minimal info from /proc
|
||||
let name = fs::read_to_string(format!("/proc/{pid}/comm"))
|
||||
.ok()?
|
||||
.trim()
|
||||
.to_string();
|
||||
|
||||
// Read memory from /proc/{pid}/status
|
||||
let status_content = fs::read_to_string(format!("/proc/{pid}/status")).ok()?;
|
||||
let mut mem_bytes = 0u64;
|
||||
let mut virtual_mem_bytes = 0u64;
|
||||
|
||||
for line in status_content.lines() {
|
||||
if let Some(value) = line.strip_prefix("VmRSS:") {
|
||||
if let Some(kb) = value.split_whitespace().next() {
|
||||
mem_bytes = kb.parse::<u64>().unwrap_or(0) * 1024;
|
||||
}
|
||||
} else if let Some(value) = line.strip_prefix("VmSize:")
|
||||
&& let Some(kb) = value.split_whitespace().next()
|
||||
{
|
||||
virtual_mem_bytes = kb.parse::<u64>().unwrap_or(0) * 1024;
|
||||
}
|
||||
}
|
||||
|
||||
(name, 0.0, mem_bytes, virtual_mem_bytes)
|
||||
};
|
||||
|
||||
// Read command line
|
||||
let command = fs::read_to_string(format!("/proc/{pid}/cmdline"))
|
||||
.ok()
|
||||
.map(|s| s.replace('\0', " ").trim().to_string())
|
||||
.unwrap_or_default();
|
||||
|
||||
// Read status information
|
||||
let status_content = fs::read_to_string(format!("/proc/{pid}/status")).ok()?;
|
||||
let mut uid = 0u32;
|
||||
let mut gid = 0u32;
|
||||
let mut thread_count = 0u32;
|
||||
let mut status = "Unknown".to_string();
|
||||
|
||||
for line in status_content.lines() {
|
||||
if let Some(value) = line.strip_prefix("Uid:") {
|
||||
if let Some(uid_str) = value.split_whitespace().next() {
|
||||
uid = uid_str.parse().unwrap_or(0);
|
||||
}
|
||||
} else if let Some(value) = line.strip_prefix("Gid:") {
|
||||
if let Some(gid_str) = value.split_whitespace().next() {
|
||||
gid = gid_str.parse().unwrap_or(0);
|
||||
}
|
||||
} else if let Some(value) = line.strip_prefix("Threads:") {
|
||||
thread_count = value.trim().parse().unwrap_or(0);
|
||||
} else if let Some(value) = line.strip_prefix("State:") {
|
||||
status = value
|
||||
.trim()
|
||||
.chars()
|
||||
.next()
|
||||
.map(|c| match c {
|
||||
'R' => "Running",
|
||||
'S' => "Sleeping",
|
||||
'D' => "Disk Sleep",
|
||||
'Z' => "Zombie",
|
||||
'T' => "Stopped",
|
||||
't' => "Tracing Stop",
|
||||
'X' | 'x' => "Dead",
|
||||
'K' => "Wakekill",
|
||||
'W' => "Waking",
|
||||
'P' => "Parked",
|
||||
'I' => "Idle",
|
||||
_ => "Unknown",
|
||||
})
|
||||
.unwrap_or("Unknown")
|
||||
.to_string();
|
||||
}
|
||||
}
|
||||
|
||||
// Read start time from stat
|
||||
let start_time = if let Ok(stat) = fs::read_to_string(format!("/proc/{pid}/stat")) {
|
||||
let stat_end = stat.rfind(')')?;
|
||||
let fields: Vec<&str> = stat[stat_end + 1..].split_whitespace().collect();
|
||||
// Field 19 (0-indexed) is starttime in clock ticks since boot
|
||||
fields.get(19)?.parse::<u64>().ok()?
|
||||
} else {
|
||||
0
|
||||
};
|
||||
|
||||
// Read I/O stats if available
|
||||
let (read_bytes, write_bytes) =
|
||||
if let Ok(io_content) = fs::read_to_string(format!("/proc/{pid}/io")) {
|
||||
let mut read_bytes = None;
|
||||
let mut write_bytes = None;
|
||||
|
||||
for line in io_content.lines() {
|
||||
if let Some(value) = line.strip_prefix("read_bytes:") {
|
||||
read_bytes = value.trim().parse().ok();
|
||||
} else if let Some(value) = line.strip_prefix("write_bytes:") {
|
||||
write_bytes = value.trim().parse().ok();
|
||||
}
|
||||
}
|
||||
|
||||
(read_bytes, write_bytes)
|
||||
} else {
|
||||
(None, None)
|
||||
};
|
||||
|
||||
// Read working directory
|
||||
let working_directory = fs::read_link(format!("/proc/{pid}/cwd"))
|
||||
.ok()
|
||||
.map(|p| p.to_string_lossy().to_string());
|
||||
|
||||
// Read executable path
|
||||
let executable_path = fs::read_link(format!("/proc/{pid}/exe"))
|
||||
.ok()
|
||||
.map(|p| p.to_string_lossy().to_string());
|
||||
|
||||
Some(DetailedProcessInfo {
|
||||
pid,
|
||||
name,
|
||||
command,
|
||||
cpu_usage,
|
||||
mem_bytes,
|
||||
virtual_mem_bytes,
|
||||
shared_mem_bytes: None, // Would need to parse /proc/{pid}/statm for this
|
||||
thread_count,
|
||||
fd_count: None, // Would need to count entries in /proc/{pid}/fd
|
||||
status,
|
||||
parent_pid: None, // We already know the parent
|
||||
user_id: uid,
|
||||
group_id: gid,
|
||||
start_time,
|
||||
cpu_time_user: get_cpu_time_user(pid),
|
||||
cpu_time_system: get_cpu_time_system(pid),
|
||||
read_bytes,
|
||||
write_bytes,
|
||||
working_directory,
|
||||
executable_path,
|
||||
child_processes: Vec::new(), // Don't recurse
|
||||
threads: Vec::new(), // Not collected for child processes
|
||||
})
|
||||
}
|
||||
|
||||
/// Fallback for non-Linux: use sysinfo (less efficient but functional)
|
||||
#[cfg(not(target_os = "linux"))]
|
||||
fn enumerate_child_processes_lightweight(
|
||||
parent_pid: u32,
|
||||
system: &sysinfo::System,
|
||||
) -> Vec<DetailedProcessInfo> {
|
||||
let mut children = Vec::new();
|
||||
|
||||
// On non-Linux, we have to iterate through all processes in sysinfo
|
||||
// This is less efficient but maintains cross-platform compatibility
|
||||
for (child_pid, child_process) in system.processes() {
|
||||
if let Some(parent) = child_process.parent()
|
||||
&& parent.as_u32() == parent_pid
|
||||
{
|
||||
let child_info = DetailedProcessInfo {
|
||||
pid: child_pid.as_u32(),
|
||||
name: child_process.name().to_string_lossy().to_string(),
|
||||
command: child_process
|
||||
.cmd()
|
||||
.iter()
|
||||
.map(|s| s.to_string_lossy().to_string())
|
||||
.collect::<Vec<_>>()
|
||||
.join(" "),
|
||||
cpu_usage: child_process.cpu_usage(),
|
||||
mem_bytes: child_process.memory(),
|
||||
virtual_mem_bytes: child_process.virtual_memory(),
|
||||
shared_mem_bytes: None,
|
||||
thread_count: child_process
|
||||
.tasks()
|
||||
.map(|tasks| tasks.len() as u32)
|
||||
.unwrap_or(0),
|
||||
fd_count: None,
|
||||
status: format!("{:?}", child_process.status()),
|
||||
parent_pid: Some(parent_pid),
|
||||
// On non-Linux platforms, sysinfo UID/GID might not be accurate
|
||||
// Just use 0 as placeholder since we can't read /proc
|
||||
user_id: 0,
|
||||
group_id: 0,
|
||||
start_time: child_process.start_time(),
|
||||
cpu_time_user: 0, // Not available on non-Linux in our implementation
|
||||
cpu_time_system: 0,
|
||||
read_bytes: Some(child_process.disk_usage().read_bytes),
|
||||
write_bytes: Some(child_process.disk_usage().written_bytes),
|
||||
working_directory: child_process.cwd().map(|p| p.to_string_lossy().to_string()),
|
||||
executable_path: child_process.exe().map(|p| p.to_string_lossy().to_string()),
|
||||
child_processes: Vec::new(),
|
||||
threads: Vec::new(), // Not collected for non-Linux
|
||||
};
|
||||
children.push(child_info);
|
||||
}
|
||||
}
|
||||
|
||||
children
|
||||
}
|
||||
|
||||
/// Collect thread information for a specific process (Linux only)
|
||||
#[cfg(target_os = "linux")]
|
||||
fn collect_thread_info(pid: u32) -> Vec<crate::types::ThreadInfo> {
|
||||
let mut threads = Vec::new();
|
||||
|
||||
// Read /proc/{pid}/task directory
|
||||
let task_dir = format!("/proc/{pid}/task");
|
||||
let Ok(entries) = fs::read_dir(&task_dir) else {
|
||||
return threads;
|
||||
};
|
||||
|
||||
for entry in entries.flatten() {
|
||||
let file_name = entry.file_name();
|
||||
let tid_str = file_name.to_string_lossy();
|
||||
let Ok(tid) = tid_str.parse::<u32>() else {
|
||||
continue;
|
||||
};
|
||||
|
||||
// Read thread name from comm
|
||||
let name = fs::read_to_string(format!("/proc/{pid}/task/{tid}/comm"))
|
||||
.unwrap_or_else(|_| format!("Thread-{tid}"))
|
||||
.trim()
|
||||
.to_string();
|
||||
|
||||
// Read thread stat for CPU times and status
|
||||
let stat_path = format!("/proc/{pid}/task/{tid}/stat");
|
||||
let Ok(stat_content) = fs::read_to_string(&stat_path) else {
|
||||
continue;
|
||||
};
|
||||
|
||||
// Parse stat file (similar format to process stat)
|
||||
// Fields: pid comm state ... utime stime ...
|
||||
let fields: Vec<&str> = stat_content.split_whitespace().collect();
|
||||
if fields.len() < 15 {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Field 2 is state (R, S, D, Z, T, etc.)
|
||||
let status = fields
|
||||
.get(2)
|
||||
.and_then(|s| s.chars().next())
|
||||
.map(|c| match c {
|
||||
'R' => "Running",
|
||||
'S' => "Sleeping",
|
||||
'D' => "Disk Sleep",
|
||||
'Z' => "Zombie",
|
||||
'T' => "Stopped",
|
||||
't' => "Tracing Stop",
|
||||
'X' | 'x' => "Dead",
|
||||
_ => "Unknown",
|
||||
})
|
||||
.unwrap_or("Unknown")
|
||||
.to_string();
|
||||
|
||||
// Field 13 is utime (user CPU time in clock ticks)
|
||||
// Field 14 is stime (system CPU time in clock ticks)
|
||||
let utime = fields
|
||||
.get(13)
|
||||
.and_then(|s| s.parse::<u64>().ok())
|
||||
.unwrap_or(0);
|
||||
let stime = fields
|
||||
.get(14)
|
||||
.and_then(|s| s.parse::<u64>().ok())
|
||||
.unwrap_or(0);
|
||||
|
||||
// Convert clock ticks to microseconds (assuming 100 Hz)
|
||||
// 1 tick = 10ms = 10,000 microseconds
|
||||
let cpu_time_user = utime * 10_000;
|
||||
let cpu_time_system = stime * 10_000;
|
||||
|
||||
threads.push(crate::types::ThreadInfo {
|
||||
tid,
|
||||
name,
|
||||
cpu_time_user,
|
||||
cpu_time_system,
|
||||
status,
|
||||
});
|
||||
}
|
||||
|
||||
threads
|
||||
}
|
||||
|
||||
/// Fallback for non-Linux: return empty thread list
|
||||
#[cfg(not(target_os = "linux"))]
|
||||
fn collect_thread_info(_pid: u32) -> Vec<crate::types::ThreadInfo> {
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
/// Collect detailed metrics for a specific process.
///
/// Takes the shared sysinfo `System` lock, refreshes *only* the requested PID
/// (memory/CPU/disk-usage), copies every needed value out of the `Process`,
/// then drops the lock before assembling the response. On Linux, UID/GID and
/// I/O counters are read straight from /proc for accuracy; other platforms
/// fall back to sysinfo values (or zeros where unavailable).
///
/// # Errors
/// Returns `Err` when the PID does not exist in the refreshed process table,
/// or when the system clock reports a time before the Unix epoch.
pub async fn collect_process_metrics(
    pid: u32,
    state: &AppState,
) -> Result<ProcessMetricsResponse, String> {
    let mut system = state.sys.lock().await;

    // OPTIMIZED: Only refresh the specific process we care about
    // This avoids polluting the main process list with threads and prevents race conditions
    system.refresh_processes_specifics(
        ProcessesToUpdate::Some(&[sysinfo::Pid::from_u32(pid)]),
        // `false`: do not prune dead processes during this refresh
        // NOTE(review): confirm flag meaning against the pinned sysinfo version
        false,
        ProcessRefreshKind::nothing()
            .with_memory()
            .with_cpu()
            .with_disk_usage(),
    );

    let process = system
        .process(sysinfo::Pid::from_u32(pid))
        .ok_or_else(|| format!("Process {pid} not found"))?;

    // Get current timestamp (seconds since the Unix epoch) for cache bookkeeping
    let cached_at = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map_err(|e| format!("Time error: {e}"))?
        .as_secs();

    // Extract all needed data from process while we have the lock
    let name = process.name().to_string_lossy().to_string();
    let command = process
        .cmd()
        .iter()
        .map(|s| s.to_string_lossy().to_string())
        .collect::<Vec<_>>()
        .join(" ");
    let cpu_usage = process.cpu_usage();
    let mem_bytes = process.memory();
    let virtual_mem_bytes = process.virtual_memory();
    // tasks() is None on platforms where sysinfo has no thread info -> 0
    let thread_count = process.tasks().map(|tasks| tasks.len() as u32).unwrap_or(0);
    let status = format!("{:?}", process.status());
    let parent_pid = process.parent().map(|p| p.as_u32());
    let start_time = process.start_time();

    // Read UID and GID directly from /proc/{pid}/status for accuracy
    #[cfg(target_os = "linux")]
    let (user_id, group_id) =
        if let Ok(status_content) = std::fs::read_to_string(format!("/proc/{pid}/status")) {
            let mut uid = 0u32;
            let mut gid = 0u32;

            for line in status_content.lines() {
                if let Some(value) = line.strip_prefix("Uid:") {
                    // Uid line format: "Uid: 1000 1000 1000 1000" (real, effective, saved, filesystem)
                    // We want the real UID (first value)
                    if let Some(uid_str) = value.split_whitespace().next() {
                        uid = uid_str.parse().unwrap_or(0);
                    }
                } else if let Some(value) = line.strip_prefix("Gid:") {
                    // Gid line format: "Gid: 1000 1000 1000 1000" (real, effective, saved, filesystem)
                    // We want the real GID (first value)
                    if let Some(gid_str) = value.split_whitespace().next() {
                        gid = gid_str.parse().unwrap_or(0);
                    }
                }
            }

            (uid, gid)
        } else {
            // Fallback if /proc read fails (permission issue)
            (0, 0)
        };

    #[cfg(not(target_os = "linux"))]
    let (user_id, group_id) = (0, 0);

    // Read I/O stats directly from /proc/{pid}/io
    // Use rchar/wchar to capture ALL I/O including cached reads (like htop/btop do)
    // sysinfo's total_read_bytes/total_written_bytes only count actual disk I/O
    #[cfg(target_os = "linux")]
    let (read_bytes, write_bytes) =
        if let Ok(io_content) = std::fs::read_to_string(format!("/proc/{pid}/io")) {
            let mut rchar = 0u64;
            let mut wchar = 0u64;

            for line in io_content.lines() {
                if let Some(value) = line.strip_prefix("rchar: ") {
                    rchar = value.trim().parse().unwrap_or(0);
                } else if let Some(value) = line.strip_prefix("wchar: ") {
                    wchar = value.trim().parse().unwrap_or(0);
                }
            }

            (Some(rchar), Some(wchar))
        } else {
            // Fallback to sysinfo if we can't read /proc (permissions)
            let disk_usage = process.disk_usage();
            (
                Some(disk_usage.total_read_bytes),
                Some(disk_usage.total_written_bytes),
            )
        };

    #[cfg(not(target_os = "linux"))]
    let (read_bytes, write_bytes) = {
        let disk_usage = process.disk_usage();
        (
            Some(disk_usage.total_read_bytes),
            Some(disk_usage.total_written_bytes),
        )
    };

    let working_directory = process.cwd().map(|p| p.to_string_lossy().to_string());
    let executable_path = process.exe().map(|p| p.to_string_lossy().to_string());

    // Collect child processes using lightweight /proc access
    // This avoids the expensive system.refresh_processes_specifics(All) call
    let child_processes = enumerate_child_processes_lightweight(pid, &system);

    // Release the system lock early (automatic when system goes out of scope)
    drop(system);

    // Collect thread information (Linux only; empty elsewhere)
    let threads = collect_thread_info(pid);

    // Now construct the detailed info without holding the lock
    let detailed_info = DetailedProcessInfo {
        pid,
        name,
        command,
        cpu_usage,
        mem_bytes,
        virtual_mem_bytes,
        shared_mem_bytes: None, // Not available from sysinfo
        thread_count,
        fd_count: None, // Not available from sysinfo on all platforms
        status,
        parent_pid,
        user_id,
        group_id,
        start_time,
        cpu_time_user: get_cpu_time_user(pid),
        cpu_time_system: get_cpu_time_system(pid),
        read_bytes,
        write_bytes,
        working_directory,
        executable_path,
        child_processes,
        threads,
    };

    Ok(ProcessMetricsResponse {
        process: detailed_info,
        cached_at,
    })
}
|
||||
|
||||
/// Collect journal entries for a specific process
|
||||
pub fn collect_journal_entries(pid: u32) -> Result<JournalResponse, String> {
|
||||
let output = Command::new("journalctl")
|
||||
.args([
|
||||
&format!("_PID={pid}"),
|
||||
"--output=json",
|
||||
"--lines=100",
|
||||
"--no-pager",
|
||||
])
|
||||
.output()
|
||||
.map_err(|e| format!("Failed to execute journalctl: {e}"))?;
|
||||
|
||||
if !output.status.success() {
|
||||
return Err(format!(
|
||||
"journalctl failed: {}",
|
||||
String::from_utf8_lossy(&output.stderr)
|
||||
));
|
||||
}
|
||||
|
||||
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||
let mut entries = Vec::new();
|
||||
|
||||
// Parse each line as JSON (journalctl outputs one JSON object per line)
|
||||
for line in stdout.lines() {
|
||||
if line.trim().is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let json: serde_json::Value =
|
||||
serde_json::from_str(line).map_err(|e| format!("Failed to parse journal JSON: {e}"))?;
|
||||
|
||||
// Extract relevant fields
|
||||
let timestamp_str = json
|
||||
.get("__REALTIME_TIMESTAMP")
|
||||
.and_then(|v| v.as_str())
|
||||
.unwrap_or("0");
|
||||
|
||||
// Convert timestamp to ISO 8601 format
|
||||
let timestamp = if let Ok(ts_micros) = timestamp_str.parse::<u64>() {
|
||||
let ts_secs = ts_micros / 1_000_000;
|
||||
let ts_nanos = (ts_micros % 1_000_000) * 1000;
|
||||
let time = SystemTime::UNIX_EPOCH
|
||||
+ Duration::from_secs(ts_secs)
|
||||
+ Duration::from_nanos(ts_nanos);
|
||||
// Simple ISO 8601 format - we can improve this if needed
|
||||
format!("{time:?}")
|
||||
.replace("SystemTime { tv_sec: ", "")
|
||||
.replace(", tv_nsec: ", ".")
|
||||
.replace(" }", "")
|
||||
} else {
|
||||
timestamp_str.to_string()
|
||||
};
|
||||
|
||||
let priority = match json.get("PRIORITY").and_then(|v| v.as_str()) {
|
||||
Some("0") => LogLevel::Emergency,
|
||||
Some("1") => LogLevel::Alert,
|
||||
Some("2") => LogLevel::Critical,
|
||||
Some("3") => LogLevel::Error,
|
||||
Some("4") => LogLevel::Warning,
|
||||
Some("5") => LogLevel::Notice,
|
||||
Some("6") => LogLevel::Info,
|
||||
Some("7") => LogLevel::Debug,
|
||||
_ => LogLevel::Info,
|
||||
};
|
||||
|
||||
let message = json
|
||||
.get("MESSAGE")
|
||||
.and_then(|v| v.as_str())
|
||||
.unwrap_or("")
|
||||
.to_string();
|
||||
|
||||
let unit = json
|
||||
.get("_SYSTEMD_UNIT")
|
||||
.and_then(|v| v.as_str())
|
||||
.map(|s| s.to_string());
|
||||
|
||||
let entry_pid = json
|
||||
.get("_PID")
|
||||
.and_then(|v| v.as_str())
|
||||
.and_then(|s| s.parse::<u32>().ok());
|
||||
|
||||
let comm = json
|
||||
.get("_COMM")
|
||||
.and_then(|v| v.as_str())
|
||||
.map(|s| s.to_string());
|
||||
|
||||
let uid = json
|
||||
.get("_UID")
|
||||
.and_then(|v| v.as_str())
|
||||
.and_then(|s| s.parse::<u32>().ok());
|
||||
|
||||
let gid = json
|
||||
.get("_GID")
|
||||
.and_then(|v| v.as_str())
|
||||
.and_then(|s| s.parse::<u32>().ok());
|
||||
|
||||
entries.push(JournalEntry {
|
||||
timestamp,
|
||||
priority,
|
||||
message,
|
||||
unit,
|
||||
pid: entry_pid,
|
||||
comm,
|
||||
uid,
|
||||
gid,
|
||||
});
|
||||
}
|
||||
|
||||
// Sort by timestamp (newest first)
|
||||
entries.sort_by(|a, b| b.timestamp.cmp(&a.timestamp));
|
||||
|
||||
let response_timestamp = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.map_err(|e| format!("Time error: {e}"))?
|
||||
.as_secs();
|
||||
|
||||
let total_count = entries.len() as u32;
|
||||
let truncated = entries.len() >= 100; // We requested 100 lines, so if we got 100, there might be more
|
||||
|
||||
Ok(JournalResponse {
|
||||
entries,
|
||||
total_count,
|
||||
truncated,
|
||||
cached_at: response_timestamp,
|
||||
})
|
||||
}
|
||||
|
||||
@ -63,11 +63,6 @@ pub struct AppState {
|
||||
pub cache_metrics: Arc<Mutex<CacheEntry<crate::types::Metrics>>>,
|
||||
pub cache_disks: Arc<Mutex<CacheEntry<Vec<crate::types::DiskInfo>>>>,
|
||||
pub cache_processes: Arc<Mutex<CacheEntry<crate::types::ProcessesPayload>>>,
|
||||
|
||||
// Process detail caches (per-PID)
|
||||
pub cache_process_metrics:
|
||||
Arc<Mutex<HashMap<u32, CacheEntry<crate::types::ProcessMetricsResponse>>>>,
|
||||
pub cache_journal_entries: Arc<Mutex<HashMap<u32, CacheEntry<crate::types::JournalResponse>>>>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
@ -76,12 +71,6 @@ pub struct CacheEntry<T> {
|
||||
pub value: Option<T>,
|
||||
}
|
||||
|
||||
// `Default` simply delegates to `new()` so `CacheEntry` works with
// `HashMap::entry(..).or_default()` and struct-update syntax.
impl<T> Default for CacheEntry<T> {
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
impl<T> CacheEntry<T> {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
@ -101,12 +90,6 @@ impl<T> CacheEntry<T> {
|
||||
}
|
||||
}
|
||||
|
||||
// `Default` delegates to `new()` (clippy's preferred shape when a `new()`
// with no arguments exists).
impl Default for AppState {
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
impl AppState {
|
||||
pub fn new() -> Self {
|
||||
let sys = System::new();
|
||||
@ -133,8 +116,6 @@ impl AppState {
|
||||
cache_metrics: Arc::new(Mutex::new(CacheEntry::new())),
|
||||
cache_disks: Arc::new(Mutex::new(CacheEntry::new())),
|
||||
cache_processes: Arc::new(Mutex::new(CacheEntry::new())),
|
||||
cache_process_metrics: Arc::new(Mutex::new(HashMap::new())),
|
||||
cache_journal_entries: Arc::new(Mutex::new(HashMap::new())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -9,8 +9,6 @@ pub struct DiskInfo {
|
||||
pub name: String,
|
||||
pub total: u64,
|
||||
pub available: u64,
|
||||
pub temperature: Option<f32>,
|
||||
pub is_partition: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
@ -49,76 +47,3 @@ pub struct ProcessesPayload {
|
||||
pub process_count: usize,
|
||||
pub top_processes: Vec<ProcessInfo>,
|
||||
}
|
||||
|
||||
/// Per-thread statistics, gathered from /proc/{pid}/task/{tid} on Linux.
#[derive(Debug, Clone, Serialize)]
pub struct ThreadInfo {
    pub tid: u32,             // Thread ID
    pub name: String,         // Thread name (from /proc/{pid}/task/{tid}/comm)
    pub cpu_time_user: u64,   // User CPU time in microseconds
    pub cpu_time_system: u64, // System CPU time in microseconds
    pub status: String,       // Thread status (Running, Sleeping, etc.)
}
|
||||
|
||||
/// Full per-process detail payload returned by `collect_process_metrics`.
/// `Option` fields are `None` when the platform / data source cannot provide
/// the value; this struct is serialized to JSON, so field names are wire API.
#[derive(Debug, Clone, Serialize)]
pub struct DetailedProcessInfo {
    pub pid: u32,
    pub name: String,
    pub command: String, // full command line, space-joined
    pub cpu_usage: f32,
    pub mem_bytes: u64,
    pub virtual_mem_bytes: u64,
    pub shared_mem_bytes: Option<u64>, // not available from sysinfo
    pub thread_count: u32,
    pub fd_count: Option<u32>, // not available on all platforms
    pub status: String,
    pub parent_pid: Option<u32>,
    pub user_id: u32,  // real UID (0 when unavailable)
    pub group_id: u32, // real GID (0 when unavailable)
    pub start_time: u64, // Unix timestamp
    pub cpu_time_user: u64, // Microseconds
    pub cpu_time_system: u64, // Microseconds
    pub read_bytes: Option<u64>,
    pub write_bytes: Option<u64>,
    pub working_directory: Option<String>,
    pub executable_path: Option<String>,
    pub child_processes: Vec<DetailedProcessInfo>, // recursive tree of children
    pub threads: Vec<ThreadInfo>, // empty on non-Linux
}
|
||||
|
||||
/// Wire envelope for a single process-metrics reply, with the cache timestamp
/// so clients can judge staleness.
#[derive(Debug, Clone, Serialize)]
pub struct ProcessMetricsResponse {
    pub process: DetailedProcessInfo,
    pub cached_at: u64, // Unix timestamp when this data was cached
}
|
||||
|
||||
/// One parsed journald record (subset of journalctl's JSON fields).
#[derive(Debug, Clone, Serialize)]
pub struct JournalEntry {
    pub timestamp: String, // "secs.nanos" since epoch (see collect_journal_entries)
    pub priority: LogLevel,
    pub message: String,
    pub unit: Option<String>, // systemd unit name (_SYSTEMD_UNIT)
    pub pid: Option<u32>,     // _PID of the logging process
    pub comm: Option<String>, // process command name (_COMM)
    pub uid: Option<u32>,
    pub gid: Option<u32>,
}
|
||||
|
||||
/// Syslog/journald priority levels (numeric values match RFC 5424 severities).
/// NOTE(review): serde derives serialize the variant *name*, not the numeric
/// discriminant — confirm that is what clients expect on the wire.
#[derive(Debug, Clone, Serialize)]
pub enum LogLevel {
    Emergency = 0,
    Alert = 1,
    Critical = 2,
    Error = 3,
    Warning = 4,
    Notice = 5,
    Info = 6,
    Debug = 7,
}
|
||||
|
||||
/// Wire envelope for a journal query: entries (newest first) plus paging and
/// cache metadata.
#[derive(Debug, Clone, Serialize)]
pub struct JournalResponse {
    pub entries: Vec<JournalEntry>,
    pub total_count: u32, // number of entries in this response
    pub truncated: bool,  // true when the 100-line limit was hit (more may exist)
    pub cached_at: u64,   // Unix timestamp when this data was cached
}
|
||||
|
||||
@ -17,8 +17,6 @@ use crate::proto::pb;
|
||||
use crate::state::AppState;
|
||||
|
||||
// Compression threshold in bytes: payloads at or above this size are
// gzip-compressed before sending; smaller frames go out as-is since the
// compression overhead would outweigh the savings.
// 768 is the production value (tuned to typical payload size).
//const COMPRESSION_THRESHOLD: usize = 50_000; // large test value, kept for reference
const COMPRESSION_THRESHOLD: usize = 768;
|
||||
|
||||
// Reusable buffer for compression to avoid allocations
|
||||
@ -113,90 +111,6 @@ async fn handle_socket(mut socket: WebSocket, state: AppState) {
|
||||
}
|
||||
drop(cache); // Explicit drop to release mutex early
|
||||
}
|
||||
Message::Text(ref text) if text.starts_with("get_process_metrics:") => {
|
||||
if let Some(pid_str) = text.strip_prefix("get_process_metrics:")
|
||||
&& let Ok(pid) = pid_str.parse::<u32>()
|
||||
{
|
||||
let ttl = std::time::Duration::from_millis(250); // 250ms TTL
|
||||
|
||||
// Check cache first
|
||||
{
|
||||
let cache = state.cache_process_metrics.lock().await;
|
||||
if let Some(entry) = cache.get(&pid)
|
||||
&& entry.is_fresh(ttl)
|
||||
&& let Some(cached_response) = entry.get()
|
||||
{
|
||||
let _ = send_json(&mut socket, cached_response).await;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// Collect fresh data
|
||||
match crate::metrics::collect_process_metrics(pid, &state).await {
|
||||
Ok(response) => {
|
||||
// Cache the response
|
||||
{
|
||||
let mut cache = state.cache_process_metrics.lock().await;
|
||||
cache
|
||||
.entry(pid)
|
||||
.or_insert_with(crate::state::CacheEntry::new)
|
||||
.set(response.clone());
|
||||
}
|
||||
let _ = send_json(&mut socket, &response).await;
|
||||
}
|
||||
Err(err) => {
|
||||
let error_response = serde_json::json!({
|
||||
"error": err,
|
||||
"request": "get_process_metrics",
|
||||
"pid": pid
|
||||
});
|
||||
let _ = send_json(&mut socket, &error_response).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Message::Text(ref text) if text.starts_with("get_journal_entries:") => {
|
||||
if let Some(pid_str) = text.strip_prefix("get_journal_entries:")
|
||||
&& let Ok(pid) = pid_str.parse::<u32>()
|
||||
{
|
||||
let ttl = std::time::Duration::from_secs(1); // 1s TTL
|
||||
|
||||
// Check cache first
|
||||
{
|
||||
let cache = state.cache_journal_entries.lock().await;
|
||||
if let Some(entry) = cache.get(&pid)
|
||||
&& entry.is_fresh(ttl)
|
||||
&& let Some(cached_response) = entry.get()
|
||||
{
|
||||
let _ = send_json(&mut socket, cached_response).await;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// Collect fresh data
|
||||
match crate::metrics::collect_journal_entries(pid) {
|
||||
Ok(response) => {
|
||||
// Cache the response
|
||||
{
|
||||
let mut cache = state.cache_journal_entries.lock().await;
|
||||
cache
|
||||
.entry(pid)
|
||||
.or_insert_with(crate::state::CacheEntry::new)
|
||||
.set(response.clone());
|
||||
}
|
||||
let _ = send_json(&mut socket, &response).await;
|
||||
}
|
||||
Err(err) => {
|
||||
let error_response = serde_json::json!({
|
||||
"error": err,
|
||||
"request": "get_journal_entries",
|
||||
"pid": pid
|
||||
});
|
||||
let _ = send_json(&mut socket, &error_response).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Message::Close(_) => break,
|
||||
_ => {}
|
||||
}
|
||||
|
||||
@ -1,132 +0,0 @@
|
||||
//! Tests for the process cache functionality
|
||||
|
||||
use socktop_agent::state::{AppState, CacheEntry};
|
||||
use socktop_agent::types::{DetailedProcessInfo, JournalResponse, ProcessMetricsResponse};
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
|
||||
/// End-to-end check of the per-PID cache TTL semantics:
/// seed both caches, verify fresh reads, then verify the 250ms process-metrics
/// entry expires before the 1s journal entry does.
/// NOTE(review): uses real wall-clock sleeps, so it is timing-sensitive on
/// heavily loaded CI machines.
#[tokio::test]
async fn test_process_cache_ttl() {
    let state = AppState::new();
    let pid = 12345;

    // Create mock data (values are arbitrary but deterministic)
    let process_info = DetailedProcessInfo {
        pid,
        name: "test_process".to_string(),
        command: "test command".to_string(),
        cpu_usage: 50.0,
        mem_bytes: 1024 * 1024,
        virtual_mem_bytes: 2048 * 1024,
        shared_mem_bytes: Some(512 * 1024),
        thread_count: 4,
        fd_count: Some(10),
        status: "Running".to_string(),
        parent_pid: Some(1),
        user_id: 1000,
        group_id: 1000,
        start_time: 1234567890,
        cpu_time_user: 100000,
        cpu_time_system: 50000,
        read_bytes: Some(1024),
        write_bytes: Some(2048),
        working_directory: Some("/tmp".to_string()),
        executable_path: Some("/usr/bin/test".to_string()),
        child_processes: vec![],
        threads: vec![],
    };

    let metrics_response = ProcessMetricsResponse {
        process: process_info,
        cached_at: 1234567890,
    };

    let journal_response = JournalResponse {
        entries: vec![],
        total_count: 0,
        truncated: false,
        cached_at: 1234567890,
    };

    // Test process metrics caching (insert under the per-PID map)
    {
        let mut cache = state.cache_process_metrics.lock().await;
        cache
            .entry(pid)
            .or_insert_with(CacheEntry::new)
            .set(metrics_response.clone());
    }

    // Should get cached value immediately
    {
        let cache = state.cache_process_metrics.lock().await;
        let ttl = Duration::from_millis(250);
        if let Some(entry) = cache.get(&pid) {
            assert!(entry.is_fresh(ttl));
            assert!(entry.get().is_some());
            assert_eq!(entry.get().unwrap().process.pid, pid);
        } else {
            panic!("Expected cached entry");
        }
    }
    println!("✓ Process metrics cached and retrieved successfully");

    // Test journal entries caching
    {
        let mut cache = state.cache_journal_entries.lock().await;
        cache
            .entry(pid)
            .or_insert_with(CacheEntry::new)
            .set(journal_response.clone());
    }

    // Should get cached value immediately
    {
        let cache = state.cache_journal_entries.lock().await;
        let ttl = Duration::from_secs(1);
        if let Some(entry) = cache.get(&pid) {
            assert!(entry.is_fresh(ttl));
            assert!(entry.get().is_some());
            assert_eq!(entry.get().unwrap().total_count, 0);
        } else {
            panic!("Expected cached entry");
        }
    }
    println!("✓ Journal entries cached and retrieved successfully");

    // Wait for process metrics to expire (250ms + buffer)
    sleep(Duration::from_millis(300)).await;

    // Process metrics should be expired now
    {
        let cache = state.cache_process_metrics.lock().await;
        let ttl = Duration::from_millis(250);
        if let Some(entry) = cache.get(&pid) {
            assert!(!entry.is_fresh(ttl));
        }
    }
    println!("✓ Process metrics correctly expired after TTL");

    // Journal entries should still be valid (1s TTL)
    {
        let cache = state.cache_journal_entries.lock().await;
        let ttl = Duration::from_secs(1);
        if let Some(entry) = cache.get(&pid) {
            assert!(entry.is_fresh(ttl));
        }
    }
    println!("✓ Journal entries still valid within TTL");

    // Wait for journal entries to expire (additional 800ms to reach 1s total)
    sleep(Duration::from_millis(800)).await;

    // Journal entries should be expired now
    {
        let cache = state.cache_journal_entries.lock().await;
        let ttl = Duration::from_secs(1);
        if let Some(entry) = cache.get(&pid) {
            assert!(!entry.is_fresh(ttl));
        }
    }
    println!("✓ Journal entries correctly expired after TTL");
}
|
||||
@ -1,89 +0,0 @@
|
||||
//! Tests for process detail collection functionality
|
||||
|
||||
use socktop_agent::metrics::{collect_journal_entries, collect_process_metrics};
|
||||
use socktop_agent::state::AppState;
|
||||
use std::process;
|
||||
|
||||
/// Smoke test: collecting metrics for our own PID should succeed and echo the
/// PID back. Failure is tolerated (logged) since sysinfo may not resolve the
/// test process on every platform.
#[tokio::test]
async fn test_collect_process_metrics_self() {
    // Test collecting metrics for our own process
    let pid = process::id();
    let state = AppState::new();

    match collect_process_metrics(pid, &state).await {
        Ok(response) => {
            assert_eq!(response.process.pid, pid);
            assert!(!response.process.name.is_empty());
            // Command might be empty on some systems, so don't assert on it
            assert!(response.cached_at > 0);
            println!(
                "✓ Process metrics collected for PID {}: {} ({})",
                pid, response.process.name, response.process.command
            );
        }
        Err(e) => {
            // This might fail if sysinfo can't find the process, which is possible
            println!("⚠ Warning: Failed to collect process metrics for self: {e}");
        }
    }
}
|
||||
|
||||
/// Smoke test: querying journald for our own PID. Failure is tolerated
/// (logged) because journalctl may be absent or access-restricted in CI.
#[tokio::test]
async fn test_collect_journal_entries_self() {
    // Test collecting journal entries for our own process
    let pid = process::id();

    match collect_journal_entries(pid) {
        Ok(response) => {
            assert!(response.cached_at > 0);
            println!(
                "✓ Journal entries collected for PID {}: {} entries",
                pid, response.total_count
            );
            if !response.entries.is_empty() {
                let entry = &response.entries[0];
                println!("  Latest entry: {}", entry.message);
            }
        }
        Err(e) => {
            // This might fail if journalctl is not available or restricted
            println!("⚠ Warning: Failed to collect journal entries for self: {e}");
        }
    }
}
|
||||
|
||||
/// Negative path: a (very likely) nonexistent PID must produce a "not found"
/// error rather than a panic or fabricated data.
#[tokio::test]
async fn test_collect_process_metrics_invalid_pid() {
    // Test with an invalid PID
    let invalid_pid = 999999;
    let state = AppState::new();

    match collect_process_metrics(invalid_pid, &state).await {
        Ok(_) => {
            println!("⚠ Warning: Unexpectedly found process for invalid PID {invalid_pid}");
        }
        Err(e) => {
            println!("✓ Correctly failed for invalid PID {invalid_pid}: {e}");
            assert!(e.contains("not found"));
        }
    }
}
|
||||
|
||||
/// Negative path for the journal: an unknown PID typically yields an empty
/// (but successful) result from journalctl; an error is also acceptable.
#[tokio::test]
async fn test_collect_journal_entries_invalid_pid() {
    // Test with an invalid PID - journalctl might still return empty results
    let invalid_pid = 999999;

    match collect_journal_entries(invalid_pid) {
        Ok(response) => {
            println!(
                "✓ Journal query completed for invalid PID {} (empty result expected): {} entries",
                invalid_pid, response.total_count
            );
            // Should be empty or very few entries
        }
        Err(e) => {
            println!("✓ Journal query failed for invalid PID {invalid_pid}: {e}");
        }
    }
}
|
||||
@ -1,3 +1,4 @@
|
||||
use assert_cmd::prelude::*;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::process::Command;
|
||||
@ -16,7 +17,7 @@ fn generates_self_signed_cert_and_key_in_xdg_path() {
|
||||
let xdg = tmpdir.path().to_path_buf();
|
||||
|
||||
// Run the agent once with --enableSSL, short timeout so it exits quickly when killed
|
||||
let mut cmd = Command::new(assert_cmd::cargo::cargo_bin!("socktop_agent"));
|
||||
let mut cmd = Command::cargo_bin("socktop_agent").expect("binary exists");
|
||||
// Bind to an ephemeral port (-p 0) to avoid conflicts/flakes
|
||||
cmd.env("XDG_CONFIG_HOME", &xdg)
|
||||
.arg("--enableSSL")
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "socktop_connector"
|
||||
version = "1.50.0"
|
||||
version = "0.1.6"
|
||||
edition = "2024"
|
||||
license = "MIT"
|
||||
description = "WebSocket connector library for socktop agent communication"
|
||||
|
||||
@ -66,7 +66,7 @@ use tokio_tungstenite::{Connector, connect_async_tls_with_config};
|
||||
use crate::error::{ConnectorError, Result};
|
||||
use crate::types::{AgentRequest, AgentResponse};
|
||||
#[cfg(any(feature = "networking", feature = "wasm"))]
|
||||
use crate::types::{DiskInfo, Metrics, ProcessInfo, ProcessesPayload, ProcessMetricsResponse, JournalResponse};
|
||||
use crate::types::{DiskInfo, Metrics, ProcessInfo, ProcessesPayload};
|
||||
#[cfg(feature = "tls")]
|
||||
fn ensure_crypto_provider() {
|
||||
use std::sync::Once;
|
||||
@ -186,18 +186,6 @@ impl SocktopConnector {
|
||||
.ok_or_else(|| ConnectorError::invalid_response("Failed to get processes"))?;
|
||||
Ok(AgentResponse::Processes(processes))
|
||||
}
|
||||
AgentRequest::ProcessMetrics { pid } => {
|
||||
let process_metrics = request_process_metrics(stream, pid)
|
||||
.await
|
||||
.ok_or_else(|| ConnectorError::invalid_response("Failed to get process metrics"))?;
|
||||
Ok(AgentResponse::ProcessMetrics(process_metrics))
|
||||
}
|
||||
AgentRequest::JournalEntries { pid } => {
|
||||
let journal_entries = request_journal_entries(stream, pid)
|
||||
.await
|
||||
.ok_or_else(|| ConnectorError::invalid_response("Failed to get journal entries"))?;
|
||||
Ok(AgentResponse::JournalEntries(journal_entries))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -449,38 +437,6 @@ async fn request_processes(ws: &mut WsStream) -> Option<ProcessesPayload> {
|
||||
}
|
||||
}
|
||||
|
||||
// Send a "get_process_metrics:{pid}" request and await a JSON ProcessMetricsResponse.
// Returns None on send failure, an unexpected frame type, or decode/parse failure.
#[cfg(feature = "networking")]
async fn request_process_metrics(ws: &mut WsStream, pid: u32) -> Option<ProcessMetricsResponse> {
    // Inlined format args for consistency with the rest of the crate
    // (clippy::uninlined_format_args).
    let request = format!("get_process_metrics:{pid}");
    if ws.send(Message::Text(request)).await.is_err() {
        return None;
    }
    match ws.next().await {
        // Binary frames carry gzip-compressed JSON (agent compresses large payloads)
        Some(Ok(Message::Binary(b))) => gunzip_to_string(&b)
            .ok()
            .and_then(|s| serde_json::from_str::<ProcessMetricsResponse>(&s).ok()),
        Some(Ok(Message::Text(json))) => serde_json::from_str::<ProcessMetricsResponse>(&json).ok(),
        _ => None,
    }
}
|
||||
|
||||
// Send a "get_journal_entries:{pid}" request and await a JSON JournalResponse.
// Returns None on send failure, an unexpected frame type, or decode/parse failure.
#[cfg(feature = "networking")]
async fn request_journal_entries(ws: &mut WsStream, pid: u32) -> Option<JournalResponse> {
    // Inlined format args for consistency with the rest of the crate
    // (clippy::uninlined_format_args).
    let request = format!("get_journal_entries:{pid}");
    if ws.send(Message::Text(request)).await.is_err() {
        return None;
    }
    match ws.next().await {
        // Binary frames carry gzip-compressed JSON (agent compresses large payloads)
        Some(Ok(Message::Binary(b))) => gunzip_to_string(&b)
            .ok()
            .and_then(|s| serde_json::from_str::<JournalResponse>(&s).ok()),
        Some(Ok(Message::Text(json))) => serde_json::from_str::<JournalResponse>(&json).ok(),
        _ => None,
    }
}
|
||||
|
||||
// Decompress a gzip-compressed binary frame into a String.
|
||||
/// Unified gzip decompression to string for both networking and WASM
|
||||
#[cfg(any(feature = "networking", feature = "wasm"))]
|
||||
@ -849,20 +805,6 @@ impl SocktopConnector {
|
||||
Ok(AgentResponse::Processes(processes))
|
||||
}
|
||||
}
|
||||
AgentRequest::ProcessMetrics { pid: _ } => {
|
||||
// Parse JSON response for process metrics
|
||||
let process_metrics: ProcessMetricsResponse = serde_json::from_str(&response).map_err(|e| {
|
||||
ConnectorError::serialization_error(format!("Failed to parse process metrics: {e}"))
|
||||
})?;
|
||||
Ok(AgentResponse::ProcessMetrics(process_metrics))
|
||||
}
|
||||
AgentRequest::JournalEntries { pid: _ } => {
|
||||
// Parse JSON response for journal entries
|
||||
let journal_entries: JournalResponse = serde_json::from_str(&response).map_err(|e| {
|
||||
ConnectorError::serialization_error(format!("Failed to parse journal entries: {e}"))
|
||||
})?;
|
||||
Ok(AgentResponse::JournalEntries(journal_entries))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -6,8 +6,7 @@ use crate::{AgentRequest, AgentResponse};
|
||||
|
||||
#[cfg(feature = "networking")]
|
||||
use crate::networking::{
|
||||
WsStream, connect_to_agent, request_disks, request_journal_entries, request_metrics,
|
||||
request_process_metrics, request_processes,
|
||||
WsStream, connect_to_agent, request_disks, request_metrics, request_processes,
|
||||
};
|
||||
|
||||
#[cfg(all(feature = "wasm", not(feature = "networking")))]
|
||||
@ -73,20 +72,6 @@ impl SocktopConnector {
|
||||
.ok_or_else(|| ConnectorError::invalid_response("Failed to get processes"))?;
|
||||
Ok(AgentResponse::Processes(processes))
|
||||
}
|
||||
AgentRequest::ProcessMetrics { pid } => {
|
||||
let process_metrics =
|
||||
request_process_metrics(stream, pid).await.ok_or_else(|| {
|
||||
ConnectorError::invalid_response("Failed to get process metrics")
|
||||
})?;
|
||||
Ok(AgentResponse::ProcessMetrics(process_metrics))
|
||||
}
|
||||
AgentRequest::JournalEntries { pid } => {
|
||||
let journal_entries =
|
||||
request_journal_entries(stream, pid).await.ok_or_else(|| {
|
||||
ConnectorError::invalid_response("Failed to get journal entries")
|
||||
})?;
|
||||
Ok(AgentResponse::JournalEntries(journal_entries))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -161,8 +161,7 @@ pub use config::ConnectorConfig;
|
||||
pub use connector_impl::SocktopConnector;
|
||||
pub use error::{ConnectorError, Result};
|
||||
pub use types::{
|
||||
AgentRequest, AgentResponse, DetailedProcessInfo, DiskInfo, GpuInfo, JournalEntry,
|
||||
JournalResponse, LogLevel, Metrics, NetworkInfo, ProcessInfo, ProcessMetricsResponse,
|
||||
AgentRequest, AgentResponse, DiskInfo, GpuInfo, Metrics, NetworkInfo, ProcessInfo,
|
||||
ProcessesPayload,
|
||||
};
|
||||
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
//! WebSocket request handlers for native (non-WASM) environments.
|
||||
|
||||
use crate::networking::WsStream;
|
||||
use crate::types::{JournalResponse, ProcessMetricsResponse};
|
||||
use crate::utils::{gunzip_to_string, gunzip_to_vec, is_gzip};
|
||||
use crate::{DiskInfo, Metrics, ProcessInfo, ProcessesPayload, pb};
|
||||
|
||||
@ -83,36 +82,3 @@ pub async fn request_processes(ws: &mut WsStream) -> Option<ProcessesPayload> {
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Send a "get_process_metrics:{pid}" request and await a JSON ProcessMetricsResponse.
/// Returns `None` on send failure, an unexpected frame type, or decode/parse failure.
pub async fn request_process_metrics(
    ws: &mut WsStream,
    pid: u32,
) -> Option<ProcessMetricsResponse> {
    let request = format!("get_process_metrics:{pid}");
    if ws.send(Message::Text(request)).await.is_err() {
        return None;
    }
    match ws.next().await {
        // Binary frames carry gzip-compressed JSON (agent compresses large payloads)
        Some(Ok(Message::Binary(b))) => gunzip_to_string(&b)
            .ok()
            .and_then(|s| serde_json::from_str::<ProcessMetricsResponse>(&s).ok()),
        Some(Ok(Message::Text(json))) => serde_json::from_str::<ProcessMetricsResponse>(&json).ok(),
        _ => None,
    }
}
|
||||
|
||||
/// Send a "get_journal_entries:{pid}" request and await a JSON JournalResponse.
/// Returns `None` on send failure, an unexpected frame type, or decode/parse failure.
pub async fn request_journal_entries(ws: &mut WsStream, pid: u32) -> Option<JournalResponse> {
    let request = format!("get_journal_entries:{pid}");
    if ws.send(Message::Text(request)).await.is_err() {
        return None;
    }
    match ws.next().await {
        // Binary frames carry gzip-compressed JSON (agent compresses large payloads)
        Some(Ok(Message::Binary(b))) => gunzip_to_string(&b)
            .ok()
            .and_then(|s| serde_json::from_str::<JournalResponse>(&s).ok()),
        Some(Ok(Message::Text(json))) => serde_json::from_str::<JournalResponse>(&json).ok(),
        _ => None,
    }
}
|
||||
|
||||
@ -15,10 +15,6 @@ pub struct DiskInfo {
|
||||
pub name: String,
|
||||
pub total: u64,
|
||||
pub available: u64,
|
||||
#[serde(default)]
|
||||
pub temperature: Option<f32>,
|
||||
#[serde(default)]
|
||||
pub is_partition: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
@ -77,79 +73,6 @@ pub struct ProcessesPayload {
|
||||
pub top_processes: Vec<ProcessInfo>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub struct ThreadInfo {
|
||||
pub tid: u32, // Thread ID
|
||||
pub name: String, // Thread name (from /proc/{pid}/task/{tid}/comm)
|
||||
pub cpu_time_user: u64, // User CPU time in microseconds
|
||||
pub cpu_time_system: u64, // System CPU time in microseconds
|
||||
pub status: String, // Thread status (Running, Sleeping, etc.)
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub struct DetailedProcessInfo {
|
||||
pub pid: u32,
|
||||
pub name: String,
|
||||
pub command: String,
|
||||
pub cpu_usage: f32,
|
||||
pub mem_bytes: u64,
|
||||
pub virtual_mem_bytes: u64,
|
||||
pub shared_mem_bytes: Option<u64>,
|
||||
pub thread_count: u32,
|
||||
pub fd_count: Option<u32>,
|
||||
pub status: String,
|
||||
pub parent_pid: Option<u32>,
|
||||
pub user_id: u32,
|
||||
pub group_id: u32,
|
||||
pub start_time: u64, // Unix timestamp
|
||||
pub cpu_time_user: u64, // Microseconds
|
||||
pub cpu_time_system: u64, // Microseconds
|
||||
pub read_bytes: Option<u64>,
|
||||
pub write_bytes: Option<u64>,
|
||||
pub working_directory: Option<String>,
|
||||
pub executable_path: Option<String>,
|
||||
pub child_processes: Vec<DetailedProcessInfo>,
|
||||
pub threads: Vec<ThreadInfo>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub struct ProcessMetricsResponse {
|
||||
pub process: DetailedProcessInfo,
|
||||
pub cached_at: u64, // Unix timestamp when this data was cached
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub struct JournalEntry {
|
||||
pub timestamp: String, // ISO 8601 formatted timestamp
|
||||
pub priority: LogLevel,
|
||||
pub message: String,
|
||||
pub unit: Option<String>, // systemd unit name
|
||||
pub pid: Option<u32>,
|
||||
pub comm: Option<String>, // process command name
|
||||
pub uid: Option<u32>,
|
||||
pub gid: Option<u32>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub enum LogLevel {
|
||||
Emergency = 0,
|
||||
Alert = 1,
|
||||
Critical = 2,
|
||||
Error = 3,
|
||||
Warning = 4,
|
||||
Notice = 5,
|
||||
Info = 6,
|
||||
Debug = 7,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub struct JournalResponse {
|
||||
pub entries: Vec<JournalEntry>,
|
||||
pub total_count: u32,
|
||||
pub truncated: bool,
|
||||
pub cached_at: u64, // Unix timestamp when this data was cached
|
||||
}
|
||||
|
||||
/// Request types that can be sent to the agent
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
#[serde(tag = "type")]
|
||||
@ -160,10 +83,6 @@ pub enum AgentRequest {
|
||||
Disks,
|
||||
#[serde(rename = "processes")]
|
||||
Processes,
|
||||
#[serde(rename = "process_metrics")]
|
||||
ProcessMetrics { pid: u32 },
|
||||
#[serde(rename = "journal_entries")]
|
||||
JournalEntries { pid: u32 },
|
||||
}
|
||||
|
||||
impl AgentRequest {
|
||||
@ -173,8 +92,6 @@ impl AgentRequest {
|
||||
AgentRequest::Metrics => "get_metrics".to_string(),
|
||||
AgentRequest::Disks => "get_disks".to_string(),
|
||||
AgentRequest::Processes => "get_processes".to_string(),
|
||||
AgentRequest::ProcessMetrics { pid } => format!("get_process_metrics:{pid}"),
|
||||
AgentRequest::JournalEntries { pid } => format!("get_journal_entries:{pid}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -189,8 +106,4 @@ pub enum AgentResponse {
|
||||
Disks(Vec<DiskInfo>),
|
||||
#[serde(rename = "processes")]
|
||||
Processes(ProcessesPayload),
|
||||
#[serde(rename = "process_metrics")]
|
||||
ProcessMetrics(ProcessMetricsResponse),
|
||||
#[serde(rename = "journal_entries")]
|
||||
JournalEntries(JournalResponse),
|
||||
}
|
||||
|
||||
@ -3,10 +3,7 @@
|
||||
use crate::error::{ConnectorError, Result};
|
||||
use crate::pb::Processes;
|
||||
use crate::utils::{gunzip_to_string, gunzip_to_vec, is_gzip, log_debug};
|
||||
use crate::{
|
||||
AgentRequest, AgentResponse, DiskInfo, JournalResponse, Metrics, ProcessInfo,
|
||||
ProcessMetricsResponse, ProcessesPayload,
|
||||
};
|
||||
use crate::{AgentRequest, AgentResponse, DiskInfo, Metrics, ProcessInfo, ProcessesPayload};
|
||||
|
||||
use prost::Message as ProstMessage;
|
||||
use std::cell::RefCell;
|
||||
@ -209,26 +206,6 @@ pub async fn send_request_and_wait(
|
||||
Ok(AgentResponse::Processes(processes))
|
||||
}
|
||||
}
|
||||
AgentRequest::ProcessMetrics { pid: _ } => {
|
||||
// Parse JSON response for process metrics
|
||||
let process_metrics: ProcessMetricsResponse =
|
||||
serde_json::from_str(&response).map_err(|e| {
|
||||
ConnectorError::serialization_error(format!(
|
||||
"Failed to parse process metrics: {e}"
|
||||
))
|
||||
})?;
|
||||
Ok(AgentResponse::ProcessMetrics(process_metrics))
|
||||
}
|
||||
AgentRequest::JournalEntries { pid: _ } => {
|
||||
// Parse JSON response for journal entries
|
||||
let journal_entries: JournalResponse =
|
||||
serde_json::from_str(&response).map_err(|e| {
|
||||
ConnectorError::serialization_error(format!(
|
||||
"Failed to parse journal entries: {e}"
|
||||
))
|
||||
})?;
|
||||
Ok(AgentResponse::JournalEntries(journal_entries))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Loading…
Reference in New Issue
Block a user