diff --git a/WatcherAgent/src/docker/mod.rs b/WatcherAgent/src/docker/mod.rs
index c4841ce..6a6ce2d 100644
--- a/WatcherAgent/src/docker/mod.rs
+++ b/WatcherAgent/src/docker/mod.rs
@@ -159,43 +159,58 @@ impl DockerManager {
         let container_infos: Vec<DockerCollectMetricDto> = container_infos_total
             .into_iter()
-            .map(|info| DockerCollectMetricDto {
-                id: Some(info.container.unwrap().id).unwrap_or("".to_string()),
-                cpu: info
-                    .cpu
-                    .unwrap()
-                    .cpu_usage_percent
-                    .map(|load| DockerContainerCpuDto {
-                        cpu_load: Some(load),
-                    })
-                    .unwrap_or(DockerContainerCpuDto { cpu_load: None }),
-                ram: info
-                    .ram
-                    .unwrap()
-                    .memory_usage_percent
-                    .map(|load| DockerContainerRamDto {
-                        cpu_load: Some(load),
-                    })
-                    .unwrap_or(DockerContainerRamDto { cpu_load: None }),
-                network: DockerContainerNetworkDto {
-                    net_in: info
-                        .network
-                        .as_ref()
-                        .unwrap()
-                        .rx_bytes
-                        .map(|bytes| bytes as f64)
-                        .or(Some(0.0)),
-                    net_out: info
-                        .network
-                        .unwrap()
-                        .tx_bytes
-                        .map(|bytes| bytes as f64)
-                        .or(Some(0.0)),
-                },
+            .filter_map(|info| {
+                // Safely handle container extraction
+                let container = match info.container {
+                    Some(c) => c,
+                    None => {
+                        eprintln!("Warning: Container info missing container data, skipping");
+                        return None;
+                    }
+                };
+
+                // Safely handle CPU data with defaults
+                let cpu_dto = if let Some(cpu) = info.cpu {
+                    DockerContainerCpuDto {
+                        cpu_load: cpu.cpu_usage_percent,
+                    }
+                } else {
+                    DockerContainerCpuDto { cpu_load: None }
+                };
+
+                // Safely handle RAM data with defaults
+                let ram_dto = if let Some(ram) = info.ram {
+                    DockerContainerRamDto {
+                        ram_load: ram.memory_usage_percent,
+                    }
+                } else {
+                    DockerContainerRamDto { ram_load: None }
+                };
+
+                // Safely handle network data with defaults
+                let network_dto = if let Some(net) = info.network {
+                    DockerContainerNetworkDto {
+                        net_in: net.rx_bytes.map(|bytes| bytes as f64).or(Some(0.0)),
+                        net_out: net.tx_bytes.map(|bytes| bytes as f64).or(Some(0.0)),
+                    }
+                } else {
+                    DockerContainerNetworkDto {
+                        net_in: Some(0.0),
+                        net_out: Some(0.0),
+                    }
+                };
+
+                Some(DockerCollectMetricDto {
+                    id: container.id,
+                    cpu: cpu_dto,
+                    ram: ram_dto,
+                    network: network_dto,
+                })
             })
             .collect();
+
         let dto = DockerMetricDto {
-            server_id: 0,
+            server_id: 0, // This should be set by the caller
            containers: serde_json::to_string(&container_infos)?,
         };

@@ -207,9 +222,9 @@ impl DockerManager {
     ) -> Result<DockerRegistrationDto, Box<dyn std::error::Error>> {
         let containers = self.get_containers().await?;
         let dto = DockerRegistrationDto {
-            server_id: 0,
-            //container_count,
-            containers: serde_json::to_string(&containers)?,
+            server_id: 0, // This will be set by the caller
+            containers: serde_json::to_string(&containers)
+                .unwrap_or_else(|_| "[]".to_string()), // Fallback to empty array
         };

         Ok(dto)
diff --git a/WatcherAgent/src/main.rs b/WatcherAgent/src/main.rs
index 5db93f6..8a989b6 100644
--- a/WatcherAgent/src/main.rs
+++ b/WatcherAgent/src/main.rs
@@ -153,7 +153,13 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
         let docker_manager = docker_manager.as_ref().cloned().unwrap();
         async move {
             let mut collector = metrics::Collector::new(server_id, ip, docker_manager);
-            collector.run(&server_url).await
+            if let Err(e) = collector.run(&server_url).await {
+                eprintln!("Metrics collection error: {}", e);
+                // Don't panic, just return the error
+                Err(e)
+            } else {
+                Ok(())
+            }
         }
     });
diff --git a/WatcherAgent/src/models.rs b/WatcherAgent/src/models.rs
index 5c70080..b44df56 100644
--- a/WatcherAgent/src/models.rs
+++ b/WatcherAgent/src/models.rs
@@ -235,7 +235,7 @@ pub struct DockerContainerCpuDto {

 #[derive(Debug, Serialize, Clone)]
 pub struct DockerContainerRamDto {
-    pub cpu_load: Option<f64>,
+    pub ram_load: Option<f64>,
 }

 #[derive(Debug, Serialize, Clone)]
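
Note (not part of the patch): the core change above replaces panicking .unwrap() calls with a filter_map that skips containers missing required data and leaves optional metrics as None. Below is a minimal standalone sketch of that pattern; ContainerInfo and MetricDto are simplified stand-in types for illustration, not the real WatcherAgent models.

// Sketch of the "skip-or-default" conversion used in the patch above.
#[derive(Debug)]
struct ContainerInfo {
    id: Option<String>,
    cpu_usage_percent: Option<f64>,
    memory_usage_percent: Option<f64>,
}

#[derive(Debug)]
struct MetricDto {
    id: String,
    cpu_load: Option<f64>,
    ram_load: Option<f64>,
}

fn to_dtos(infos: Vec<ContainerInfo>) -> Vec<MetricDto> {
    infos
        .into_iter()
        .filter_map(|info| {
            // Skip entries that lack the required id instead of unwrapping.
            let id = match info.id {
                Some(id) => id,
                None => {
                    eprintln!("Warning: container info missing id, skipping");
                    return None;
                }
            };
            // Optional metrics stay optional; nothing panics on missing data.
            Some(MetricDto {
                id,
                cpu_load: info.cpu_usage_percent,
                ram_load: info.memory_usage_percent,
            })
        })
        .collect()
}

fn main() {
    let infos = vec![
        ContainerInfo {
            id: Some("abc123".to_string()),
            cpu_usage_percent: Some(12.5),
            memory_usage_percent: None,
        },
        ContainerInfo {
            id: None,
            cpu_usage_percent: Some(3.0),
            memory_usage_percent: Some(40.0),
        },
    ];
    // Only the entry with an id survives; missing metrics stay None.
    println!("{:?}", to_dtos(infos));
}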