All checks were successful
Rust Cross-Platform Build / Detect Rust Project (push) Successful in 5s
Rust Cross-Platform Build / Run Tests (push) Successful in 1m14s
Rust Cross-Platform Build / Build (x86_64-unknown-linux-gnu) (push) Successful in 3m18s
Rust Cross-Platform Build / Build (x86_64-pc-windows-gnu) (push) Successful in 4m4s
Rust Cross-Platform Build / Set Tag Name (push) Successful in 5s
Rust Cross-Platform Build / Build and Push Docker Image (push) Successful in 2m19s
Rust Cross-Platform Build / Workflow Summary (push) Successful in 2s
Rust Cross-Platform Build / Create Tag (push) Successful in 5s
176 lines
6.7 KiB
Rust
176 lines
6.7 KiB
Rust
/// # WatcherAgent
|
|
///
|
|
/// **WatcherAgent** is a cross-platform system monitoring agent written in Rust.
|
|
///
|
|
/// ## Overview
|
|
/// This agent collects real-time hardware metrics (CPU, GPU, RAM, disk, network) and communicates with a backend server for registration, reporting, and remote control. It is designed for deployment in environments where automated monitoring and remote management of system resources is required.
|
|
///
|
|
/// ## Features
|
|
/// - **Hardware Metrics:** Collects CPU, GPU, RAM, disk, and network statistics using platform-specific APIs.
|
|
/// - **Docker Integration:** Detects and manages its own Docker container, supports image updates and container restarts.
|
|
/// - **Server Communication:** Registers with a backend server, sends periodic heartbeats, and reports metrics securely.
|
|
/// - **Remote Commands:** Listens for and executes commands from the backend (e.g., update image, restart container, stop agent).
|
|
///
|
|
/// ## Modules
|
|
/// - [`api`]: Handles HTTP communication with the backend server (registration, heartbeat, metrics, commands).
|
|
/// - [`hardware`]: Collects hardware metrics from the host system (CPU, GPU, RAM, disk, network).
|
|
/// - [`metrics`]: Orchestrates metric collection and reporting.
|
|
/// - [`models`]: Defines data structures for server communication and metrics.
|
|
/// - [`docker`]: Integrates with Docker for container management and agent lifecycle.
|
|
///
|
|
/// ## Usage
|
|
/// Run the agent with the backend server URL as an argument:
|
|
/// ```sh
|
|
/// watcheragent <server-url>
|
|
/// ```
|
|
///
|
|
/// The agent will register itself, start collecting metrics, and listen for remote commands.
|
|
pub mod api;
|
|
pub mod docker;
|
|
pub mod hardware;
|
|
pub mod metrics;
|
|
pub mod models;
|
|
|
|
use bollard::Docker;
|
|
use std::env;
|
|
use std::error::Error;
|
|
use std::sync::Arc;
|
|
use tokio::task::JoinHandle;
|
|
|
|
/// Awaits a spawned asynchronous task and flattens its nested `Result` type.
|
|
///
|
|
/// This utility is used to handle the result of a `tokio::spawn`ed task that itself returns a `Result`,
|
|
/// propagating any errors from both the task and its execution.
|
|
///
|
|
/// # Type Parameters
|
|
/// * `T` - The type returned by the task on success.
|
|
///
|
|
/// # Arguments
|
|
/// * `handle` - The `JoinHandle` of the spawned task.
|
|
///
|
|
/// # Returns
|
|
/// * `Result<T, Box<dyn Error + Send + Sync>>` - The result of the task, or an error if the task failed or panicked.
|
|
async fn flatten<T>(
|
|
handle: JoinHandle<Result<T, Box<dyn Error + Send + Sync>>>,
|
|
) -> Result<T, Box<dyn Error + Send + Sync>> {
|
|
match handle.await {
|
|
Ok(Ok(result)) => Ok(result),
|
|
Ok(Err(err)) => Err(err),
|
|
Err(_err) => Err("handling failed".into()),
|
|
}
|
|
}
|
|
|
|
/// Main entry point for the WatcherAgent application.
///
/// This function performs the following steps:
/// 1. Parses command-line arguments to obtain the backend server URL.
/// 2. Registers the agent with the backend server and retrieves its server ID and IP address.
/// 3. Initializes the Docker client for container management (optional).
/// 4. Detects the current running image version and broadcasts the container inventory.
/// 5. Spawns background tasks for:
///    - Listening for remote commands from the server
///    - Sending periodic heartbeat signals
///    - Collecting and reporting hardware metrics
/// 6. Waits for all background tasks to complete and logs their results.
///
/// # Arguments
/// * `server-url` - The URL of the backend server to register and report metrics to (passed as a command-line argument).
///
/// # Errors
/// Returns an error if registration or any background task fails, or if required arguments are missing.
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error + Send + Sync>> {
    // Parse command-line arguments: exactly one positional argument (the backend URL) is required.
    let args: Vec<String> = env::args().collect();
    if args.len() < 2 {
        eprintln!("Usage: {} <server-url>", args[0]);
        return Err("Missing server URL argument".into());
    }
    let server_url = &args[1];
    println!("Server URL: {:?}", server_url);

    // Registration with backend server; the returned (id, ip) pair identifies this agent
    // in all subsequent heartbeat and metrics calls.
    let (server_id, ip) = match api::register_with_server(&server_url).await {
        Ok((id, ip)) => {
            println!("Registered with server. ID: {}, IP: {}", id, ip);
            (id, ip)
        }
        Err(e) => {
            // NOTE(review): this log message is in German while every other message in the
            // binary is English — consider unifying (runtime string left untouched here).
            eprintln!("Fehler bei der Registrierung am Server: {e}");
            return Err(e);
        }
    };

    // Initialize Docker (optional - agent can run without Docker): `None` means no daemon found.
    let docker_manager = docker::DockerManager::new_optional();

    // Get current image version; "unknown" is the sentinel reported when Docker is absent.
    let client_version = if let Some(ref docker_manager) = docker_manager {
        docker_manager.get_client_version().await
    } else {
        "unknown".to_string()
    };
    println!("Client Version: {}", client_version);

    // Prepare Docker registration DTO; without Docker we report an empty container list
    // (JSON `[]`) under a placeholder server_id of 0.
    let container_dto = if let Some(ref docker_manager) = docker_manager {
        docker_manager.create_registration_dto().await?
    } else {
        models::DockerRegistrationDto {
            server_id: 0,
            containers: "[]".to_string(),
        }
    };
    // NOTE(review): `&mut container_dto.clone()` hands the callee a temporary clone, so any
    // mutation it performs is discarded. If broadcast_docker_containers is meant to update
    // the DTO (e.g. fill in server_id), pass `&mut container_dto` instead — TODO confirm.
    let _ =
        api::broadcast_docker_containers(server_url, server_id, &mut container_dto.clone()).await?;

    // Start background tasks
    // Start server listening for commands (only if Docker is available); without Docker a
    // dummy task is spawned so the later try_join! has a uniform shape.
    let listening_handle = if let Some(ref docker_manager) = docker_manager {
        tokio::spawn({
            let docker = docker_manager.docker.clone();
            let server_url = server_url.to_string();
            async move { api::listening_to_server(&docker, &server_url).await }
        })
    } else {
        println!("Docker not available, skipping server command listener.");
        tokio::spawn(async { Ok(()) }) // Dummy task
    };

    // Start heartbeat in background; owned copies are moved into the task so it is 'static.
    let heartbeat_handle = tokio::spawn({
        let ip = ip.clone();
        let server_url = server_url.to_string();
        async move { api::heartbeat_loop(&server_url, &ip).await }
    });

    // Main metrics loop
    println!("Starting metrics collection...");
    let metrics_handle = tokio::spawn({
        let ip = ip.clone();
        let server_url = server_url.to_string();
        // NOTE(review): this `.unwrap()` panics when Docker is unavailable, contradicting the
        // "optional Docker" design used for the listening task above — in practice the agent
        // cannot run without Docker past this point. Consider mirroring the dummy-task pattern
        // or making `metrics::Collector` accept an Option.
        let docker_manager = docker_manager.as_ref().cloned().unwrap();
        async move {
            let mut collector = metrics::Collector::new(server_id, ip, docker_manager);
            collector.run(&server_url).await
        }
    });

    // Wait for all tasks and check for errors; `flatten` folds JoinError and task errors
    // together so try_join! aborts on the first failure of either kind.
    let (listening_result, heartbeat_result, metrics_result) = tokio::try_join!(
        flatten(listening_handle),
        flatten(heartbeat_handle),
        flatten(metrics_handle)
    )?;

    println!(
        "All tasks completed: listening={:?}, heartbeat={:?}, metrics={:?}",
        listening_result, heartbeat_result, metrics_result
    );

    println!("All tasks completed successfully.");

    Ok(())
}
|