#!/usr/bin/env python3
"""
Watcher Database Test Data Population Script
Populates the SQLite database with realistic test data for local development.
"""
import sqlite3
import random
from datetime import datetime, timedelta
import os
# Database path (built with os.path.join so it works on Windows and Linux alike;
# the original backslash string literal contained invalid escape sequences)
DB_PATH = os.path.join("Watcher", "persistence", "watcher.db")
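# NOTE: DB_PATH is relative, so run the script from the repository root, e.g.:
#   python watcher/Tests/populate_testdata.py
# Adjust the path if your working directory differs.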
# Check whether the database exists
if not os.path.exists(DB_PATH):
    print(f"❌ Database not found: {DB_PATH}")
    print("Please make sure the application has been started once so the database gets created.")
    exit(1)
print(f"📊 Connecting to database: {DB_PATH}")
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
# Delete existing data (optional - comment this out to keep existing data)
print("\n🗑️ Deleting existing test data...")
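# Child tables are deleted before their parents (ContainerMetrics before
# Containers, Containers before Servers) so foreign-key constraints, if
# enforced by the schema, are not violated.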
cursor.execute("DELETE FROM ContainerMetrics")
cursor.execute("DELETE FROM Metrics")
cursor.execute("DELETE FROM LogEvents")
cursor.execute("DELETE FROM Containers")
cursor.execute("DELETE FROM Images")
cursor.execute("DELETE FROM Tags")
cursor.execute("DELETE FROM Servers")
# Users are intentionally NOT deleted
conn.commit()
print("✅ Old data deleted (Users are preserved)\n")
# ============================================================================
# 2. SERVERS - Create test servers
# ============================================================================
print("\n🖥️ Creating servers...")
servers_data = [
    {
        "name": "Production-Web-01",
        "ip": "192.168.1.10",
        "type": "Ubuntu 22.04",
        "description": "Main web server for production",
        "cpu_type": "Intel Core i7-12700K",
        "cpu_cores": 12,
        "gpu_type": None,
        "ram_size": 34359738368,  # 32 GB in bytes
        "disk_space": "512 GB NVMe SSD",
        "is_online": True
    },
    {
        "name": "Dev-Server",
        "ip": "192.168.1.20",
        "type": "Debian 12",
        "description": "Development and test server",
        "cpu_type": "AMD Ryzen 9 5900X",
        "cpu_cores": 12,
        "gpu_type": None,
        "ram_size": 68719476736,  # 64 GB in bytes
        "disk_space": "1 TB NVMe SSD",
        "is_online": True
    },
    {
        "name": "GPU-Server-ML",
        "ip": "192.168.1.30",
        "type": "Ubuntu 22.04 LTS",
        "description": "Machine learning training server",
        "cpu_type": "AMD Ryzen Threadripper 3970X",
        "cpu_cores": 32,
        "gpu_type": "NVIDIA RTX 4090",
        "ram_size": 137438953472,  # 128 GB in bytes
        "disk_space": "2 TB NVMe SSD",
        "is_online": True
    },
    {
        "name": "Backup-Server",
        "ip": "192.168.1.40",
        "type": "Ubuntu 20.04",
        "description": "Backup and storage server",
        "cpu_type": "Intel Xeon E5-2680 v4",
        "cpu_cores": 14,
        "gpu_type": None,
        "ram_size": 17179869184,  # 16 GB in bytes
        "disk_space": "10 TB HDD RAID5",
        "is_online": False
    },
    {
        "name": "Docker-Host-01",
        "ip": "192.168.1.50",
        "type": "Ubuntu 22.04",
        "description": "Docker container host",
        "cpu_type": "Intel Xeon Gold 6248R",
        "cpu_cores": 24,
        "gpu_type": None,
        "ram_size": 68719476736,  # 64 GB in bytes
        "disk_space": "2 TB NVMe SSD",
        "is_online": True
    }
]
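# The ram_size values above are GiB * 1024**3 bytes
# (e.g. 32 GiB = 34_359_738_368 bytes).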
server_ids = []
for server in servers_data:
    # Online servers were seen within the last 30 minutes, offline ones hours ago
    if server["is_online"]:
        last_seen = datetime.utcnow() - timedelta(minutes=random.randint(0, 30))
    else:
        last_seen = datetime.utcnow() - timedelta(hours=random.randint(2, 48))
    cursor.execute("""
        INSERT INTO Servers (
            Name, IPAddress, Type, Description,
            CpuType, CpuCores, GpuType, RamSize, DiskSpace,
            CPU_Load_Warning, CPU_Load_Critical,
            CPU_Temp_Warning, CPU_Temp_Critical,
            RAM_Load_Warning, RAM_Load_Critical,
            GPU_Load_Warning, GPU_Load_Critical,
            GPU_Temp_Warning, GPU_Temp_Critical,
            Disk_Usage_Warning, Disk_Usage_Critical,
            DISK_Temp_Warning, DISK_Temp_Critical,
            CreatedAt, IsOnline, LastSeen, IsVerified
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    """, (
        server["name"], server["ip"], server["type"], server["description"],
        server["cpu_type"], server["cpu_cores"], server["gpu_type"], server["ram_size"], server["disk_space"],
        75.0, 90.0,  # CPU load warning/critical
        80.0, 90.0,  # CPU temp
        85.0, 95.0,  # RAM load
        75.0, 90.0,  # GPU load
        70.0, 80.0,  # GPU temp
        75.0, 90.0,  # disk usage
        34.0, 36.0,  # disk temp
        datetime.utcnow() - timedelta(days=random.randint(30, 365)),
        server["is_online"], last_seen, True
    ))
    # cursor.lastrowid holds the auto-generated Id of the row just inserted
    server_ids.append(cursor.lastrowid)
    print(f"  ✓ Server '{server['name']}' created (ID: {cursor.lastrowid})")
conn.commit()
# ============================================================================
# 3. METRICS - Create server metrics (last 48 hours)
# ============================================================================
print("\n📈 Creating server metrics (last 48 hours)...")
metrics_count = 0
for server_id in server_ids:
    # Look up whether the server is online
    cursor.execute("SELECT IsOnline FROM Servers WHERE Id = ?", (server_id,))
    is_online = cursor.fetchone()[0]
    if not is_online:
        continue  # No metrics for offline servers

    # Create metrics for the last 48 hours (one sample every 5 minutes)
    start_time = datetime.utcnow() - timedelta(hours=48)
    current_time = start_time

    # Base values for realistic fluctuations
    base_cpu = random.uniform(20, 40)
    base_ram = random.uniform(40, 60)
    base_gpu = random.uniform(10, 30) if server_id == server_ids[2] else 0  # GPU server only
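    # Each sample below is the per-server baseline plus Gaussian noise
    # (random.gauss), clamped to 0-100, so charts show plausible jitter
    # around a stable level rather than pure white noise.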
    server_metrics = 0  # per-server counter; metrics_count is the running total
    while current_time <= datetime.utcnow():
        # Realistic fluctuations
        cpu_load = max(0, min(100, base_cpu + random.gauss(0, 15)))
        cpu_temp = 30 + (cpu_load * 0.5) + random.gauss(0, 3)
        ram_load = max(0, min(100, base_ram + random.gauss(0, 10)))
        gpu_load = max(0, min(100, base_gpu + random.gauss(0, 20))) if base_gpu > 0 else 0
        gpu_temp = 25 + (gpu_load * 0.6) + random.gauss(0, 3) if gpu_load > 0 else 0
        gpu_vram_usage = gpu_load * 0.8 if gpu_load > 0 else 0
        disk_usage = random.uniform(40, 75)
        disk_temp = random.uniform(28, 35)
        net_in = random.uniform(1000000, 10000000)  # 1-10 Mbps in bits
        net_out = random.uniform(500000, 5000000)  # 0.5-5 Mbps in bits
        cursor.execute("""
            INSERT INTO Metrics (
                ServerId, Timestamp,
                CPU_Load, CPU_Temp,
                GPU_Load, GPU_Temp, GPU_Vram_Size, GPU_Vram_Usage,
                RAM_Size, RAM_Load,
                DISK_Size, DISK_Usage, DISK_Temp,
                NET_In, NET_Out
            ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        """, (
            server_id, current_time,
            cpu_load, cpu_temp,
            gpu_load, gpu_temp, 24.0, gpu_vram_usage,  # 24 GB VRAM
            64.0, ram_load,
            512.0, disk_usage, disk_temp,
            net_in, net_out
        ))
        metrics_count += 1
        server_metrics += 1
        current_time += timedelta(minutes=5)
    print(f"  ✓ Server {server_id}: {server_metrics} metrics created")
conn.commit()
print(f"✅ {metrics_count} metrics created in total")
# ============================================================================
# 4. IMAGES - Docker Images
# ============================================================================
print("\n🐳 Creating Docker images...")
images_data = [
    ("nginx", "latest"),
    ("nginx", "alpine"),
    ("postgres", "15"),
    ("postgres", "14-alpine"),
    ("redis", "7-alpine"),
    ("node", "18-alpine"),
    ("python", "3.11-slim"),
    ("mysql", "8.0"),
    ("traefik", "v2.10"),
    ("portainer", "latest")
]
image_ids = {}
for name, tag in images_data:
    cursor.execute("""
        INSERT INTO Images (Name, Tag)
        VALUES (?, ?)
    """, (name, tag))
    image_ids[f"{name}:{tag}"] = cursor.lastrowid
    print(f"  ✓ Image '{name}:{tag}' created")
conn.commit()
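# Note: image_ids maps "name:tag" to the inserted row id; the container rows
# below reference images by their "name:tag" string rather than by this id.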
# ============================================================================
# 5. CONTAINERS - Docker Container
# ============================================================================
print("\n📦 Creating Docker containers...")
# Only the first two servers (both online) host containers
online_server_ids = server_ids[:2]
containers_data = [
    # Production-Web-01
    ("nginx-web", "abc123def456", "nginx:latest", online_server_ids[0], True),
    ("postgres-db", "def456ghi789", "postgres:15", online_server_ids[0], True),
    ("redis-cache", "ghi789jkl012", "redis:7-alpine", online_server_ids[0], True),
    ("traefik-proxy", "jkl012mno345", "traefik:v2.10", online_server_ids[0], True),
    # Dev-Server
    ("dev-nginx", "mno345pqr678", "nginx:alpine", online_server_ids[1], True),
    ("dev-postgres", "pqr678stu901", "postgres:14-alpine", online_server_ids[1], False),
    ("dev-redis", "stu901vwx234", "redis:7-alpine", online_server_ids[1], True),
    ("test-app", "vwx234yz567", "node:18-alpine", online_server_ids[1], True),
    ("portainer", "yz567abc890", "portainer:latest", online_server_ids[1], True),
]
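# The ContainerId values above are stand-in strings shaped like Docker's short
# container IDs; real Docker container IDs are 64-character hex digests.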
container_ids = []
for name, container_id, image, server_id, is_running in containers_data:
    cursor.execute("""
        INSERT INTO Containers (Name, ContainerId, Image, ServerId, IsRunning)
        VALUES (?, ?, ?, ?, ?)
    """, (name, container_id, image, server_id, is_running))
    container_ids.append(cursor.lastrowid)
    status = "🟢 Running" if is_running else "🔴 Stopped"
    print(f"  ✓ Container '{name}' created - {status}")
conn.commit()
# ============================================================================
# 6. CONTAINER METRICS - Container metrics (last 24 hours)
# ============================================================================
print("\n📊 Creating container metrics (last 24 hours)...")
container_metrics_count = 0
for container_id in container_ids:
    # Check whether the container is running
    cursor.execute("SELECT IsRunning FROM Containers WHERE Id = ?", (container_id,))
    is_running = cursor.fetchone()[0]
    if not is_running:
        continue  # No metrics for stopped containers

    # Create metrics for the last 24 hours (one sample every 5 minutes)
    start_time = datetime.utcnow() - timedelta(hours=24)
    current_time = start_time

    # Base values for containers (usually lower than the host's)
    base_cpu = random.uniform(5, 15)
    base_ram = random.uniform(10, 30)
    while current_time <= datetime.utcnow():
        cpu_load = max(0, min(100, base_cpu + random.gauss(0, 8)))
        cpu_temp = 30 + (cpu_load * 0.5) + random.gauss(0, 2)
        ram_load = max(0, min(100, base_ram + random.gauss(0, 5)))
        ram_size = random.uniform(0.5, 4.0)  # containers use less RAM
        cursor.execute("""
            INSERT INTO ContainerMetrics (
                ContainerId, Timestamp,
                CPU_Load, CPU_Temp,
                RAM_Size, RAM_Load
            ) VALUES (?, ?, ?, ?, ?, ?)
        """, (
            container_id, current_time,
            cpu_load, cpu_temp,
            ram_size, ram_load
        ))
        container_metrics_count += 1
        current_time += timedelta(minutes=5)
conn.commit()
print(f"✅ {container_metrics_count} container metrics created in total")
# ============================================================================
# 7. LOG EVENTS - Create log entries
# ============================================================================
print("\n📝 Creating log events...")
log_messages = [
    ("Info", "Server started successfully", None, None),
    ("Info", "Backup completed", server_ids[3], None),
    ("Warning", "CPU usage above 80%", server_ids[0], None),
    ("Info", "Container started", server_ids[0], container_ids[0]),
    ("Error", "Database connection failed", server_ids[1], container_ids[5]),
    ("Warning", "Free disk space below 25%", server_ids[1], None),
    ("Info", "Update installed", server_ids[2], None),
    ("Info", "Container restarted", server_ids[0], container_ids[2]),
    ("Warning", "GPU temperature above 75°C", server_ids[2], None),
    ("Info", "Network check successful", server_ids[0], None),
]
for level, message, server_id, container_id in log_messages:
    timestamp = datetime.utcnow() - timedelta(hours=random.randint(0, 48))
    cursor.execute("""
        INSERT INTO LogEvents (Timestamp, Message, Level, ServerId, ContainerId)
        VALUES (?, ?, ?, ?, ?)
    """, (timestamp, message, level, server_id, container_id))
    print(f"  ✓ Log: [{level}] {message}")
conn.commit()
# ============================================================================
# 8. TAGS - Tags for servers/containers
# ============================================================================
print("\n🏷️ Creating tags...")
tags_data = ["production", "development", "backup", "docker", "monitoring", "critical"]
for tag_name in tags_data:
    cursor.execute("""
        INSERT INTO Tags (Name)
        VALUES (?)
    """, (tag_name,))
    print(f"  ✓ Tag '{tag_name}' created")
conn.commit()
# ============================================================================
# Wrap-up
# ============================================================================
print("\n" + "=" * 60)
print("✅ Test data created successfully!")
print("=" * 60)
# Print statistics
cursor.execute("SELECT COUNT(*) FROM Servers")
server_count = cursor.fetchone()[0]
cursor.execute("SELECT COUNT(*) FROM Containers")
container_count = cursor.fetchone()[0]
cursor.execute("SELECT COUNT(*) FROM Metrics")
metrics_count = cursor.fetchone()[0]
cursor.execute("SELECT COUNT(*) FROM ContainerMetrics")
container_metrics_count = cursor.fetchone()[0]
cursor.execute("SELECT COUNT(*) FROM LogEvents")
log_count = cursor.fetchone()[0]
cursor.execute("SELECT COUNT(*) FROM Images")
image_count = cursor.fetchone()[0]
cursor.execute("SELECT COUNT(*) FROM Tags")
tag_count = cursor.fetchone()[0]
print(f"""
📊 STATISTICS:
  🖥️ Servers: {server_count}
  📦 Containers: {container_count}
  📈 Server metrics: {metrics_count}
  📊 Container metrics: {container_metrics_count}
  📝 Log events: {log_count}
  🐳 Images: {image_count}
  🏷️ Tags: {tag_count}

💡 NOTE:
  - The Users table was not modified
  - Metrics were generated for the last 48 hours
  - Server 'Backup-Server' is offline (for testing)
  - Container 'dev-postgres' is stopped (for testing)
  - The database is located at: {DB_PATH}
""")
# Close the connection
conn.close()
print("🔒 Database connection closed\n")