# /app/app/workers/monitor_dashboard.py
# docker exec sf_api python -m app.workers.monitor_dashboard
"""Sentinel mission-control dashboard.

A rich-based terminal UI that polls the database, the Ollama service and
local hardware every two seconds, then renders throughput, pipeline,
inventory, live-ops and hardware panels.
"""
import asyncio
from datetime import datetime, timedelta, timezone

import httpx
import psutil
import pynvml
from rich.console import Console
from rich.layout import Layout
from rich.live import Live
from rich.panel import Panel
from rich.table import Table
from rich.text import Text
from sqlalchemy import text
from sqlalchemy.ext.asyncio import create_async_engine

from app.core.config import settings

console = Console()

# Initialise NVML once at import time. On hosts without an NVIDIA driver
# we degrade gracefully: the GPU panel shows "not detected" instead of
# the worker crashing.
try:
    pynvml.nvmlInit()
    gpu_available = True
except pynvml.NVMLError:
    gpu_available = False


async def get_hardware_stats():
    """Collect CPU, RAM and (optionally) GPU statistics.

    Returns:
        dict with keys ``cpu_usage`` (%), ``ram_total``/``ram_used`` (MB),
        ``ram_perc`` (%) and ``gpu`` (dict of GPU metrics, or ``None`` when
        no usable NVIDIA GPU is present).
    """
    # Snapshot memory once: calling virtual_memory() three times could
    # yield mutually inconsistent total/used/percent values.
    mem = psutil.virtual_memory()
    stats = {
        "cpu_usage": psutil.cpu_percent(interval=None),
        "ram_total": mem.total // 1024**2,
        "ram_used": mem.used // 1024**2,
        "ram_perc": mem.percent,
        "gpu": None,
    }
    if gpu_available:
        try:
            handle = pynvml.nvmlDeviceGetHandleByIndex(0)
            vram = pynvml.nvmlDeviceGetMemoryInfo(handle)  # query once, reuse
            name = pynvml.nvmlDeviceGetName(handle)
            # Older pynvml releases return bytes for the device name.
            if isinstance(name, bytes):
                name = name.decode("utf-8", errors="replace")
            stats["gpu"] = {
                "name": name,
                "temp": pynvml.nvmlDeviceGetTemperature(
                    handle, pynvml.NVML_TEMPERATURE_GPU
                ),
                "load": pynvml.nvmlDeviceGetUtilizationRates(handle).gpu,
                "vram_total": vram.total // 1024**2,
                "vram_used": vram.used // 1024**2,
                "power": pynvml.nvmlDeviceGetPowerUsage(handle) / 1000,  # mW -> W
            }
        except pynvml.NVMLError:
            # GPU exists but a query failed (driver reset, device busy):
            # keep gpu=None so the UI shows the "not detected" panel.
            pass
    return stats


async def get_ollama_models():
    """Return the names of models currently loaded in Ollama memory.

    Returns:
        list[str]: model names; ``[]`` when Ollama is up but idle; a
        one-element sentinel list when communication fails, so the UI can
        surface the outage.
    """
    try:
        async with httpx.AsyncClient(timeout=2.0) as client:
            resp = await client.get("http://ollama:11434/api/ps")
            if resp.status_code == 200:
                return [m['name'] for m in resp.json().get("models", [])]
    except (httpx.HTTPError, ValueError, KeyError):
        # HTTPError covers timeouts/connection failures, ValueError a
        # malformed JSON body, KeyError a model entry without 'name'.
        return ["Ollama Comm Error"]
    return []


async def get_stats(engine):
    """Gather every dashboard data point in a single DB round-trip session.

    Args:
        engine: async SQLAlchemy engine bound to the project database.

    Returns:
        Tuple of (rates, pipeline_counts, top_makes, live_ops, hw, ai):
        enrichment rates (hour, day), per-robot queue counts, top-7 makes,
        three lists of in-flight rows, hardware stats and loaded AI models.
    """
    async with engine.connect() as conn:
        # 1. Throughput: rows promoted to gold in the last hour / 24 hours.
        res_hr = await conn.execute(text(
            "SELECT count(*) FROM data.vehicle_model_definitions "
            "WHERE status = 'gold_enriched' "
            "AND updated_at > NOW() - INTERVAL '1 hour'"
        ))
        hr_rate = res_hr.scalar() or 0
        res_day = await conn.execute(text(
            "SELECT count(*) FROM data.vehicle_model_definitions "
            "WHERE status = 'gold_enriched' "
            "AND updated_at > NOW() - INTERVAL '24 hours'"
        ))
        day_rate = res_day.scalar() or 0

        # 2. Pipeline: queue depth in front of each robot stage.
        res_pipe = await conn.execute(text("""
            SELECT
            (SELECT count(*) FROM data.catalog_discovery WHERE status = 'pending') as r1,
            (SELECT count(*) FROM data.vehicle_model_definitions WHERE status = 'unverified') as r2,
            (SELECT count(*) FROM data.vehicle_model_definitions WHERE status = 'awaiting_ai_synthesis') as r3,
            (SELECT count(*) FROM data.vehicle_model_definitions WHERE status = 'gold_enriched') as r4
        """))
        r_counts = res_pipe.fetchone()

        # 3. Top 7 makes by definition count.
        res_top = await conn.execute(text(
            "SELECT make, count(*) as qty FROM data.vehicle_model_definitions "
            "GROUP BY make ORDER BY qty DESC LIMIT 7"
        ))
        top_makes = res_top.fetchall()

        # 4. Live activity: last 5 in-flight items per robot.
        res_r4 = await conn.execute(text(
            "SELECT make, marketing_name FROM data.vehicle_model_definitions "
            "WHERE status = 'gold_enriched' ORDER BY updated_at DESC LIMIT 5"
        ))
        res_r3 = await conn.execute(text(
            "SELECT make, marketing_name FROM data.vehicle_model_definitions "
            "WHERE status = 'ai_synthesis_in_progress' ORDER BY updated_at DESC LIMIT 5"
        ))
        res_r12 = await conn.execute(text(
            "SELECT make, model FROM data.catalog_discovery "
            "WHERE status = 'processing' ORDER BY updated_at DESC LIMIT 5"
        ))

        hw = await get_hardware_stats()
        ai = await get_ollama_models()

        return (
            (hr_rate, day_rate),
            r_counts,
            top_makes,
            (res_r4.fetchall(), res_r3.fetchall(), res_r12.fetchall()),
            hw,
            ai,
        )


def make_layout() -> Layout:
    """Build the static screen skeleton; panels are filled in update_dashboard."""
    layout = Layout()
    layout.split_column(
        Layout(name="header", size=3),
        Layout(name="main", ratio=1),
        Layout(name="hardware", size=10),  # enlarged hardware strip
        Layout(name="footer", size=3),
    )
    layout["main"].split_row(
        Layout(name="left", ratio=1),
        Layout(name="right", ratio=2),
    )
    layout["left"].split_column(Layout(name="robot_stats"), Layout(name="inventory"))
    layout["right"].split_column(Layout(name="live_ops"))
    return layout


def update_dashboard(layout, data):
    """Render one frame of the dashboard from a get_stats() result tuple."""
    rates, r_counts, top_makes, live_data, hw, ai_models = data
    r4_list, r3_list, r12_list = live_data

    # Clock: container runs on UTC, display is UTC+1. Use an aware
    # datetime instead of naively adding an hour to the host clock.
    local_time = datetime.now(timezone(timedelta(hours=1)))

    # HEADER
    layout["header"].update(Panel(
        f"🛰️ SENTINEL MISSION CONTROL | [bold yellow]{local_time.strftime('%Y-%m-%d %H:%M:%S')}[/] | AI: [green]{rates[0]}[/] /óra — [cyan]{rates[1]}[/] /nap",
        style="bold white on blue"
    ))

    # ROBOT PIPELINE
    robot_table = Table(title="🤖 Pipeline Állapot", expand=True, border_style="cyan")
    robot_table.add_column("Robot", style="bold")
    robot_table.add_column("Várakozik", justify="right")
    robot_table.add_row("R1-Hunter", f"{r_counts[0]} db")
    robot_table.add_row("R2-Researcher", f"{r_counts[1]} db")
    robot_table.add_row("R3-Alchemist", f"{r_counts[2]} db")
    robot_table.add_row("R4-Validator", f"{r_counts[3]} db")
    layout["robot_stats"].update(robot_table)

    # TOP MAKES
    brand_table = Table(title="🚜 Top 7 Márka", expand=True, border_style="magenta")
    brand_table.add_column("Márka", style="yellow")
    brand_table.add_column("db", justify="right")
    for m, q in top_makes:
        brand_table.add_row(m, str(q))
    layout["inventory"].update(brand_table)

    # LIVE OPS (5 items per robot — title matches the queries' LIMIT 5)
    ops_table = Table(title="⚡ Aktuális Folyamatok (Utolsó 5/robot)", expand=True, border_style="green")
    ops_table.add_column("Robot", width=15)
    ops_table.add_column("Márka / Típus")
    for r in r4_list:
        ops_table.add_row("[gold1]R4-VALIDATOR[/]", f"{r[0]} {r[1] or ''}")
    ops_table.add_section()
    for r in r3_list:
        ops_table.add_row("[medium_purple1]R3-ALCHEMIST[/]", f"{r[0]} {r[1] or ''}")
    ops_table.add_section()
    for r in r12_list:
        ops_table.add_row("[sky_blue1]R1-HUNTER[/]", f"{r[0]} {r[1] or ''}")
    layout["live_ops"].update(ops_table)

    # HARDWARE & AI (three-column strip)
    hw_layout = Layout()
    hw_layout.split_row(Layout(name="sys"), Layout(name="gpu"), Layout(name="ai"))

    # 1. System (CPU/RAM)
    sys_info = (
        f"[bold]CPU Terhelés:[/] [bright_blue]{hw['cpu_usage']}%[/]\n"
        f"[bold]RAM Használat:[/] [bright_magenta]{hw['ram_perc']}%[/]\n"
        f"({hw['ram_used']} / {hw['ram_total']} MB)"
    )
    hw_layout["sys"].update(Panel(sys_info, title="💻 System Resources", border_style="bright_blue"))

    # 2. GPU
    if hw["gpu"]:
        g = hw["gpu"]
        gpu_info = (
            f"[bold]{g['name']}[/]\n"
            f"Load: [green]{g['load']}%[/] | Temp: {g['temp']}°C\n"
            f"VRAM: {g['vram_used']} / {g['vram_total']} MB"
        )
    else:
        gpu_info = "[red]NVIDIA GPU not detected[/]"
    hw_layout["gpu"].update(Panel(gpu_info, title="🔌 GPU Monitor", border_style="orange3"))

    # 3. AI models currently resident in VRAM
    ai_info = "[bold]In Memory (VRAM):[/]\n" + (
        "\n".join([f"🧠 {m}" for m in ai_models]) if ai_models else "No active models."
    )
    hw_layout["ai"].update(Panel(ai_info, title="🤖 AI Stack", border_style="plum1"))
    layout["hardware"].update(hw_layout)

    layout["footer"].update(Panel(f"Sentinel v2.5 | Kernel: Stabil | Heartbeat: OK", style="italic grey50"))


async def main():
    """Entry point: poll all data sources every 2 s and refresh the live UI."""
    engine = create_async_engine(settings.DATABASE_URL)
    layout = make_layout()
    try:
        with Live(layout, refresh_per_second=1, screen=True):
            while True:
                try:
                    data = await get_stats(engine)
                    update_dashboard(layout, data)
                except Exception:
                    # Transient DB/HTTP hiccup: keep showing the last good
                    # frame instead of tearing down the dashboard.
                    pass
                await asyncio.sleep(2)
    finally:
        # Release pooled DB connections even on Ctrl-C.
        await engine.dispose()


if __name__ == "__main__":
    asyncio.run(main())