Tool-Calling: LLM entscheidet selbst welche Datenquellen abgefragt werden

This commit is contained in:
root 2026-03-09 14:32:56 +07:00
parent 8b35388de0
commit a217eab970
3 changed files with 237 additions and 64 deletions

View file

@@ -130,58 +130,53 @@ def gather_silence() -> str:
return "\n".join(lines) return "\n".join(lines)
def gather_context_for_question(question: str) -> str: def _tool_get_server_metrics(host: str = None) -> str:
"""Sammelt relevanten Kontext für eine Freitext-Frage.""" if host:
q = question.lower() return prometheus_client.format_host_detail(host)
parts = [] return prometheus_client.format_overview()
def _tool_get_server_warnings() -> str:
warnings = prometheus_client.get_warnings()
return "\n".join(warnings) if warnings else "Keine Warnungen — alle Werte normal."
def _tool_get_wordpress_stats() -> str:
cfg = _load_config() cfg = _load_config()
wordpress_client.init(cfg)
return wordpress_client.format_overview(cfg)
if any(w in q for w in ["fehler", "error", "problem", "kaputt", "down"]):
parts.append("=== Aktuelle Fehler ===\n" + gather_errors(hours=2))
if any(w in q for w in ["status", "läuft", "container", "übersicht", "alles"]): def _tool_get_feed_stats() -> str:
parts.append("=== Container Status ===\n" + gather_status()) cfg = _load_config()
ct_109 = config.get_container(cfg, vmid=109)
if not ct_109 or not ct_109.tailscale_ip:
return "RSS Manager nicht erreichbar."
import requests as _req
try:
r = _req.get(f"http://{ct_109.tailscale_ip}:8080/api/feed-stats", timeout=10)
if not r.ok:
return "RSS Manager API Fehler."
stats = r.json()
lines = [f"Artikel heute: {stats['today']}, gestern: {stats['yesterday']}"]
for f in stats.get("feeds", []):
if f["posts_today"] > 0:
lines.append(f" {f['name']}: {f['posts_today']} heute")
return "\n".join(lines)
except Exception as e:
return f"RSS Manager Fehler: {e}"
if any(w in q for w in ["still", "silence", "stumm", "logs"]):
parts.append("=== Stille Hosts ===\n" + gather_silence())
# WordPress-Daten für Blog-Fragen def get_tool_handlers() -> dict:
if any(w in q for w in ["wordpress", "blog", "post", "artikel", "kommentar", "plugin"]): """Registry: Tool-Name -> Handler-Funktion. Wird von llm.ask_with_tools() genutzt."""
wordpress_client.init(cfg) return {
wp_overview = wordpress_client.format_overview(cfg) "get_all_containers": lambda: gather_status(),
parts.append("=== WordPress ===\n" + wp_overview) "get_container_detail": lambda query: gather_container_status(query),
"get_errors": lambda hours=2: gather_errors(hours=hours),
# Prometheus-Metriken für System-Fragen "get_container_logs": lambda container, hours=1: gather_logs(container, hours=hours),
if any(w in q for w in ["cpu", "ram", "speicher", "memory", "disk", "platte", "get_silent_hosts": lambda: gather_silence(),
"festplatte", "auslastung", "load", "uptime", "server", "get_server_metrics": lambda host=None: _tool_get_server_metrics(host),
"metriken", "prometheus", "performance", "ressource"]): "get_server_warnings": lambda: _tool_get_server_warnings(),
host_match = None "get_wordpress_stats": lambda: _tool_get_wordpress_stats(),
for name in ["pve-hetzner", "pve-ka-1", "pve-ka-2", "pve-ka-3", "get_feed_stats": lambda: _tool_get_feed_stats(),
"pve-mu-2", "pve-mu-3", "pve-he"]: }
if name.replace("-", "") in q.replace("-", "").replace(" ", ""):
host_match = name
break
if host_match:
parts.append(f"=== Prometheus {host_match} ===\n" +
prometheus_client.format_host_detail(host_match))
else:
parts.append("=== Prometheus Übersicht ===\n" +
prometheus_client.format_overview())
ct_match = re.search(r'\bct[- ]?(\d{3})\b', q)
if ct_match:
parts.append(f"=== CT {ct_match.group(1)} ===\n" + gather_container_status(ct_match.group(1)))
for name in ["wordpress", "rss", "seafile", "forgejo", "portainer",
"fuenfvoracht", "redax", "flugscanner", "edelmetall"]:
if name in q:
parts.append(f"=== {name} ===\n" + gather_container_status(name))
if not parts:
parts.append("=== Container Status ===\n" + gather_status())
parts.append("=== Aktuelle Fehler ===\n" + gather_errors(hours=1))
warnings = prometheus_client.get_warnings()
if warnings:
parts.append("=== Prometheus Warnungen ===\n" + "\n".join(warnings))
return "\n\n".join(parts)

View file

@@ -1,5 +1,10 @@
"""OpenRouter LLM-Wrapper für natürliche Antworten.""" """OpenRouter LLM-Wrapper mit Tool-Calling.
Das LLM entscheidet selbst welche Datenquellen es abfragt.
Neue Datenquelle = Tool-Definition hier + Handler in context.py.
"""
import json
import requests import requests
import os import os
import sys import sys
@@ -8,20 +13,145 @@ sys.path.insert(0, os.path.dirname(__file__))
from core import config from core import config
MODEL = "openai/gpt-4o-mini" MODEL = "openai/gpt-4o-mini"
MAX_TOOL_ROUNDS = 3  # cap on LLM->tool->LLM round-trips in ask_with_tools(); after the loop a final no-tools call forces an answer
SYSTEM_PROMPT = """Du bist der Hausmeister-Bot für ein Homelab mit mehreren Proxmox-Servern. SYSTEM_PROMPT = """Du bist der Hausmeister-Bot für ein Homelab mit mehreren Proxmox-Servern.
Du antwortest kurz, präzise und auf Deutsch. Du antwortest kurz, präzise und auf Deutsch.
Du bekommst Live-Daten aus Loki (Logs), Proxmox (Container-Status) und homelab.conf. Du hast Tools um Live-Daten abzufragen. Nutze sie um Fragen zu beantworten.
Wenn alles in Ordnung ist, sag das kurz. Bei Problemen erkläre was los ist und schlage Lösungen vor. Wenn alles in Ordnung ist, sag das kurz. Bei Problemen erkläre was los ist und schlage Lösungen vor.
Nutze Emojis sparsam. Formatiere für Telegram (kein Markdown, nur einfacher Text).""" Nutze Emojis sparsam. Formatiere für Telegram (kein Markdown, nur einfacher Text)."""
# Tool schemas advertised to OpenRouter (OpenAI function-calling format).
# Each "name" here is expected to match a handler key registered elsewhere
# (presumably get_tool_handlers() in context.py — TODO confirm).


def _fn_schema(name, description, properties=None, required=None):
    """Build one function-calling tool entry in the OpenAI/OpenRouter format.

    Tools that take no arguments get an empty object schema — exactly what
    the inline literals spell out for every parameterless entry.
    """
    return {
        "type": "function",
        "function": {
            "name": name,
            "description": description,
            "parameters": {
                "type": "object",
                "properties": {} if properties is None else properties,
                "required": [] if required is None else required,
            },
        },
    }


TOOLS = [
    _fn_schema(
        "get_all_containers",
        "Status aller Container auf allen Proxmox-Servern (running/stopped, RAM, Uptime)",
    ),
    _fn_schema(
        "get_container_detail",
        "Detail-Status eines einzelnen Containers. Suche per VMID (z.B. 101) oder Name (z.B. wordpress, rss-manager, forgejo)",
        properties={
            "query": {"type": "string", "description": "VMID (z.B. '109') oder Container-Name (z.B. 'wordpress')"}
        },
        required=["query"],
    ),
    _fn_schema(
        "get_errors",
        "Aktuelle Fehler-Logs aus Loki (alle Container)",
        properties={
            "hours": {"type": "number", "description": "Zeitraum in Stunden (default: 2)", "default": 2}
        },
    ),
    _fn_schema(
        "get_container_logs",
        "Letzte Logs eines bestimmten Containers aus Loki",
        properties={
            "container": {"type": "string", "description": "Hostname des Containers (z.B. 'rss-manager', 'wordpress-v2')"},
            "hours": {"type": "number", "description": "Zeitraum in Stunden (default: 1)", "default": 1},
        },
        required=["container"],
    ),
    _fn_schema(
        "get_silent_hosts",
        "Welche Hosts senden keine Logs mehr? (Stille-Check)",
    ),
    _fn_schema(
        "get_server_metrics",
        "CPU, RAM, Disk, Load, Uptime von Proxmox-Servern via Prometheus. Ohne host = alle Server.",
        properties={
            "host": {
                "type": "string",
                "description": "Hostname (pve-hetzner, pve-ka-1, pve-ka-2, pve-ka-3, pve-mu-2, pve-mu-3, pve-he, pbs-mu). Leer = alle.",
            }
        },
    ),
    _fn_schema(
        "get_server_warnings",
        "Nur Warnungen: Server mit CPU>80%, RAM>85% oder Disk>85%",
    ),
    _fn_schema(
        "get_wordpress_stats",
        "WordPress/Blog-Statistiken: Posts heute/gestern/Woche, offene Kommentare, letzte Artikel, Plugin-Status",
    ),
    _fn_schema(
        "get_feed_stats",
        "RSS-Feed-Status: Aktive Feeds, Artikel heute/gestern, Fehler",
    ),
]
def _get_api_key() -> str: def _get_api_key() -> str:
cfg = config.parse_config() cfg = config.parse_config()
return cfg.api_keys.get("openrouter_key", "") return cfg.api_keys.get("openrouter_key", "")
def _call_openrouter(messages: list, api_key: str, use_tools: bool = True) -> dict:
    """POST one chat-completion request to OpenRouter and return the parsed JSON.

    When *use_tools* is true, the module-level TOOLS schema is attached with
    tool_choice "auto", letting the model decide whether to call a tool.
    Raises requests.HTTPError (via raise_for_status) on a non-2xx response.
    """
    body = {"model": MODEL, "messages": messages, "max_tokens": 600}
    if use_tools:
        body["tools"] = TOOLS
        body["tool_choice"] = "auto"
    response = requests.post(
        "https://openrouter.ai/api/v1/chat/completions",
        json=body,
        headers={"Authorization": f"Bearer {api_key}"},
        timeout=30,
    )
    response.raise_for_status()
    return response.json()
def ask(question: str, context: str) -> str: def ask(question: str, context: str) -> str:
"""Stellt eine Frage mit Kontext an OpenRouter.""" """Legacy-Funktion fuer /commands die bereits Kontext mitbringen."""
api_key = _get_api_key() api_key = _get_api_key()
if not api_key: if not api_key:
return "OpenRouter API Key fehlt in homelab.conf" return "OpenRouter API Key fehlt in homelab.conf"
@@ -30,15 +160,63 @@ def ask(question: str, context: str) -> str:
{"role": "system", "content": SYSTEM_PROMPT}, {"role": "system", "content": SYSTEM_PROMPT},
{"role": "user", "content": f"Kontext (Live-Daten):\n{context}\n\nFrage: {question}"}, {"role": "user", "content": f"Kontext (Live-Daten):\n{context}\n\nFrage: {question}"},
] ]
try: try:
r = requests.post( data = _call_openrouter(messages, api_key, use_tools=False)
"https://openrouter.ai/api/v1/chat/completions", return data["choices"][0]["message"]["content"]
headers={"Authorization": f"Bearer {api_key}"}, except Exception as e:
json={"model": MODEL, "messages": messages, "max_tokens": 500}, return f"LLM-Fehler: {e}"
timeout=30,
)
r.raise_for_status() def ask_with_tools(question: str, tool_handlers: dict) -> str:
return r.json()["choices"][0]["message"]["content"] """Freitext-Frage mit automatischem Tool-Calling.
tool_handlers: dict von tool_name -> callable(**kwargs) -> str
"""
api_key = _get_api_key()
if not api_key:
return "OpenRouter API Key fehlt in homelab.conf"
messages = [
{"role": "system", "content": SYSTEM_PROMPT},
{"role": "user", "content": question},
]
try:
for _round in range(MAX_TOOL_ROUNDS):
data = _call_openrouter(messages, api_key, use_tools=True)
choice = data["choices"][0]
msg = choice["message"]
tool_calls = msg.get("tool_calls")
if not tool_calls:
return msg.get("content", "Keine Antwort vom LLM.")
messages.append(msg)
for tc in tool_calls:
fn_name = tc["function"]["name"]
try:
fn_args = json.loads(tc["function"]["arguments"])
except (json.JSONDecodeError, KeyError):
fn_args = {}
handler = tool_handlers.get(fn_name)
if handler:
try:
result = handler(**fn_args)
except Exception as e:
result = f"Fehler bei {fn_name}: {e}"
else:
result = f"Unbekanntes Tool: {fn_name}"
messages.append({
"role": "tool",
"tool_call_id": tc["id"],
"content": str(result)[:3000],
})
data = _call_openrouter(messages, api_key, use_tools=False)
return data["choices"][0]["message"]["content"]
except Exception as e: except Exception as e:
return f"LLM-Fehler: {e}" return f"LLM-Fehler: {e}"

View file

@ -277,8 +277,8 @@ async def handle_message(update: Update, ctx: ContextTypes.DEFAULT_TYPE):
await update.message.reply_text("🤔 Denke nach...") await update.message.reply_text("🤔 Denke nach...")
try: try:
data = context.gather_context_for_question(text) handlers = context.get_tool_handlers()
answer = llm.ask(text, data) answer = llm.ask_with_tools(text, handlers)
await update.message.reply_text(answer[:4000], reply_markup=KEYBOARD) await update.message.reply_text(answer[:4000], reply_markup=KEYBOARD)
except Exception as e: except Exception as e:
log.exception("Fehler bei Freitext") log.exception("Fehler bei Freitext")