feat: deep_research Tool fuer Hausmeister-Bot
Open Deep Research (CT 121) als neues LLM-Tool. - LangGraph API auf 10.10.10.121:2024 - SearXNG + OpenRouter als Backend - Polling-basiert (2-10 Min pro Recherche) - Report wird fuer Telegram gekuerzt
This commit is contained in:
parent
394a6af54d
commit
3f245ffea9
1 changed files with 135 additions and 0 deletions
135
homelab-ai-bot/tools/deep_research.py
Normal file
135
homelab-ai-bot/tools/deep_research.py
Normal file
|
|
@ -0,0 +1,135 @@
|
|||
"""Deep Research Tool — Open Deep Research (CT 121) via LangGraph API.
|
||||
|
||||
Ermoeglicht dem Hausmeister-Bot tiefe Web-Recherchen zu starten.
|
||||
Ergebnisse kommen als ausfuehrlicher Report zurueck.
|
||||
"""
|
||||
|
||||
import requests
|
||||
import time
|
||||
import json
|
||||
|
||||
# Base URL of the LangGraph API serving Open Deep Research (container CT 121).
DEEP_RESEARCH_URL = "http://10.10.10.121:2024"
# Fixed assistant/graph id registered on the LangGraph server.
ASSISTANT_ID = "e9a5370f-7a53-55a8-ada8-6ab9ef15bb5b"
# Model slug used for all four research sub-models in _start_run
# (presumably resolved through the OpenRouter backend — TODO confirm).
RESEARCH_MODEL = "openai/gpt-4o-mini"
# Seconds to sleep between run-status polls in _poll_run.
POLL_INTERVAL = 10
# Maximum total seconds to wait for a run before reporting a timeout (10 min).
MAX_WAIT = 600
|
||||
|
||||
SYSTEM_PROMPT_EXTRA = """DEEP RESEARCH:
|
||||
Du hast Zugriff auf deep_research — eine KI-gestuetzte Tiefenrecherche die 20-30 Quellen durchsucht.
|
||||
Nutze es wenn der User eine komplexe Frage stellt die gruendliche Recherche erfordert.
|
||||
Beispiele: "Recherchiere X", "Finde heraus...", "Vergleiche A und B", "Was gibt es Neues zu...".
|
||||
NICHT fuer einfache Fakten oder Homelab-Fragen — dafuer reichen die anderen Tools.
|
||||
Das Ergebnis ist ein ausfuehrlicher Report. Fasse ihn fuer Telegram zusammen (max ~3000 Zeichen).
|
||||
deep_research dauert 2-10 Minuten. Sage dem User dass es laeuft."""
|
||||
|
||||
# OpenAI-style function-calling schema advertised to the LLM. A single tool,
# "deep_research", taking one required string parameter ("query").
# NOTE(review): the consumer of this list is not visible in this file —
# presumably the bot merges it into its tool registry; confirm at the caller.
TOOLS = [
    {
        "type": "function",
        "function": {
            "name": "deep_research",
            "description": "Startet eine tiefe Web-Recherche zu einem Thema. Durchsucht 20-30 Quellen und erstellt einen ausfuehrlichen Report. Dauert 2-10 Minuten.",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "Die Recherche-Frage, moeglichst spezifisch formuliert."
                    }
                },
                "required": ["query"]
            },
        },
    },
]
|
||||
|
||||
|
||||
def _create_thread():
    """Open a fresh LangGraph thread and return its ``thread_id``.

    Raises ``requests.HTTPError`` on a non-2xx response and lets
    connection errors propagate to the caller.
    """
    response = requests.post(
        f"{DEEP_RESEARCH_URL}/threads", json={}, timeout=10
    )
    response.raise_for_status()
    return response.json()["thread_id"]
|
||||
|
||||
|
||||
def _start_run(thread_id, query):
    """Launch a research run for *query* on *thread_id*; return the run id.

    All four configurable sub-models of the research graph are pointed at
    the same OpenAI-prefixed model reference.
    """
    model_ref = f"openai:{RESEARCH_MODEL}"
    # Same model for every stage of the research pipeline.
    configurable = {
        role: model_ref
        for role in (
            "summarization_model",
            "research_model",
            "compression_model",
            "final_report_model",
        )
    }
    payload = {
        "assistant_id": ASSISTANT_ID,
        "input": {"messages": [{"role": "user", "content": query}]},
        "config": {"configurable": configurable},
    }
    response = requests.post(
        f"{DEEP_RESEARCH_URL}/threads/{thread_id}/runs",
        json=payload,
        timeout=30,
    )
    response.raise_for_status()
    return response.json()["run_id"]
|
||||
|
||||
|
||||
def _poll_run(thread_id, run_id):
    """Block until the run reaches a terminal state or MAX_WAIT elapses.

    Returns a ``(ok, error)`` tuple: ``(True, None)`` on success, otherwise
    ``(False, <message>)``. Non-terminal statuses simply keep the loop going.
    """
    status_url = f"{DEEP_RESEARCH_URL}/threads/{thread_id}/runs/{run_id}"
    # Same budget as the original elapsed-counter loop: MAX_WAIT seconds
    # in POLL_INTERVAL-sized steps, sleeping before each status check.
    for _ in range(MAX_WAIT // POLL_INTERVAL):
        time.sleep(POLL_INTERVAL)
        response = requests.get(status_url, timeout=10)
        response.raise_for_status()
        payload = response.json()
        state = payload.get("status", "unknown")
        if state == "success":
            return True, None
        if state in ("error", "failed"):
            return False, payload.get("error", "Unbekannter Fehler")
        if state == "interrupted":
            return False, "Research wurde unterbrochen"
    return False, f"Timeout nach {MAX_WAIT}s"
|
||||
|
||||
|
||||
def _get_result(thread_id):
    """Fetch the finished research report from the thread's graph state.

    Returns the report text, or a German fallback notice when none is found.
    Raises ``requests.HTTPError`` on a non-2xx response.
    """
    r = requests.get(f"{DEEP_RESEARCH_URL}/threads/{thread_id}/state",
                     timeout=30)
    r.raise_for_status()
    state = r.json()

    values = state.get("values", {})

    # Prefer the graph's dedicated output key over scanning messages: the
    # length heuristic below can pick up a long intermediate agent message
    # instead of the actual report.
    # NOTE(review): key name "final_report" taken from open_deep_research's
    # state schema — confirm against the version deployed on CT 121.
    final_report = values.get("final_report")
    if isinstance(final_report, str) and final_report.strip():
        return final_report

    # Fallback (original behavior): newest-first scan for the first
    # substantial text body (> 200 chars), assumed to be the report.
    messages = values.get("messages", [])
    for msg in reversed(messages):
        content = msg.get("content", "")
        if isinstance(content, str) and len(content) > 200:
            return content

    return "Kein Report generiert."
|
||||
|
||||
|
||||
def handle_deep_research(query: str, **kw):
    """Tool handler: run one full deep-research cycle for *query*.

    Always returns a string — failures are turned into user-facing German
    messages rather than raised, since the result goes straight to Telegram.
    Extra keyword arguments from the tool-call plumbing are ignored.
    """
    try:
        thread = _create_thread()
        run = _start_run(thread, query)
        succeeded, error = _poll_run(thread, run)
        if not succeeded:
            return f"Deep Research fehlgeschlagen: {error}"
        report = _get_result(thread)
    except requests.ConnectionError:
        return "Deep Research (CT 121) nicht erreichbar. Service laeuft moeglicherweise nicht."
    except Exception as e:
        return f"Deep Research Fehler: {e}"

    # Keep the payload small enough for the downstream Telegram message.
    if len(report) > 6000:
        report = report[:6000] + "\n\n[... Report gekuerzt, Original war laenger]"
    return report
|
||||
|
||||
|
||||
# Tool-name -> handler mapping.
# NOTE(review): the dispatcher consuming this dict is not visible in this
# file — presumably the bot resolves tool calls through it; confirm at the
# caller.
HANDLERS = {
    "deep_research": handle_deep_research,
}
|
||||
Loading…
Add table
Reference in a new issue