fix: blockierende LLM-Aufrufe in Hintergrund-Thread auslagern
Der Telegram Event-Loop wurde bei langen deep_research Calls blockiert, weshalb der systemd Watchdog den Bot nach ~2 Minuten killte. LLM-Aufrufe laufen jetzt via asyncio.to_thread, damit Watchdog und Polling-Loop während langer Recherche weiterlaufen.
This commit is contained in:
parent
af379754f2
commit
3847d6246b
1 changed file with 5 additions and 5 deletions
|
|
@ -409,7 +409,7 @@ async def handle_voice(update: Update, ctx: ContextTypes.DEFAULT_TYPE):
|
|||
context.last_suggest_result = {"type": None}
|
||||
context.set_source_type("telegram_voice")
|
||||
handlers = context.get_tool_handlers(session_id=session_id)
|
||||
answer = llm.ask_with_tools(text, handlers, session_id=session_id)
|
||||
answer = await asyncio.to_thread(llm.ask_with_tools, text, handlers, session_id=session_id)
|
||||
if session_id:
|
||||
memory_client.log_message(session_id, "user", text)
|
||||
memory_client.log_message(session_id, "assistant", answer)
|
||||
|
|
@ -451,7 +451,7 @@ async def handle_photo(update: Update, ctx: ContextTypes.DEFAULT_TYPE):
|
|||
context.last_suggest_result = {"type": None}
|
||||
context.set_source_type("telegram_photo")
|
||||
handlers = context.get_tool_handlers(session_id=session_id)
|
||||
answer = llm.ask_with_image(image_base64, caption, handlers, session_id=session_id)
|
||||
answer = await asyncio.to_thread(llm.ask_with_image, image_base64, caption, handlers, session_id=session_id)
|
||||
|
||||
warning_text, warnings = _check_flight_plausibility(answer)
|
||||
if warning_text:
|
||||
|
|
@ -627,7 +627,7 @@ async def handle_document(update: Update, ctx: ContextTypes.DEFAULT_TYPE):
|
|||
context.last_suggest_result = {"type": None}
|
||||
context.set_source_type("telegram_photo")
|
||||
handlers = context.get_tool_handlers(session_id=session_id)
|
||||
answer = llm.ask_with_image(image_base64, caption, handlers, session_id=session_id)
|
||||
answer = await asyncio.to_thread(llm.ask_with_image, image_base64, caption, handlers, session_id=session_id)
|
||||
|
||||
warning_text, warnings = _check_flight_plausibility(answer)
|
||||
if warning_text:
|
||||
|
|
@ -664,7 +664,7 @@ async def handle_document(update: Update, ctx: ContextTypes.DEFAULT_TYPE):
|
|||
context.last_suggest_result = {"type": None}
|
||||
context.set_source_type("telegram_pdf")
|
||||
handlers = context.get_tool_handlers(session_id=session_id)
|
||||
answer = llm.ask_with_tools(full_prompt, handlers, session_id=session_id)
|
||||
answer = await asyncio.to_thread(llm.ask_with_tools, full_prompt, handlers, session_id=session_id)
|
||||
|
||||
warning_text, warnings = _check_flight_plausibility(answer)
|
||||
if warning_text:
|
||||
|
|
@ -735,7 +735,7 @@ async def handle_message(update: Update, ctx: ContextTypes.DEFAULT_TYPE):
|
|||
context.last_suggest_result = {"type": None}
|
||||
context.set_source_type("telegram_text")
|
||||
handlers = context.get_tool_handlers(session_id=session_id)
|
||||
answer = llm.ask_with_tools(text, handlers, session_id=session_id)
|
||||
answer = await asyncio.to_thread(llm.ask_with_tools, text, handlers, session_id=session_id)
|
||||
if session_id:
|
||||
memory_client.log_message(session_id, "user", text)
|
||||
memory_client.log_message(session_id, "assistant", answer)
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue