diff --git a/homelab-ai-bot/telegram_bot.py b/homelab-ai-bot/telegram_bot.py
index 536c5bf5..9cd1694e 100644
--- a/homelab-ai-bot/telegram_bot.py
+++ b/homelab-ai-bot/telegram_bot.py
@@ -460,6 +460,10 @@ async def handle_voice(update: Update, ctx: ContextTypes.DEFAULT_TYPE):
     document_mode = doc_prefix or rag_mode.is_document_mode(channel_key)
     log.info("Voice transkribiert: %s", text[:100])
 
+    prev = ACTIVE_LLM_TASKS.pop(update.effective_chat.id, None)
+    if prev and not prev.done():
+        prev.cancel()
+        log.info("Vorheriger LLM-Lauf abgebrochen (neue Sprachnachricht)")
     await update.message.reply_text(f"🗣 \"{text}\"\n\n🤔 Denke nach...")
 
     session_id = memory_client.get_or_create_session(channel_key, source="telegram")
@@ -479,11 +483,14 @@ async def handle_voice(update: Update, ctx: ContextTypes.DEFAULT_TYPE):
         ACTIVE_LLM_TASKS[update.effective_chat.id] = llm_task
 
         waited = 0
+        # Kurze Intervalle: bei Save.TV/LLM wirkt 30s ohne Nachricht wie „Haenger“
         while not llm_task.done():
-            await asyncio.sleep(30)
-            waited += 30
+            await asyncio.sleep(10)
+            waited += 10
             if not llm_task.done():
-                await update.message.reply_text("⏳ Suche laeuft noch (" + str(waited) + "s)...")
+                await update.message.reply_text(
+                    "⏳ Noch dran (" + str(waited) + "s) — Save.TV/Modell kann etwas brauchen…"
+                )
 
         answer = await llm_task
 
@@ -845,6 +852,10 @@ async def handle_message(update: Update, ctx: ContextTypes.DEFAULT_TYPE):
         return
 
     document_mode = doc_prefix or rag_mode.is_document_mode(channel_key)
+    prev = ACTIVE_LLM_TASKS.pop(update.effective_chat.id, None)
+    if prev and not prev.done():
+        prev.cancel()
+        log.info("Vorheriger LLM-Lauf abgebrochen (neue Freitext-Nachricht)")
     await update.message.reply_text("🤔 Denke nach...")
     if _likely_deep_research_request(work_text):
         await update.message.reply_text("🔎 Deep Research gestartet. Das dauert meist 2-5 Minuten.")
@@ -867,10 +878,12 @@ async def handle_message(update: Update, ctx: ContextTypes.DEFAULT_TYPE):
 
         waited = 0
         while not llm_task.done():
-            await asyncio.sleep(30)
-            waited += 30
+            await asyncio.sleep(10)
+            waited += 10
             if not llm_task.done():
-                await update.message.reply_text("Suche laeuft noch (" + str(waited) + "s)...")
+                await update.message.reply_text(
+                    "⏳ Noch dran (" + str(waited) + "s) — Save.TV/Modell kann etwas brauchen…"
+                )
 
         answer = await llm_task