345 lines
11 KiB
Python
345 lines
11 KiB
Python
"""Model Library Integration — Telegram-Buttons, Befehle, Watcher-Webhook.
|
|
|
|
Wird von telegram_bot.py eingebunden:
|
|
import model_library_routes
|
|
model_library_routes.register(app, bot_token, chat_id)
|
|
"""
|
|
|
|
import json
|
|
import logging
|
|
import threading
|
|
import time
|
|
|
|
import requests
|
|
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, Update
|
|
from telegram.ext import ContextTypes
|
|
|
|
log = logging.getLogger(__name__)
|
|
|
|
MODEL_LIBRARY_URL = "http://100.116.108.62:8090"
|
|
_CHAT_ID: int | None = None
|
|
_APP = None
|
|
_TG_TOKEN: str | None = None
|
|
_PENDING_FILE = "/var/cache/model-library-pending.json"
|
|
|
|
# --- Persistence for pending notifications ---
|
|
|
|
|
|
def _load_pending() -> list[dict]:
    """Return the persisted list of pending model notifications.

    Falls back to an empty list when the cache file is missing or
    contains invalid JSON.
    """
    try:
        fh = open(_PENDING_FILE)
    except FileNotFoundError:
        return []
    with fh:
        try:
            return json.load(fh)
        except json.JSONDecodeError:
            return []
|
|
|
|
|
|
def _save_pending(items: list[dict]):
    """Persist *items* as the pending-notification cache (pretty-printed JSON)."""
    serialized = json.dumps(items, ensure_ascii=False, indent=2)
    with open(_PENDING_FILE, "w") as fh:
        fh.write(serialized)
|
|
|
|
|
|
def _remove_pending(repo_id: str, filename: str):
    """Drop the entry matching (repo_id, filename) from the pending cache."""
    def _matches(entry: dict) -> bool:
        return entry.get("repo_id") == repo_id and entry.get("filename") == filename

    _save_pending([entry for entry in _load_pending() if not _matches(entry)])
|
|
|
|
|
|
# --- Telegram handlers ---
|
|
|
|
|
|
async def cmd_modelle(update: Update, ctx: ContextTypes.DEFAULT_TYPE):
    """Telegram /modelle handler: list the downloaded models from the catalog."""
    # Fetch the catalog; report failures to the user instead of raising.
    try:
        resp = requests.get(f"{MODEL_LIBRARY_URL}/api/catalog", timeout=10)
        resp.raise_for_status()
        catalog = resp.json()
    except Exception as e:
        await update.message.reply_text(f"Fehler beim Abruf: {e}")
        return

    models = catalog.get("models", [])
    if not models:
        await update.message.reply_text("📂 Bibliothek ist leer.")
        return

    # The storage summary is optional — silently omit it on any failure.
    try:
        storage = requests.get(f"{MODEL_LIBRARY_URL}/api/storage", timeout=5).json()
        header = (
            f"💾 {storage['used_gb']:.0f} / {storage['total_gb']:.0f} GB "
            f"({storage['percent_used']:.0f}%) belegt\n\n"
        )
    except Exception:
        header = ""

    # Group the models by category for display; unknown category -> "llm".
    by_category: dict[str, list] = {}
    for model in models:
        by_category.setdefault(model.get("category", "llm"), []).append(model)

    icons = {
        "llm": "🧠", "code": "💻", "vision": "👁", "embedding": "📐",
        "audio": "🎵", "image-gen": "🎨",
    }

    lines = [f"📚 *KI-Modell-Bibliothek* ({len(models)} Modelle)\n{header}"]
    for category in sorted(by_category):
        lines.append(f"{icons.get(category, '📦')} *{category.upper()}*")
        for model in sorted(by_category[category], key=lambda entry: entry["filename"]):
            quant = model.get("quant", "?")
            size = model.get("size_gb", 0)
            lines.append(f" • `{model['filename']}` ({quant}, {size:.1f} GB)")
        lines.append("")

    await update.message.reply_text("\n".join(lines), parse_mode="Markdown")
|
|
|
|
|
|
async def cmd_watchlist(update: Update, ctx: ContextTypes.DEFAULT_TYPE):
    """Telegram /watchlist handler: show the currently watched model authors."""
    try:
        resp = requests.get(f"{MODEL_LIBRARY_URL}/api/watchlist", timeout=10)
        resp.raise_for_status()
        payload = resp.json()
    except Exception as e:
        await update.message.reply_text(f"Fehler beim Abruf: {e}")
        return

    authors = payload.get("authors", [])
    if not authors:
        await update.message.reply_text("Watchlist ist leer.")
        return

    lines = ["👀 *Model-Watchlist*\n"]
    lines.extend(
        f" • *{author.get('name', '?')}* — {author.get('comment', '')}"
        for author in authors
    )

    await update.message.reply_text("\n".join(lines), parse_mode="Markdown")
|
|
|
|
|
|
def _resolve_callback(data: str) -> tuple[str, str] | None:
|
|
"""Löst Callback-Data auf — direkt (repo|file) oder über Index (idx_N)."""
|
|
prefix = data[:9]
|
|
payload = data[9:]
|
|
|
|
if payload.startswith("idx_"):
|
|
try:
|
|
idx = int(payload[4:])
|
|
pending = _load_pending()
|
|
if 0 <= idx < len(pending):
|
|
m = pending[idx]
|
|
return m.get("repo_id"), m.get("filename")
|
|
except (ValueError, IndexError):
|
|
pass
|
|
return None
|
|
|
|
if "|" in payload:
|
|
return tuple(payload.split("|", 1))
|
|
return None
|
|
|
|
|
|
async def handle_model_callback(data: str, query) -> bool:
    """Handle ``model_dl_*`` (download) and ``model_ig_*`` (ignore) callbacks.

    Args:
        data: raw callback data from the pressed inline button.
        query: the Telegram CallbackQuery whose message is edited in place.

    Returns:
        True when the callback was recognized and processed, False so the
        caller can dispatch unrelated callbacks elsewhere.

    Fix: the status messages now embed the actual ``filename`` instead of a
    literal "(unknown)" placeholder.
    """
    if data.startswith("model_dl_"):
        resolved = _resolve_callback(data)
        if not resolved:
            # Pending entry no longer resolvable (e.g. cache cleared).
            await query.edit_message_text(query.message.text + "\n\n⚠️ Modell nicht mehr verfügbar")
            return True
        repo_id, filename = resolved

        # Immediate feedback; the final status is patched in below.
        await query.edit_message_text(
            query.message.text + f"\n\n⏳ Download gestartet: `{filename}`",
            parse_mode="Markdown",
        )
        try:
            r = requests.post(
                f"{MODEL_LIBRARY_URL}/api/download",
                json={"repo_id": repo_id, "filename": filename},
                timeout=15,
            )
            result = r.json()
            if result.get("status") == "started":
                await query.edit_message_text(
                    # Drop the provisional "⏳" suffix before appending status.
                    query.message.text.split("\n\n⏳")[0]
                    + f"\n\n✅ Download läuft: `{filename}`",
                    parse_mode="Markdown",
                )
            else:
                err = result.get("error", "Unbekannter Fehler")
                await query.edit_message_text(
                    query.message.text.split("\n\n⏳")[0]
                    + f"\n\n⚠️ {err}",
                    parse_mode="Markdown",
                )
        except Exception as e:
            log.exception("Model download trigger failed")
            await query.edit_message_text(
                query.message.text.split("\n\n⏳")[0]
                + f"\n\n❌ Fehler: {e}",
                parse_mode="Markdown",
            )
        _remove_pending(repo_id, filename)
        return True

    if data.startswith("model_ig_"):
        resolved = _resolve_callback(data)
        if not resolved:
            await query.edit_message_text(query.message.text + "\n\n⚠️ Modell nicht mehr verfügbar")
            return True
        repo_id, filename = resolved

        try:
            # Best effort: the UI is updated even if the backend call fails.
            requests.post(
                f"{MODEL_LIBRARY_URL}/api/ignore",
                json={"repo_id": repo_id, "filename": filename},
                timeout=10,
            )
        except Exception:
            pass
        await query.edit_message_text(
            query.message.text + "\n\n🚫 Ignoriert"
        )
        _remove_pending(repo_id, filename)
        return True

    return False
|
|
|
|
|
|
# --- Webhook: Watcher meldet neue Modelle ---
|
|
|
|
|
|
def _notify_new_models_sync(models: list[dict]):
    """Send a Telegram message with inline buttons for each new model.

    Talks to the Telegram HTTP API directly (not python-telegram-bot),
    because this runs in a worker thread without an asyncio event loop.

    Fix: the callback data and message text now embed the actual
    ``filename`` instead of a literal "(unknown)" placeholder, so the
    download/ignore buttons resolve to the correct model.
    """
    if not _TG_TOKEN or not _CHAT_ID:
        log.warning("Bot-Token oder Chat-ID nicht konfiguriert")
        return

    # Cap each batch to avoid spamming the chat / hitting rate limits.
    MAX_PER_BATCH = 10
    if len(models) > MAX_PER_BATCH:
        log.warning("Benachrichtigungen begrenzt: %d -> %d", len(models), MAX_PER_BATCH)
        models = models[:MAX_PER_BATCH]

    pending = _load_pending()
    sent = 0

    for m in models:
        repo_id = m.get("repo_id", "?")
        filename = m.get("filename", "?")
        size_gb = m.get("size_gb", 0)
        quant = m.get("quant", "?")

        # Skip models that already await a user decision.
        already = any(
            p.get("repo_id") == repo_id and p.get("filename") == filename
            for p in pending
        )
        if already:
            continue

        # Callback data carries "repo|filename" so the button handler can
        # resolve the model without extra lookups.
        cb_dl = f"model_dl_{repo_id}|{filename}"
        cb_ig = f"model_ig_{repo_id}|{filename}"

        # Telegram limits callback_data to 64 bytes; fall back to an index
        # into the persisted pending list when the direct form is too long.
        used_index = False
        if len(cb_dl.encode()) > 64 or len(cb_ig.encode()) > 64:
            pending.append(m)
            idx = len(pending) - 1
            _save_pending(pending)
            used_index = True
            cb_dl = f"model_dl_idx_{idx}"
            cb_ig = f"model_ig_idx_{idx}"

        # Escape underscores so Markdown does not render them as italics.
        safe_repo = repo_id.replace("_", "\\_")
        text = (
            f"🆕 *Neues KI-Modell verfügbar*\n\n"
            f"📦 `{filename}`\n"
            f"🏷 Repo: {safe_repo}\n"
            f"📏 {size_gb:.1f} GB | Quant: {quant}"
        )

        keyboard = {
            "inline_keyboard": [[
                {"text": "✅ Herunterladen", "callback_data": cb_dl},
                {"text": "🚫 Ignorieren", "callback_data": cb_ig},
            ]]
        }

        try:
            resp = requests.post(
                f"https://api.telegram.org/bot{_TG_TOKEN}/sendMessage",
                json={
                    "chat_id": _CHAT_ID,
                    "text": text,
                    "parse_mode": "Markdown",
                    "reply_markup": keyboard,
                },
                timeout=15,
            )
            if resp.ok:
                log.info("Modell-Benachrichtigung gesendet: %s", filename)
                sent += 1
            else:
                log.error("Telegram API Fehler: %s %s", resp.status_code, resp.text[:200])
        except Exception:
            log.exception("Fehler beim Senden der Modell-Benachrichtigung: %s", filename)

        # Record the model as pending (the idx variant was appended above)
        # and pace sends to stay under Telegram flood limits.
        if not used_index:
            pending.append(m)
            _save_pending(pending)
        time.sleep(1.5)
|
|
|
|
|
|
def start_webhook_listener():
    """Start a small background Flask server (port 8767) for watcher webhooks."""
    from flask import Flask, jsonify, request as freq

    webhook_app = Flask("model-library-webhook")
    # Keep Flask/werkzeug per-request logging quiet.
    webhook_app.logger.setLevel(logging.WARNING)
    logging.getLogger("werkzeug").setLevel(logging.WARNING)

    @webhook_app.route("/api/notify_new_models", methods=["POST"])
    def notify():
        payload = freq.get_json(silent=True)
        if not payload:
            return jsonify({"error": "no data"}), 400
        models = payload.get("models", [])
        if models:
            # Notify asynchronously so the watcher's request returns fast.
            worker = threading.Thread(
                target=_notify_new_models_sync, args=(models,), daemon=True
            )
            worker.start()
        return jsonify({"status": "ok", "queued": len(models)})

    @webhook_app.route("/health", methods=["GET"])
    def health():
        return jsonify({"status": "ok"})

    def _serve():
        webhook_app.run(host="0.0.0.0", port=8767, use_reloader=False)

    threading.Thread(target=_serve, daemon=True).start()
    log.info("Model-Library Webhook-Listener auf Port 8767 gestartet")
|
|
|
|
|
|
# --- Registration ---
|
|
|
|
|
|
def register(app, chat_id: int | None, token: str | None = None):
    """Entry point called by telegram_bot.py: wire up commands and webhook."""
    global _CHAT_ID, _APP, _TG_TOKEN
    _CHAT_ID, _APP, _TG_TOKEN = chat_id, app, token

    from telegram.ext import CommandHandler

    for command, handler in (("modelle", cmd_modelle), ("watchlist", cmd_watchlist)):
        app.add_handler(CommandHandler(command, handler))

    start_webhook_listener()
    log.info("Model-Library Routes registriert (/modelle, /watchlist, Webhook :8767)")
|