feat: switch Hausmeister-Bot text model to Grok 4.1 Fast (OpenRouter)

- MODEL_LOCAL: qwen3:30b-a3b (local Ollama) -> x-ai/grok-4.1-fast (OpenRouter)
- OLLAMA_MODELS emptied; no more local Ollama for text (see the routing sketch after this list)
- warmup_ollama() is now a no-op (no VRAM warmup needed anymore)
- Vision stays on gpt-4o-mini (OpenRouter, no GPU)
- Ref: Issue #75 Phase 1
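
With OLLAMA_MODELS emptied, any dispatch that checks membership in that set presumably routes every text request to OpenRouter now. A minimal sketch of such a routing check, assuming a hypothetical route_base() helper (the real module may branch differently):

def route_base(model: str) -> str:
    # Hypothetical helper: only models still listed in OLLAMA_MODELS would hit
    # the local Ollama host; with the set empty, all text models use OpenRouter.
    return OLLAMA_BASE if model in OLLAMA_MODELS else OPENROUTER_BASE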
Homelab Cursor 2026-04-11 07:33:57 +02:00
parent 75a4f9ca4a
commit 8011145904

@@ -18,27 +18,17 @@ log = logging.getLogger('llm')
 OLLAMA_BASE = "http://100.84.255.83:11434"
 OPENROUTER_BASE = "https://openrouter.ai/api/v1"
-MODEL_LOCAL = "qwen3:30b-a3b"
+MODEL_LOCAL = "x-ai/grok-4.1-fast"
 MODEL_VISION = "openai/gpt-4o-mini"
 MODEL_ONLINE = "perplexity/sonar"
-FALLBACK_MODEL = "qwen2.5:14b"
+FALLBACK_MODEL = None
 MAX_TOOL_ROUNDS = 3
-OLLAMA_MODELS = {MODEL_LOCAL, FALLBACK_MODEL}
+OLLAMA_MODELS = set()
 
 def warmup_ollama():
-    """Laedt Hauptmodell + Embedding permanent in VRAM (keep_alive=-1)."""
-    for model in [MODEL_LOCAL, "nomic-embed-text"]:
-        try:
-            requests.post(
-                f"{OLLAMA_BASE}/api/generate",
-                json={"model": model, "prompt": "", "keep_alive": -1},
-                timeout=120,
-            )
-            log.info("Ollama warmup: %s permanent geladen", model)
-        except Exception as e:
-            log.warning("Ollama warmup fehlgeschlagen fuer %s: %s", model, e)
+    """No-Op: Text-Modell laeuft jetzt ueber OpenRouter (Grok 4.1 Fast), kein Ollama-Warmup noetig."""
+    log.info('Ollama warmup uebersprungen — Text laeuft ueber OpenRouter (Grok 4.1 Fast)')
 
 PASSTHROUGH_TOOLS = {"get_temperaturen", "get_energie", "get_heizung"}
 _LOCAL_OVERRIDES = [
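
For context, a minimal sketch of what a text completion against the new MODEL_LOCAL could look like via OpenRouter's OpenAI-compatible chat endpoint. The chat() helper and the OPENROUTER_API_KEY environment variable are assumptions for illustration, not part of this commit:

import os
import requests

OPENROUTER_BASE = "https://openrouter.ai/api/v1"  # as defined above
MODEL_LOCAL = "x-ai/grok-4.1-fast"                 # as defined above

def chat(prompt: str) -> str:
    # Assumed usage: plain chat completion against OpenRouter with the new text model.
    resp = requests.post(
        f"{OPENROUTER_BASE}/chat/completions",
        headers={"Authorization": f"Bearer {os.environ['OPENROUTER_API_KEY']}"},
        json={"model": MODEL_LOCAL, "messages": [{"role": "user", "content": prompt}]},
        timeout=120,
    )
    resp.raise_for_status()
    return resp.json()["choices"][0]["message"]["content"]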