"""Extra routes for savetv_web.py - not in git, kept locally in CT 116."""

import html as _html
import shutil
import time as _time
from pathlib import Path
from urllib.parse import quote as _urlquote

from flask import send_from_directory, request, jsonify

# All downloads and state files live on this mount.
SAVETV_DIR = Path("/mnt/savetv")


def register_extra_routes(app, progress_lock=None, load_progress_raw=None, save_progress_raw=None):
    """Attach the extra routes/helpers to *app*.

    progress_lock / load_progress_raw / save_progress_raw let the host app
    share its own progress-file handling; each falls back to a local
    implementation when not supplied.  (Function continues beyond this block.)
    """
    import threading as _threading
    # Private lock when the caller does not share one.
    _plock = progress_lock if progress_lock is not None else _threading.Lock()

    def _load_prog():
        """Load the download-progress mapping ({} when the file is absent)."""
        if load_progress_raw is not None:
            return load_progress_raw()
        pf = SAVETV_DIR / ".download_progress.json"
        import json as _j
        return _j.loads(pf.read_text()) if pf.exists() else {}

    def _save_prog(prog):
        """Persist the download-progress mapping."""
        if save_progress_raw is not None:
            save_progress_raw(prog)
        else:
            import json as _j
            (SAVETV_DIR / ".download_progress.json").write_text(
                _j.dumps(prog, ensure_ascii=False, indent=2))

    # NOTE(review): the route converter was lost in extraction; "<path:filename>"
    # matches the "/files/<name>" links built elsewhere -- TODO confirm.
    @app.route("/files/<path:filename>")
    def serve_file(filename):
        # send_from_directory guards against path traversal on its own.
        return send_from_directory(str(SAVETV_DIR), filename, as_attachment=True)

    @app.route("/api/delete", methods=["POST"])
    def api_delete():
        # silent=True: malformed or missing JSON yields None instead of
        # raising, so bad payloads get a clean 400 rather than a 500.
        data = request.get_json(silent=True) or {}
        filename = data.get("filename", "")
        # Reject empty names and anything that could escape SAVETV_DIR.
        if not filename or ".." in filename or "/" in filename:
            return jsonify({"ok": False, "error": "Ungueltig"}), 400
        target = SAVETV_DIR / filename
        if not target.exists():
            return jsonify({"ok": False, "error": "Nicht gefunden"}), 404
        try:
            target.unlink()
            return jsonify({"ok": True, "deleted": filename})
        except Exception as e:
            return jsonify({"ok": False, "error": str(e)}), 500
← Archiv📁 Downloads⚙️ Status
' + rows = "" + from datetime import datetime as _dt + for name, size, mtime in files: + clean = name.rsplit(".", 1)[0] + esc = _html.escape(name, quote=True) + date_str = _dt.fromtimestamp(mtime).strftime("%d.%m.%Y") + rows += ( + '' + '' + clean + '' + '' + date_str + '' + '' + str(size) + ' MB' + '' + '' + '⬇ Download ' + '' + '' + ) + return ( + 'Downloads' + '' + + nav + + '

📁 Gespeicherte Filme

' + '
' + str(len(files)) + ' Dateien · ' + str(total_gb) + ' GB
' + '
Sortieren:' + '' + '' + '' + '
' + '' + rows + '
' + '' + ) + + + FILMINFO_CACHE = Path("/mnt/savetv/.filminfo_cache.json") + BOGUS_GENRES = {"Stummfilm", "Tonfilm", "Farbfilm", "Schwarzweissfilm", + "Langfilm", "Kurzfilm", "Independentfilm"} + + def _load_filminfo_cache(): + if FILMINFO_CACHE.exists(): + try: + import json as _json + return _json.loads(FILMINFO_CACHE.read_text()) + except Exception: + pass + return {} + + def _save_filminfo_cache(cache): + import json as _json + FILMINFO_CACHE.write_text(_json.dumps(cache, ensure_ascii=False, indent=1)) + + def _wikidata_lookup(title): + """Lookup year/genre/country for a film title via Wikidata.""" + import requests as _rq + import re + + search_title = re.sub(r"\s*[-\u2013\u2014]\s*.+$", "", title).strip() + result = {"year": "", "genres": [], "countries": []} + + def _parse_bindings(bindings): + year = "" + genres = set() + countries = set() + for b in bindings: + if not year and b.get("year", {}).get("value"): + year = b["year"]["value"] + if b.get("genreLabel", {}).get("value"): + genres.add(b["genreLabel"]["value"]) + if b.get("countryLabel", {}).get("value"): + countries.add(b["countryLabel"]["value"]) + return year, sorted(genres)[:3], sorted(countries)[:2] + + for lang in ["de", "en"]: + sparql = ('SELECT ?year ?genreLabel ?countryLabel WHERE {{ ' + '?film wdt:P31 wd:Q11424 . ' + '?film rdfs:label "{t}"@{l} . 
' + 'OPTIONAL {{ ?film wdt:P577 ?date }} ' + 'OPTIONAL {{ ?film wdt:P136 ?genre }} ' + 'OPTIONAL {{ ?film wdt:P495 ?country }} ' + 'BIND(YEAR(?date) AS ?year) ' + 'SERVICE wikibase:label {{ bd:serviceParam wikibase:language "de,en" }} ' + '}} LIMIT 20').format(t=search_title.replace('"', '\\"'), l=lang) + try: + r = _rq.get("https://query.wikidata.org/sparql", + params={"query": sparql, "format": "json"}, + headers={"User-Agent": "SaveTV/1.0"}, timeout=8) + bindings = r.json().get("results", {}).get("bindings", []) + if bindings: + y, g, c = _parse_bindings(bindings) + return {"year": y, "genres": [x for x in g if x not in BOGUS_GENRES], "countries": c} + except Exception: + pass + + # Fallback: Wikidata search + try: + sr = _rq.get("https://www.wikidata.org/w/api.php", + params={"action": "wbsearchentities", "search": search_title, + "language": "de", "type": "item", "limit": "3", "format": "json"}, + headers={"User-Agent": "SaveTV/1.0"}, timeout=8) + for item in sr.json().get("search", []): + qid = item.get("id", "") + sparql_q = ('SELECT ?year ?genreLabel ?countryLabel WHERE {{ ' + 'BIND(wd:{qid} AS ?film) ' + '?film wdt:P31 wd:Q11424 . 
' + 'OPTIONAL {{ ?film wdt:P577 ?date }} ' + 'OPTIONAL {{ ?film wdt:P136 ?genre }} ' + 'OPTIONAL {{ ?film wdt:P495 ?country }} ' + 'BIND(YEAR(?date) AS ?year) ' + 'SERVICE wikibase:label {{ bd:serviceParam wikibase:language "de,en" }} ' + '}} LIMIT 20').format(qid=qid) + r2 = _rq.get("https://query.wikidata.org/sparql", + params={"query": sparql_q, "format": "json"}, + headers={"User-Agent": "SaveTV/1.0"}, timeout=8) + bindings = r2.json().get("results", {}).get("bindings", []) + if bindings: + y, g, c = _parse_bindings(bindings) + return {"year": y, "genres": [x for x in g if x not in BOGUS_GENRES], "countries": c} + except Exception: + pass + + return result + + @app.route("/api/filminfo") + def api_filminfo(): + title = request.args.get("title", "").strip() + if not title: + return jsonify({"error": "title missing"}), 400 + cache = _load_filminfo_cache() + if title in cache: + return jsonify(cache[title]) + info = _wikidata_lookup(title) + cache[title] = info + _save_filminfo_cache(cache) + return jsonify(info) + + @app.route("/api/filminfo_batch", methods=["POST"]) + def api_filminfo_batch(): + data = request.get_json() + titles = data.get("titles", []) + cache = _load_filminfo_cache() + results = {} + missing = [] + for t in titles: + if t in cache: + results[t] = cache[t] + else: + missing.append(t) + for t in missing: + info = _wikidata_lookup(t) + cache[t] = info + results[t] = info + if missing: + _save_filminfo_cache(cache) + return jsonify(results) + + @app.route("/api/download_progress") + def api_download_progress(): + import json as _json + import subprocess as _sp + progress_file = SAVETV_DIR / ".download_progress.json" + if not progress_file.exists(): + return jsonify({}) + try: + progress = _json.loads(progress_file.read_text()) + except Exception: + return jsonify({}) + + dl_log_file = SAVETV_DIR / ".download_log.json" + try: + dl_log = _json.loads(dl_log_file.read_text()) if dl_log_file.exists() else {} + except Exception: + dl_log = {} + + # 
Stale "running"-Eintraege bereinigen: im Log als running, aber kein Progress-Eintrag + # und kein wget-Prozess → Download ist gescheitert, Eintrag entfernen + stale = [] + for tid, status in list(dl_log.items()): + if status != "running": + continue + if tid in progress: + continue + # Pruefen ob wget fuer diesen TID noch laeuft + try: + chk = _sp.run(["pgrep", "-af", f"_{tid}.mp4"], + capture_output=True, text=True, timeout=3) + if "wget" in chk.stdout: + continue + except Exception: + pass + stale.append(tid) + if stale: + for tid in stale: + dl_log.pop(tid, None) + dl_log_file.write_text(_json.dumps(dl_log, ensure_ascii=False, indent=2)) + + result = {} + completed = [] + for tid, info in list(progress.items()): + fp = SAVETV_DIR / info["filename"] + current = fp.stat().st_size if fp.exists() else 0 + expected = info.get("expected_bytes", 0) + + wget_running = False + try: + ps = _sp.run(["pgrep", "-af", info["filename"]], + capture_output=True, text=True, timeout=3) + wget_running = "wget" in ps.stdout + except Exception: + pass + + done = False + if expected > 0 and current >= expected: + done = True + elif not wget_running and current > 100_000: + done = True + + percent = round(current / expected * 100, 1) if expected > 0 else 0 + result[tid] = { + "current_bytes": current, + "expected_bytes": expected, + "percent": min(percent, 100), + "current_mb": round(current / 1024 / 1024, 1), + "expected_mb": round(expected / 1024 / 1024, 1), + "done": done, + } + + if done: + completed.append(tid) + + if completed: + for tid in completed: + info = progress.get(tid, {}) + raw_filename = info.get("filename", "") + # Rename: "Titel_ID.mp4" -> "Titel (Jahr).mp4" + if raw_filename: + _rename_to_jellyfin(raw_filename, tid) + # Auto-Delete aus Save.TV Archiv + try: + import sys as _sys + _sys.path.insert(0, '/opt/homelab-ai-bot') + from tools import savetv as _savetv + ok, err = _savetv._delete_telecast(int(tid)) + if ok: + import logging as _log + 
_log.getLogger("savetv").info("Archiv-Eintrag %s nach Download gelöscht", tid) + except Exception as _e: + import logging as _log + _log.getLogger("savetv").warning("Archiv-Delete TID %s fehlgeschlagen: %s", tid, _e) + dl_log[tid] = "done" + with _plock: + cur = _load_prog() + for tid in completed: + cur.pop(tid, None) + _save_prog(cur) + dl_log_file.write_text(_json.dumps(dl_log, ensure_ascii=False, indent=2)) + + return jsonify(result) + + def _find_cache_match(cache, clean_title): + """Sucht den besten Cache-Eintrag: exakt, dann normalisiert (Sonderzeichen-tolerant).""" + if clean_title in cache and cache[clean_title].get("year"): + return cache[clean_title] + import re as _re2 + def _norm(s): + return _re2.sub(r'\s+', ' ', _re2.sub(r'[^\w\s]', ' ', s)).strip().lower() + norm = _norm(clean_title) + for key, val in cache.items(): + if not val.get("year"): + continue + if _norm(key) == norm: + return val + return None + + def _rename_to_jellyfin(raw_filename, tid): + """Benennt fertig gedownloadete Datei von 'Titel_ID.mp4' zu 'Titel (Jahr).mp4' um.""" + import re as _re + src = SAVETV_DIR / raw_filename + if not src.exists(): + return + + m = _re.match(r'^(.+)_(\d{6,9})\.mp4$', raw_filename) + if not m: + return + raw_title_part = m.group(1) + + clean_title = raw_title_part.replace('_-_', ' - ').replace('_', ' ').strip() + + cache = _load_filminfo_cache() + matched = _find_cache_match(cache, clean_title) + if matched: + year = matched.get("year", "") + else: + if clean_title not in cache: + cache[clean_title] = _wikidata_lookup(clean_title) + _save_filminfo_cache(cache) + year = cache[clean_title].get("year", "") + + # Zieldateiname bauen + safe_title = _re.sub(r'[\\/:*?"<>|]', '', clean_title).strip() + if year: + dest_name = f"{safe_title} ({year}).mp4" + else: + dest_name = f"{safe_title}.mp4" + + dest = SAVETV_DIR / dest_name + + # Nicht überschreiben falls schon vorhanden + if dest.exists(): + # Alten Raw-File löschen + try: + src.unlink() + except Exception: 
+ pass + return + + try: + src.rename(dest) + # Auch Progress-Info aktualisieren + import logging + logging.getLogger("savetv").info(f"Umbenannt: {raw_filename} -> {dest_name}") + except Exception as e: + import logging + logging.getLogger("savetv").warning(f"Rename fehlgeschlagen {raw_filename}: {e}") + + @app.route("/health") + def health(): + from tools import savetv + checks = {} + ok_count = 0 + total = 0 + total += 1 + try: + s = savetv._get_session() + checks["savetv_login"] = {"ok": s is not None, "detail": "Session aktiv" if s else "Login fehlgeschlagen"} + if s: ok_count += 1 + except Exception as e: + checks["savetv_login"] = {"ok": False, "detail": str(e)} + total += 1 + try: + t0 = _time.time() + entries = savetv._get_archive(count=5) + dur = round(_time.time() - t0, 2) + checks["savetv_archive"] = {"ok": len(entries) > 0, "detail": f"{len(entries)} Eintraege in {dur}s"} + if entries: ok_count += 1 + except Exception as e: + checks["savetv_archive"] = {"ok": False, "detail": str(e)} + total += 1 + try: + usage = shutil.disk_usage("/mnt/savetv") + free_gb = round(usage.free / 1024**3, 1) + mp4s = list(SAVETV_DIR.glob("*.mp4")) + total_size = round(sum(f.stat().st_size for f in mp4s) / 1024**3, 2) + checks["storage"] = {"ok": free_gb > 10, "detail": f"{len(mp4s)} Filme, {total_size} GB belegt, {free_gb} GB frei"} + if free_gb > 10: ok_count += 1 + except Exception as e: + checks["storage"] = {"ok": False, "detail": str(e)} + total += 1 + try: + import requests as _rq + t0 = _time.time() + sparql = 'SELECT ?year WHERE { ?f wdt:P31 wd:Q11424 . ?f rdfs:label "The Revenant"@en . ?f wdt:P577 ?date . 
BIND(YEAR(?date) AS ?year) } LIMIT 1' + wr = _rq.get("https://query.wikidata.org/sparql", params={"query": sparql, "format": "json"}, headers={"User-Agent": "SaveTV/1.0"}, timeout=10) + bindings = wr.json().get("results", {}).get("bindings", []) + year = bindings[0]["year"]["value"] if bindings else "" + dur = round(_time.time() - t0, 2) + checks["wikidata"] = {"ok": bool(year), "detail": f"The Revenant -> {year} ({dur}s)"} + if year: ok_count += 1 + except Exception as e: + checks["wikidata"] = {"ok": False, "detail": str(e)} + total += 1 + try: + import requests as _rq + r = _rq.get("http://localhost:8766/", timeout=3) + checks["nginx"] = {"ok": r.status_code == 200, "detail": f"Port 8766 -> {r.status_code}"} + if r.status_code == 200: ok_count += 1 + except Exception as e: + checks["nginx"] = {"ok": False, "detail": str(e)} + total += 1 + try: + import subprocess as _sp + result = _sp.run(["systemctl", "is-active", "cloudflared"], capture_output=True, text=True, timeout=5) + active = result.stdout.strip() == "active" + checks["cloudflare_tunnel"] = {"ok": active, "detail": result.stdout.strip()} + if active: ok_count += 1 + except Exception as e: + checks["cloudflare_tunnel"] = {"ok": False, "detail": str(e)} + total += 1 + checks["flask_web"] = {"ok": True, "detail": "Port 8765 aktiv"} + ok_count += 1 + return jsonify({"healthy": ok_count == total, "ok": ok_count, "total": total, "checks": checks}) + + @app.route("/status") + def status_page(): + nav = '
← Archiv📁 Downloads⚙️ Status
' + return ''' +Save.TV Status + + +''' + nav + ''' +

⚙️ System Status

+
Save.TV Download-System — Live-Checks
+
Prüfe Systeme...
+
+
+

📝 Funktionsübersicht

+
Save.TV EPG-Scanner (täglich 14:00, Auto-Aufnahme)
17.03.2026
+
Archiv-Bewertung: Kino-Highlights vs. TV-Filme
17.03.2026
+
Film-Download auf Hetzner (HD werbefrei)
17.03.2026
+
Web-Dashboard: Archiv durchsuchen + Download starten
17.03.2026
+
Jellyfin-Naming: Film (Jahr).mp4 via Wikidata
20.03.2026
+
Download-Seite mit direkten HTTP-Downloads
20.03.2026
+
Löschfunktion (Web-UI)
20.03.2026
+
Cloudflare Tunnel (savetv.orbitalo.net)
20.03.2026
+
Direkter Download (138.201.84.95:9443, Basic Auth)
20.03.2026
+
nginx Reverse Proxy + Static File Serving
20.03.2026
+
Telegram Bot Integration (Aufnahme, Status, Tipps)
17.03.2026
+
Status-Seite + Health-Endpoint
20.03.2026
+
+'''