- savetv_web.py: Archiv-Cache mit Hintergrund-Refresh (kein Blockieren bei Save.TV-Login) - savetv_web.py: Flask threaded=True für parallele Requests - savetv_web.py: Jellyfin-Duplikate-Filter (Checkbox, default: ein) - tools/savetv.py: Login-Timeout (8s connect, 20s read) + modernerer User-Agent - savetv_sync.py: Dateien unter 700 MB werden übersprungen (kein SD-Schrott) - CT 116: www.save.tv statisch in /etc/hosts → kein DNS-GIL-Block mehr - CT 116: RAM von 512 MB auf 1 GB erhöht (war der Hauptgrund für Einfrieren)
118 lines
3.7 KiB
Python
118 lines
3.7 KiB
Python
#!/usr/bin/env python3
|
|
"""
|
|
SaveTV → NAS Sync
|
|
Wartet 12h ± 0-30min nach Download auf Hetzner bevor Datei auf NAS kommt.
|
|
"""
|
|
import os, json, time, random, urllib.request, urllib.parse, urllib.error, email.utils
|
|
from datetime import datetime
|
|
|
|
ZIEL = "/mnt/nas/Filme zum nachbearbeiten"
|
|
BASE = "http://138.201.84.95:9443/files"
|
|
API = "http://138.201.84.95:9443/api/films"
|
|
CALLBACK = "http://138.201.84.95:9443/api/nas_synced"
|
|
LOG = "/var/log/savetv_sync.log"
|
|
|
|
MIN_ALTER_H = 24
|
|
JITTER_MIN = 30 # ± bis zu 30 Minuten Zufall
|
|
|
|
def log(msg):
    """Print *msg* with a timestamp and append the same line to LOG."""
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    entry = f"[{stamp}] {msg}"
    print(entry, flush=True)
    with open(LOG, "a") as handle:
        handle.write(entry + "\n")
|
|
|
|
def get_filmliste():
    """Fetch the film names offered by the Hetzner API.

    Entries in the API's "downloads" list may be plain strings or dicts
    with a "name" key; falsy entries are dropped. Returns an empty list
    on any error (best effort; the error is logged).
    """
    try:
        with urllib.request.urlopen(API, timeout=15) as resp:
            payload = json.loads(resp.read())
        entries = payload.get("downloads", [])
        return [e if isinstance(e, str) else e.get("name", "")
                for e in entries if e]
    except Exception as exc:
        log(f"API-Fehler: {exc}")
        return []
|
|
|
|
def get_file_age_hours(name):
    """Age of *name* on the Hetzner host in hours, via Last-Modified.

    Sends a HEAD request and converts the Last-Modified header to an
    age in hours. Returns None when the request fails, the header is
    missing, or the date cannot be parsed.
    """
    target = BASE + "/" + urllib.parse.quote(name)
    try:
        head = urllib.request.Request(target, method="HEAD")
        with urllib.request.urlopen(head, timeout=10) as resp:
            modified = resp.headers.get("Last-Modified")
        if modified:
            stamp = email.utils.parsedate_to_datetime(modified).timestamp()
            return (time.time() - stamp) / 3600
    except Exception:
        pass
    return None
|
|
|
|
def _remote_size_mb(url):
    """Return the Content-Length of *url* in MB (HEAD request), 0 on error."""
    try:
        req = urllib.request.Request(url, method="HEAD")
        with urllib.request.urlopen(req, timeout=10) as r:
            return int(r.headers.get("Content-Length", 0)) / 1024 / 1024
    except Exception:
        return 0


def _download(url, ziel):
    """Download *url* atomically to *ziel* and return the size in MB.

    The transfer goes to a ``.part`` temp file first and is renamed into
    place only on success, so a crash mid-transfer never leaves a
    truncated file at the final NAS path. On any error the temp file is
    removed and the exception propagates.
    """
    temp = ziel + ".part"
    try:
        urllib.request.urlretrieve(url, temp)
        os.replace(temp, ziel)  # atomic within the same filesystem
    except Exception:
        if os.path.exists(temp):
            os.remove(temp)
        raise
    return os.path.getsize(ziel) / 1024 / 1024


def _notify_synced(name):
    """Best-effort POST telling the Hetzner callback *name* reached the NAS."""
    try:
        body = json.dumps({"name": name}).encode()
        req = urllib.request.Request(
            CALLBACK, data=body,
            headers={"Content-Type": "application/json"}, method="POST")
        urllib.request.urlopen(req, timeout=5)
    except Exception:
        pass  # deliberate: a missed callback must not undo a good copy


def sync():
    """One sync pass: copy films old and large enough from Hetzner to the NAS.

    A film is copied when it is at least MIN_ALTER_H hours old (plus a
    random jitter of up to JITTER_MIN minutes) and its remote size is at
    least 700 MB (smaller files are treated as SD leftovers and skipped).
    Each successful copy triggers the CALLBACK notification.
    """
    min_size_mb = 700  # single source for the size threshold used twice below

    filme = get_filmliste()
    if not filme:
        log("Keine Filme in API")
        return

    log(f"{len(filme)} Filme in API")
    kopiert = 0

    for name in filme:
        if not name:
            continue

        ziel = os.path.join(ZIEL, name)

        # Already present and plausibly complete? (same 700-MB threshold)
        if os.path.exists(ziel) and os.path.getsize(ziel) > min_size_mb * 1024 * 1024:
            continue

        # Build the remote URL once; reused for the HEAD check and download.
        url = BASE + "/" + urllib.parse.quote(name)

        size_mb = _remote_size_mb(url)
        if size_mb < min_size_mb:
            log(f"SKIP (zu klein, {size_mb:.0f} MB): {name}")
            continue

        alter_h = get_file_age_hours(name)
        if alter_h is None:
            log(f"SKIP (kein Header): {name}")
            continue

        # Wait MIN_ALTER_H hours plus a random 0..JITTER_MIN minutes.
        warte_h = MIN_ALTER_H + random.uniform(0, JITTER_MIN / 60)
        if alter_h < warte_h:
            rest_min = (warte_h - alter_h) * 60
            log(f"WARTE noch {rest_min:.0f} min: {name} (Alter: {alter_h:.1f}h)")
            continue

        log(f"LADE ({alter_h:.1f}h alt): {name}")
        try:
            size_mb = _download(url, ziel)
        except Exception as e:
            log(f" FEHLER: {e}: {name}")
            continue
        log(f" OK ({size_mb:.0f} MB): {name}")
        kopiert += 1
        _notify_synced(name)

    log(f"Fertig. {kopiert} neue Filme kopiert.")
|
|
|
|
if __name__ == "__main__":
|
|
sync()
|