debug: detailliertes Logging in worker.py + Kayak verbessert

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
Cursor 2026-02-25 14:26:41 +07:00
parent 9ad786e171
commit 5b5cf38bd6

View file

@@ -15,256 +15,175 @@ def scrape(scanner, von, nach, tage=30):
return fn(von, nach, tage)
# NOTE(review): this span is a rendered diff that interleaves two versions —
# the REMOVED parse_preise_aus_text() and the ADDED _parse_preis() share the
# lines below; neither function is syntactically complete on its own.
def parse_preise_aus_text(text, scanner, abflug):
"""Extract EUR prices from page text via regex."""
results = []
# Patterns: 578 € or €578 or EUR 578 or 578 EUR or 1.234 €
patterns = [
r'(\d{1,2}[.,]\d{3})\s*[€]', # 1.234 €
r'[€]\s*(\d{3,4})', # €578
r'(\d{3,4})\s*[€]', # 578 €
r'EUR\s*(\d{3,4})', # EUR 578
r'(\d{3,4})\s*EUR', # 578 EUR
]
seen = set()
for pattern in patterns:
for m in re.findall(pattern, text):
clean = m.replace('.', '').replace(',', '')
# (diff) start of the ADDED replacement helper _parse_preis:
def _parse_preis(text):
if not text:
return None
# Normalize NBSP / narrow NBSP and decimal commas before matching.
text = text.replace('\xa0', ' ').replace('\u202f', ' ').replace(',', '.')
for p in [r'(\d{3,4})\s?€', r'\s?(\d{3,4})', r'EUR\s?(\d{3,4})', r'(\d{3,4})\s?EUR']:
m = re.search(p, text)
if m:
try:
preis = float(clean)
if 100 < preis < 5000 and preis not in seen:
seen.add(preis)
results.append({
"scanner": scanner,
"preis": preis,
"waehrung": "EUR",
"airline": "",
"abflug": abflug,
"ankunft": ""
})
v = float(m.group(1).replace('.', ''))
# Accept only plausible fares strictly between 200 and 8000 EUR.
if 200 < v < 8000:
return round(v, 2)
except ValueError:
continue
# At most the 10 cheapest
pass
return None
def _preise_aus_body(body, scanner, abflug):
results = []
seen = set()
# Alle Zahlen 300-6000 die neben einem € stehen
for m in re.finditer(r'(\d[\d\s\.]{1,5})\s?€|€\s?(\d[\d\s\.]{1,5})', body):
raw = (m.group(1) or m.group(2)).replace(' ', '').replace('.', '')
try:
v = float(raw)
if 300 < v < 6000 and v not in seen:
seen.add(v)
results.append({
"scanner": scanner, "preis": v, "waehrung": "EUR",
"airline": "", "abflug": abflug, "ankunft": ""
})
except ValueError:
pass
results.sort(key=lambda x: x["preis"])
return results[:10]
# NOTE(review): rendered-diff interleaving — removed and added lines of
# scrape_google_flights() (URL build, consent handling, selector fallbacks)
# appear mixed below; the span is not runnable as-is.
def scrape_google_flights(von, nach, tage=30):
abflug = (datetime.now() + timedelta(days=tage)).strftime("%Y-%m-%d")
# Direct Google Flights search URL — sc:w = Premium Economy
url = (
f"https://www.google.com/travel/flights/search"
f"?hl=de&curr=EUR"
f"#flt={von}.{nach}.{abflug};c:EUR;e:1;sd:1;t:f;sc:w"
)
# sc:w = Premium Economy
url = (f"https://www.google.com/travel/flights/search?hl=de&curr=EUR"
f"#flt={von}.{nach}.{abflug};c:EUR;e:1;sd:1;t:f;sc:w")
log = []
results = []
with SB(uc=True, headless=True, chromium_arg="--no-sandbox --disable-dev-shm-usage") as sb:
sb.open(url)
sb.sleep(6)
sb.sleep(7)
# Click away Google's consent page (GDPR, shown to German IPs)
if "consent.google" in sb.get_current_url() or "Bevor Sie" in sb.get_title():
for selector in [
'form[action*="save"] button',
'button[aria-label*="Alle akzeptieren"]',
'button[aria-label*="Accept all"]',
'button.tHlp8d',
'div#introAgreeButton',
]:
current_url = sb.get_current_url()
title = sb.get_title()
log.append(f"[GF] title={title[:60]} url={current_url[:80]}")
# Handle the consent page
if "consent" in current_url or "Bevor Sie" in title:
log.append("[GF] Consent-Seite erkannt")
for sel in ['form[action*="save"] button', 'button[jsname="tHlp8d"]',
'.lssxud button', 'button[aria-label*="akzeptieren"]']:
try:
sb.click(selector, timeout=3)
sb.sleep(3)
sb.click(sel, timeout=3)
sb.sleep(4)
log.append(f"[GF] Consent geklickt: {sel}")
break
except Exception:
continue
# Navigate to the search URL again after consent
# Back to the search URL
sb.open(url)
sb.sleep(6)
sb.sleep(8)
log.append(f"[GF] Nach Consent-Redirect: {sb.get_title()[:60]}")
# Cookie banner on the Flights page itself
for selector in ['button[aria-label*="Alle"]', 'button[aria-label*="Accept"]',
'button[aria-label*="Zustimmen"]', 'button.VfPpkd-LgbsSe']:
# Check whether we actually landed on the search-results page
title2 = sb.get_title()
if "Günstige Flüge" in title2 or "Google Flüge" in title2:
log.append("[GF] WARNUNG: Auf Homepage gelandet, versuche Formular")
# Fill in the search form directly
try:
sb.click(selector, timeout=2)
# Clear the origin field and type the IATA code
sb.click('input[placeholder*="Von"]', timeout=4)
sb.triple_click('input[placeholder*="Von"]')
sb.type('input[placeholder*="Von"]', von)
sb.sleep(2)
# Pick the first autocomplete entry
sb.click('li[data-code="' + von + '"]', timeout=3)
sb.sleep(1)
break
except Exception:
continue
# Destination field
sb.click('input[placeholder*="Wohin"]', timeout=3)
sb.type('input[placeholder*="Wohin"]', nach)
sb.sleep(2)
sb.click('li[data-code="' + nach + '"]', timeout=3)
sb.sleep(1)
log.append("[GF] Formular ausgefüllt, suche...")
sb.sleep(5)
except Exception as e:
log.append(f"[GF] Formular-Fehler: {e}")
# Wait a bit longer for prices to load
sb.sleep(4)
# Extract prices
body = sb.get_text("body")
log.append(f"[GF] Body-Länge: {len(body)} Zeichen")
results = []
# Aria labels with € — the "€" inside the selector looks lost in extraction; TODO confirm
try:
elems = sb.find_elements('[aria-label*=""]', timeout=3)
log.append(f"[GF] aria-label €-Elemente: {len(elems)}")
for elem in elems[:15]:
label = elem.get_attribute('aria-label') or elem.text
p = _parse_preis(label)
if p:
results.append({"scanner": "google_flights", "preis": p,
"waehrung": "EUR", "airline": "", "abflug": abflug, "ankunft": ""})
except Exception:
pass
# Attempt 1: specific price elements
selectors = [
'span[data-gs*="price"]',
'[data-price]',
'div.YMlIz', # Google Flights price container (commonly used)
'div.U3gSDe',
'span.nE0Jnd',
'div[jsname="MkNb9"] span',
'li[data-gs] span',
]
for sel in selectors:
try:
elems = sb.find_elements(sel, timeout=2)
for elem in elems[:15]:
preis = _parse_single(elem.text)
if preis:
results.append({
"scanner": "google_flights",
"preis": preis,
"waehrung": "EUR",
"airline": "",
"abflug": abflug,
"ankunft": ""
})
if results:
break
except Exception:
continue
# Attempt 2: aria labels carrying price strings
# Fallback: regex over the body text
if not results:
try:
elems = sb.find_elements('[aria-label*=""]', timeout=3)
for elem in elems[:20]:
label = elem.get_attribute('aria-label') or elem.text
preis = _parse_single(label)
if preis:
results.append({
"scanner": "google_flights",
"preis": preis,
"waehrung": "EUR",
"airline": "",
"abflug": abflug,
"ankunft": ""
})
except Exception:
pass
results = _preise_aus_body(body, "google_flights", abflug)
log.append(f"[GF] Regex-Fallback: {len(results)} Preise")
# Attempt 3: regex over the whole page text
if not results:
try:
body = sb.get_text("body", timeout=5)
results = parse_preise_aus_text(body, "google_flights", abflug)
except Exception:
pass
log.append(f"[GF] Ergebnis: {[r['preis'] for r in results]}")
# De-duplicate, cheapest first
seen = set()
unique = []
for r in sorted(results, key=lambda x: x["preis"]):
if r["preis"] not in seen:
seen.add(r["preis"])
unique.append(r)
return unique[:10]
def _parse_single(text):
if not text:
return None
text = text.replace('\xa0', ' ').replace('\u202f', ' ')
patterns = [
r'(\d{1,2}[.,]\d{3})\s*[€]',
r'[€]\s*(\d{3,4})',
r'(\d{3,4})\s*[€]',
r'EUR\s*(\d{3,4})',
r'(\d{3,4})\s*EUR',
]
for p in patterns:
m = re.search(p, text)
if m:
clean = m.group(1).replace('.', '').replace(',', '')
try:
v = float(clean)
if 100 < v < 5000:
return round(v, 2)
except ValueError:
continue
return None
# NOTE(review): orphaned tail of the ADDED scrape_google_flights() version —
# its body is interleaved further up in this rendered diff.
print('\n'.join(log))
return results[:10]
# NOTE(review): rendered-diff interleaving — removed and added lines of
# scrape_kayak() are mixed below; the span is not runnable as-is.
def scrape_kayak(von, nach, tage=30):
abflug = (datetime.now() + timedelta(days=tage)).strftime("%Y-%m-%d")
# ~prem~ / cabin=w = Premium Economy on Kayak
url = f"https://www.kayak.de/flights/{von}-{nach}/{abflug}?sort=price_a&currency=EUR&cabin=w"
url = f"https://www.kayak.de/flights/{von}-{nach}/{abflug}?sort=price_a&cabin=w&currency=EUR"
log = []
results = []
with SB(uc=True, headless=True, chromium_arg="--no-sandbox --disable-dev-shm-usage") as sb:
sb.open(url)
sb.sleep(7)
sb.sleep(15)
# Kayak price selectors
for sel in ['.price-text', '.f8F1-price-text', 'span[class*="price"]',
'div[class*="price"] span', '.Iqt3']:
try:
elems = sb.find_elements(sel, timeout=3)
for elem in elems[:10]:
preis = _parse_single(elem.text)
if preis:
results.append({
"scanner": "kayak",
"preis": preis,
"waehrung": "EUR",
"airline": "",
"abflug": abflug,
"ankunft": ""
})
if results:
break
except Exception:
continue
title = sb.get_title()
body = sb.get_text("body")
log.append(f"[KY] title={title[:60]}")
log.append(f"[KY] body-länge={len(body)}")
log.append(f"[KY] body-500={body[:500]}")
# Fallback regex
if not results:
# Kayak CSS selectors
for sel in ['.price-text', '.f8F1-price-text', 'div[class*="price"] span',
'span[class*="price"]', '.Iqt3', 'div.nrc6-price']:
try:
body = sb.get_text("body", timeout=5)
results = parse_preise_aus_text(body, "kayak", abflug)
elems = sb.find_elements(sel, timeout=2)
if elems:
log.append(f"[KY] Selector '{sel}': {len(elems)} Elemente")
for e in elems[:10]:
p = _parse_preis(e.text)
if p:
results.append({"scanner": "kayak", "preis": p,
"waehrung": "EUR", "airline": "",
"abflug": abflug, "ankunft": ""})
if results:
break
except Exception:
pass
# Fallback regex
if not results:
results = _preise_aus_body(body, "kayak", abflug)
log.append(f"[KY] Ergebnis: {[r['preis'] for r in results]}")
print('\n'.join(log))
return results[:10]
def scrape_skyscanner(von, nach, tage=30):
    """Scrape Skyscanner for premium-economy prices `tage` days from now.

    Opens the result page, tries a list of known price CSS selectors and,
    if none yields a price, falls back to a regex sweep over the full page
    text. Returns at most 10 scanner-result dicts.
    """
    abflug_fmt = (datetime.now() + timedelta(days=tage)).strftime("%y%m%d")
    abflug_iso = (datetime.now() + timedelta(days=tage)).strftime("%Y-%m-%d")
    # cabinclass=premiumeconomy = Premium Economy on Skyscanner
    url = f"https://www.skyscanner.de/transport/flights/{von.lower()}/{nach.lower()}/{abflug_fmt}/?currency=EUR&cabinclass=premiumeconomy"
    results = []
    with SB(uc=True, headless=True, chromium_arg="--no-sandbox --disable-dev-shm-usage") as sb:
        sb.open(url)
        sb.sleep(7)
        for css in ('[data-testid="price-label"]', 'span[class*="Price"]',
                    'div[class*="price"]', 'span[class*="price"]'):
            try:
                for node in sb.find_elements(css, timeout=3)[:10]:
                    fare = _parse_single(node.text)
                    if fare:
                        results.append({
                            "scanner": "skyscanner",
                            "preis": fare,
                            "waehrung": "EUR",
                            "airline": "",
                            "abflug": abflug_iso,
                            "ankunft": "",
                        })
                if results:
                    break
            except Exception:
                continue
        if not results:
            # Last resort: regex over the whole visible page text.
            try:
                page_text = sb.get_text("body", timeout=5)
                results = parse_preise_aus_text(page_text, "skyscanner", abflug_iso)
            except Exception:
                pass
    return results[:10]
"""Skyscanner hat starken Bot-Schutz — für jetzt übersprungen."""
print("[SS] Skyscanner übersprungen (Bot-Detection)")
return []