#!/usr/bin/env python3
"""
Daytona Automobil — Scraper standalone
Endpoint Supabase Edge Function (POST) — pas besoin de Playwright.
Stock très petit (~3-24 véhicules hypercars/classics).
"""

import json
import urllib.request

ENDPOINT = "https://qglgijldaxyiggfsylit.supabase.co/functions/v1/get-vehicles"
BASE_URL = "https://daytona-automobil.se/inventory"


def fetch_vehicles():
    """POST an empty JSON body to the Supabase edge function and return
    the raw ``vehicles`` list (empty list when the key is absent)."""
    request = urllib.request.Request(
        ENDPOINT,
        data=b"{}",
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    # 15 s timeout: the inventory is tiny, so a slow answer means trouble.
    with urllib.request.urlopen(request, timeout=15) as response:
        payload = json.loads(response.read().decode("utf-8"))
    return payload.get("vehicles", [])


def normalize(vehicle):
    """Convert one raw Supabase vehicle record into the standardized
    AutoPremium dict format.

    Args:
        vehicle: Raw dict as returned by the Supabase edge function.

    Returns:
        dict with the AutoPremium keys. ``prix``, ``prix_eur`` and ``km``
        are always None because the endpoint does not expose them.
    """
    vid = vehicle.get("id", "")
    brand = vehicle.get("brand", "")
    model = vehicle.get("model", "")
    year = vehicle.get("model_year")
    # `or ""` guards against the key being present but set to None,
    # which `.get(..., "")` alone does not cover.
    headline = vehicle.get("headline", "") or ""
    identification = vehicle.get("identification", "")

    # Title = brand + model, with the headline appended when it adds info
    # (i.e. it is non-empty and not just a restatement of brand + model).
    titre = f"{brand} {model}".strip()
    if headline and headline.upper() != titre.upper():
        # When brand/model are both missing, use the headline alone
        # instead of producing a title with a dangling " — " prefix.
        titre = f"{titre} — {headline}" if titre else headline

    # Individual listing URL — only when the record carries an id.
    url = f"{BASE_URL}/{vid}" if vid else None

    # First image, preferring the largest available format.
    image = None
    images = vehicle.get("images") or []
    if images and isinstance(images[0], dict):
        # "formats" may be present but None — `or {}` avoids AttributeError.
        fmts = images[0].get("formats") or {}
        image = fmts.get("large") or fmts.get("main") or fmts.get("thumb")

    return {
        "titre": titre,
        "marque": brand,
        "modele": model,
        "prix": None,       # Not available via the Supabase API
        "devise": "SEK",
        "prix_eur": None,
        "km": None,          # Not available via the Supabase API
        "annee": year,
        "url": url,
        "image": image,
        "source": "daytona-automobil",
        "pays": "SE",
        "description": headline,
        "uuid": vid,
        "identification": identification
    }


def scrape():
    """Fetch the raw Supabase inventory and return it fully normalized."""
    return list(map(normalize, fetch_vehicles()))


if __name__ == "__main__":
    # CLI entry point: print a short human-readable summary, then the
    # full normalized inventory as JSON.
    inventory = scrape()
    print(f"\n  Daytona Automobil — {len(inventory)} véhicule(s)\n")
    for item in inventory:
        summary = f"  {item['marque']} {item['modele']} ({item['annee'] or '?'})"
        print(summary)
        print(f"    URL : {item['url']}")
        truncated = item['image'][:60] + '...' if item['image'] else 'N/A'
        print(f"    Image : {truncated}")
        print()
    print(json.dumps(inventory, indent=2, ensure_ascii=False))
