#!/usr/bin/env python3
from __future__ import annotations

import csv
import json
import re
from collections import defaultdict
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Optional, Tuple

# Repository layout: this script lives one directory below the project root.
ROOT = Path(__file__).resolve().parents[1]
SOURCE_ROOT = (ROOT / "Pulled_Info").resolve()
INVENTORY_DIR = SOURCE_ROOT / "Inventory"
NON_FOOTWEAR_INVENTORY_DIR = INVENTORY_DIR / "Non-Footwear"
SCRAPE_PATH = SOURCE_ROOT / "Boot_Features" / "RW_Scrapers" / "RW_Site_Scrape_Merged.md"
OUTPUT_PATH = ROOT / "data" / "inventory-data.json"
VOUCHER_SCAN_PATH = SOURCE_ROOT / "Vouchers" / "voucher_scanned_accounts.json"
FEATURE_OVERRIDES_PATH = ROOT / "config" / "boot_feature_overrides.json"

# Inventory export filenames carry a YYYYMMDD date stamp before the fixed suffix.
DATE_RE = re.compile(r"(\d{8})_On_Hand_Footwear", re.IGNORECASE)
NON_FOOTWEAR_DATE_RE = re.compile(r"(\d{8})_On_Hand_Non_Footwear", re.IGNORECASE)
# Matches only the canonical "combined" non-footwear export name (no extra suffixes).
NON_FOOTWEAR_COMBINED_NAME_RE = re.compile(r"^\d{8}_On_Hand_Non_Footwear\.csv$", re.IGNORECASE)
# Combined code: style digits + optional width letters/digits + 3-digit size (tenths).
STYLE_WD_SIZE_RE = re.compile(r"^(?P<style>\d+)(?P<width>[A-Z]\d?|[A-Z]+)?(?P<size>\d{3})$")
# Canonical narrow-to-wide ordering used when rendering width breakdowns.
WIDTH_ORDER = ["A2", "A", "B", "C", "D", "E", "E2", "E3", "H"]
# Display labels per width key (currently identity; kept for future remapping).
WIDTH_DISPLAY = {
    "A2": "A2",
    "A": "A",
    "B": "B",
    "C": "C",
    "D": "D",
    "E": "E",
    "E2": "E2",
    "E3": "E3",
    "H": "H",
}

# Label exported in config for styles without scrape details — TODO confirm frontend usage.
HERITAGE_LABEL = "No details"
# Boot height filter choices exposed to the frontend (also used as an OR-group).
HEIGHT_OPTIONS = [
    "Oxford/Athletic",
    "Chukka",
    "Hiker",
    '5"',
    '6"',
    '7"',
    '8"',
    '9"',
    '10"',
    '11"',
    '12"',
]


@dataclass
class StyleFeature:
    """Scraped catalog details plus 0/1 feature flags for one normalized style."""

    style: str  # normalized style number (digit-leading, no leading zeros)
    name: str
    brand: str
    url: str  # product page URL ("" when unknown)
    image: str  # product image URL ("" when unknown)
    price: float
    features: Dict[str, int]  # feature name -> 0/1 flag


@dataclass
class InventoryAggregate:
    """Running per-style totals accumulated across inventory CSV rows."""

    norm_style: str
    total_qty: float = 0.0
    raw_styles: set[str] = field(default_factory=set)  # original Style strings seen
    # size -> width key -> quantity; defaultdict eases incremental accumulation
    size_map: Dict[float, Dict[str, float]] = field(default_factory=lambda: defaultdict(dict))
    total_cost: float = 0.0
    total_ext_cost: float = 0.0


def normalize_style(style: str) -> str:
    """Reduce a raw style code to its canonical form.

    Drops any non-digit prefix (e.g. brand letters) and leading zeros, so
    "RW00875" and "00875" both normalize to "875".  Inputs containing no
    digit at all normalize to the empty string.
    """
    text = str(style or "").strip()
    first_digit = next((idx for idx, ch in enumerate(text) if ch.isdigit()), len(text))
    return text[first_digit:].lstrip("0")


def normalize_width(width_raw: str) -> Optional[str]:
    """Map a raw width spelling onto one of the canonical width keys.

    Collapses the many vendor spellings ("EE", "2E", "W2", ...) onto the
    keys used by WIDTH_ORDER.  Returns None for blank or unrecognized input.
    """
    aliases = {
        "AA": "A2",
        "A": "A",
        "B": "B",
        "C": "C",
        "D": "D",
        "M": "D",  # "medium" is the D width
        "E": "E",
        "E1": "E",
        "E2": "E2",
        "EE": "E2",
        "2E": "E2",
        "W2": "E2",
        "E3": "E3",
        "EEE": "E3",
        "3E": "E3",
        "E4": "H",
        "EEEE": "H",
        "4E": "H",
        "H": "H",
    }
    return aliases.get(str(width_raw or "").strip().upper())


def parse_style_wd_size(style_wd_size: str) -> Tuple[Optional[str], Optional[float], Optional[str]]:
    """Split a combined style+width+size code into its three parts.

    Returns (normalized_style, size, width_key); any part that cannot be
    determined comes back as None.  The trailing three digits encode the
    size in tenths (e.g. "105" -> 10.5).
    """
    value = re.sub(r"\s+", "", str(style_wd_size or "").strip().upper())
    if not value:
        return None, None, None

    # Fast path: 5-digit style + width letters + 3-digit size.  Handled before
    # the regex so digit-bearing widths (e.g. "E2") are not mis-consumed by
    # the generic pattern's style group.
    if len(value) >= 8 and value[:5].isdigit() and value[-3:].isdigit():
        style_digits = value[:5]
        width_raw = value[5:-3]
        size = int(value[-3:]) / 10.0
        # Unrecognized width spellings pass through as-is; missing width -> "D".
        width_key = normalize_width(width_raw) or (width_raw or "D")
        return normalize_style(style_digits), size, width_key

    match = STYLE_WD_SIZE_RE.match(value)
    if not match:
        # Lenient fallback: leading digits + arbitrary middle + 3-digit size suffix.
        if len(value) >= 4 and value[-3:].isdigit():
            size = int(value[-3:]) / 10.0
            prefix = value[:-3]
            prefix_match = re.match(r"^(?P<style>\d+)(?P<width>.*)$", prefix)
            if not prefix_match:
                return None, None, None
            width_raw = prefix_match.group("width") or ""
            width_key = normalize_width(width_raw) or (width_raw or "D")
            return normalize_style(prefix_match.group("style")), size, width_key
        return None, None, None

    width_raw = match.group("width") or ""
    return (
        normalize_style(match.group("style")),
        int(match.group("size")) / 10.0,
        normalize_width(width_raw) or (width_raw or "D"),
    )


def parse_currency(value: str) -> float:
    """Parse a currency string into a float; alias for parse_powerbi_number."""
    return parse_powerbi_number(value)


def parse_powerbi_number(value: str) -> float:
    """Parse a Power BI-exported numeric string into a float.

    Strips dollar signs and thousands separators, treats accounting-style
    parentheses as a leading minus sign, and returns 0.0 for anything that
    still fails to parse (including blank/None input).
    """
    cleanup = str.maketrans({"$": "", ",": "", "(": "-", ")": ""})
    cleaned = str(value or "").strip().translate(cleanup)
    try:
        return float(cleaned)
    except ValueError:
        return 0.0


def load_feature_overrides() -> Dict[str, dict]:
    """Load per-style feature overrides from the JSON config file.

    The loader is deliberately forgiving: a missing file, unreadable JSON,
    or a malformed entry yields an empty mapping / skipped entry rather than
    aborting the export.

    Returns:
        Mapping of normalized style -> override record with name/brand/url/
        image/price plus a features dict of 0/1 flags.
    """

    def as_flag(value) -> int:
        # Coerce an arbitrary JSON value to a 0/1 flag.  The previous bare
        # int() call raised on non-numeric strings (e.g. "yes"), crashing the
        # whole export despite this function's swallow-errors design.
        try:
            return 1 if int(value or 0) else 0
        except (TypeError, ValueError):
            return 0

    if not FEATURE_OVERRIDES_PATH.is_file():
        return {}
    try:
        raw = json.loads(FEATURE_OVERRIDES_PATH.read_text(encoding="utf-8"))
    except Exception:
        return {}
    if not isinstance(raw, dict):
        return {}
    out: Dict[str, dict] = {}
    for style, payload in raw.items():
        if not isinstance(style, str) or not isinstance(payload, dict):
            continue
        norm_style = normalize_style(style)
        if not norm_style:
            continue
        features = payload.get("features", {})
        if not isinstance(features, dict):
            features = {}
        out[norm_style] = {
            "name": str(payload.get("name", "") or "").strip(),
            "brand": str(payload.get("brand", "") or "").strip(),
            "url": str(payload.get("url", "") or "").strip(),
            "image": str(payload.get("image", "") or "").strip(),
            "price": parse_currency(str(payload.get("price", "") or "")),
            "features": {str(key): as_flag(value) for key, value in features.items() if isinstance(key, str)},
        }
    return out


def save_feature_override(style: str, payload: dict) -> dict:
    """Persist a manual feature override for one style and return it.

    Merges the new entry into the overrides JSON file (creating the config
    directory if needed) and returns the stored record keyed by normalized
    style.  Raises ValueError when the style normalizes to empty.
    """
    norm_style = normalize_style(style)
    if not norm_style:
        raise ValueError("Style is required")
    existing = load_feature_overrides()
    features = payload.get("features", {})
    if not isinstance(features, dict):
        features = {}
    existing[norm_style] = {
        "name": str(payload.get("name", "") or "").strip(),
        "brand": str(payload.get("brand", "") or "").strip(),
        "url": str(payload.get("url", "") or "").strip(),
        "image": str(payload.get("image", "") or "").strip(),
        "price": parse_currency(str(payload.get("price", "") or "")),
        # NOTE(review): int() raises on non-numeric flag values — presumably
        # acceptable as input validation for callers of this API; confirm.
        "features": {str(key): 1 if int(value or 0) else 0 for key, value in features.items() if isinstance(key, str)},
    }
    FEATURE_OVERRIDES_PATH.parent.mkdir(parents=True, exist_ok=True)
    # sort_keys keeps the config file diff-friendly across edits.
    FEATURE_OVERRIDES_PATH.write_text(json.dumps(existing, indent=2, sort_keys=True), encoding="utf-8")
    return {"style": norm_style, **existing[norm_style]}


def find_latest_inventory_csv() -> Path:
    """Locate the newest footwear on-hand export under the inventory tree.

    Files carrying a YYYYMMDD stamp in their name win, newest stamp first;
    when no stamped file exists, the most recently modified file is used.
    Raises FileNotFoundError when no export is present at all.
    """
    candidates = [path for path in INVENTORY_DIR.rglob("*On_Hand_Footwear*.csv") if path.is_file()]
    if not candidates:
        raise FileNotFoundError(f"No inventory export found in {INVENTORY_DIR}")

    stamped: List[Tuple[datetime, Path]] = []
    unstamped: List[Path] = []
    for candidate in candidates:
        stamp = DATE_RE.search(candidate.name)
        if stamp:
            stamped.append((datetime.strptime(stamp.group(1), "%Y%m%d"), candidate))
        else:
            unstamped.append(candidate)

    if stamped:
        return max(stamped, key=lambda entry: entry[0])[1]
    return max(unstamped, key=lambda path: path.stat().st_mtime)


def non_footwear_csv_has_data(path: Path) -> bool:
    """Return True when the CSV has at least one non-empty row past the header.

    Unreadable or missing files count as empty (False).
    """
    try:
        with path.open("r", encoding="utf-8-sig", newline="") as handle:
            rows = csv.reader(handle)
            next(rows, None)  # skip the header line
            return any(
                any(str(cell or "").strip() for cell in row)
                for row in rows
            )
    except OSError:
        return False


def find_latest_non_footwear_inventory_csv() -> Optional[Path]:
    """Pick the newest non-footwear export that actually contains data rows.

    Only date-stamped files with at least one data row are considered.  Files
    matching the canonical combined-export name are preferred over any other
    stamped files; ties on the date stamp fall back to filesystem mtime.
    Returns None when no usable export exists.
    """
    if not NON_FOOTWEAR_INVENTORY_DIR.is_dir():
        return None

    stamped: List[Tuple[datetime, Path]] = []
    for candidate in NON_FOOTWEAR_INVENTORY_DIR.rglob("*On_Hand_Non_Footwear*.csv"):
        if not candidate.is_file():
            continue
        stamp = NON_FOOTWEAR_DATE_RE.search(candidate.name)
        if stamp and non_footwear_csv_has_data(candidate):
            stamped.append((datetime.strptime(stamp.group(1), "%Y%m%d"), candidate))

    if not stamped:
        return None

    combined = [entry for entry in stamped if NON_FOOTWEAR_COMBINED_NAME_RE.match(entry[1].name or "")]
    pool = combined or stamped
    best = max(pool, key=lambda entry: (entry[0], entry[1].stat().st_mtime))
    return best[1]


def split_markdown_row(line: str) -> List[str]:
    """Split one markdown table row into stripped cell values.

    Uses the csv module with '|' as the delimiter so quoted cells containing
    pipes survive, then discards the empty edge cells produced by a leading
    or trailing pipe.
    """
    cells = next(csv.reader([line], delimiter="|"))
    start = 1 if cells and cells[0] == "" else 0
    end = len(cells) - 1 if len(cells) > start and cells[-1] == "" else len(cells)
    return [cell.strip() for cell in cells[start:end]]


def load_features() -> Tuple[Dict[str, StyleFeature], List[str]]:
    """Parse the merged scrape markdown table and apply manual overrides.

    Returns a mapping of normalized style -> StyleFeature plus the ordered,
    de-duplicated list of feature column names ("Safety Toe" is renamed to
    "Any Safety Toe"; "Heritage" and "Resoleable" are always appended).
    """
    raw_lines = SCRAPE_PATH.read_text(encoding="utf-8").splitlines()
    # Only pipe-prefixed lines belong to the markdown table.
    table_lines = [line for line in raw_lines if line.strip().startswith("|")]
    if len(table_lines) < 3:
        raise ValueError(f"Could not parse markdown table from {SCRAPE_PATH}")

    headers = split_markdown_row(table_lines[0])
    # table_lines[1] is the markdown separator row (|---|---|); skip it.
    feature_rows = [split_markdown_row(line) for line in table_lines[2:]]
    # Keep only well-formed rows whose first cell looks like a style number.
    rows = [row for row in feature_rows if len(row) == len(headers) and row and row[0] and row[0][0].isdigit()]

    core = {"Style #", "Name", "URL", "Image", "Brand", "Price"}
    feature_names = [header for header in headers if header not in core]

    feature_map: Dict[str, StyleFeature] = {}
    for row in rows:
        record = dict(zip(headers, row))
        norm_style = normalize_style(record.get("Style #", ""))
        if not norm_style:
            continue
        features = {name: int((record.get(name) or "0").strip() or "0") for name in feature_names}
        # Legacy column name: fold "Safety Toe" into "Any Safety Toe" and
        # rebind feature_names so later rows use the renamed column directly.
        if "Safety Toe" in features and "Any Safety Toe" not in features:
            features["Any Safety Toe"] = features.pop("Safety Toe")
            feature_names = ["Any Safety Toe" if name == "Safety Toe" else name for name in feature_names]
        feature_map[norm_style] = StyleFeature(
            style=norm_style,
            name=record.get("Name", "").strip(),
            brand=record.get("Brand", "").strip(),
            url=record.get("URL", "").strip(),
            image=record.get("Image", "").strip(),
            price=parse_currency(record.get("Price", "")),
            features=features,
        )

    # Manual overrides win over scraped values; feature flags are merged so an
    # override only replaces the flags it explicitly sets.
    overrides = load_feature_overrides()
    for norm_style, override in overrides.items():
        existing = feature_map.get(norm_style)
        merged_features = dict(existing.features if existing else {})
        merged_features.update(override.get("features", {}))
        feature_map[norm_style] = StyleFeature(
            style=norm_style,
            name=str(override.get("name", existing.name if existing else "")),
            brand=str(override.get("brand", existing.brand if existing else "")),
            url=str(override.get("url", existing.url if existing else "")),
            image=str(override.get("image", existing.image if existing else "")),
            price=float(override.get("price", existing.price if existing else 0.0) or 0.0),
            features=merged_features,
        )

    # Build the final feature-name list: table order first, then any names only
    # present in overrides, de-duplicated under the "Any Safety Toe" rename.
    deduped_features: List[str] = []
    seen = set()
    for name in feature_names:
        mapped = "Any Safety Toe" if name == "Safety Toe" else name
        if mapped in seen:
            continue
        seen.add(mapped)
        deduped_features.append(mapped)
    for override in overrides.values():
        for feature_name in override.get("features", {}).keys():
            mapped = "Any Safety Toe" if feature_name == "Safety Toe" else feature_name
            if mapped in seen:
                continue
            seen.add(mapped)
            deduped_features.append(mapped)
    # These two are always exposed as filters even when absent from the table.
    for required_feature in ("Heritage", "Resoleable"):
        if required_feature not in seen:
            deduped_features.append(required_feature)
    return feature_map, deduped_features


def load_inventory(csv_path: Path) -> Dict[str, InventoryAggregate]:
    """Aggregate the footwear on-hand CSV by normalized style number.

    Quantities and costs are summed per style; when the StyleWdSize column
    parses, quantities are additionally broken down by size and width.
    Raises ValueError when the CSV lacks the Style or Qty columns.
    """
    aggregates: Dict[str, InventoryAggregate] = {}
    with csv_path.open("r", encoding="utf-8-sig", newline="") as handle:
        reader = csv.DictReader(handle)
        columns = reader.fieldnames or []
        if "Style" not in columns or "Qty" not in columns:
            raise ValueError(f"Inventory CSV missing required columns: {reader.fieldnames}")
        for row in reader:
            raw_style = str(row.get("Style", "")).strip()
            if not raw_style or not raw_style[0].isdigit():
                continue
            norm_style = normalize_style(raw_style)
            size: Optional[float] = None
            width_key: Optional[str] = None
            style_wd_size = row.get("StyleWdSize")
            if style_wd_size:
                parsed_style, parsed_size, parsed_width = parse_style_wd_size(style_wd_size)
                if parsed_style:
                    norm_style, size, width_key = parsed_style, parsed_size, parsed_width

            qty = parse_powerbi_number(row.get("Qty", 0))
            agg = aggregates.setdefault(norm_style, InventoryAggregate(norm_style=norm_style))
            agg.total_qty += qty
            agg.raw_styles.add(raw_style)
            agg.total_cost += parse_currency(row.get("Cost", ""))
            agg.total_ext_cost += parse_currency(row.get("ExtCost", ""))
            if size is not None and width_key:
                width_totals = agg.size_map.setdefault(size, {})
                width_totals[width_key] = float(width_totals.get(width_key, 0.0)) + qty
    return aggregates


def merge_inventory_aggregate_map(
    target: Dict[str, InventoryAggregate],
    source: Dict[str, InventoryAggregate],
) -> Dict[str, InventoryAggregate]:
    """Fold every aggregate in *source* into *target*; mutates and returns *target*."""
    for norm_style, addition in source.items():
        combined = target.setdefault(norm_style, InventoryAggregate(norm_style=norm_style))
        combined.total_qty += addition.total_qty
        combined.raw_styles |= addition.raw_styles
        combined.total_cost += addition.total_cost
        combined.total_ext_cost += addition.total_ext_cost
        for size, width_map in addition.size_map.items():
            bucket = combined.size_map.setdefault(size, {})
            for width, qty in width_map.items():
                bucket[width] = float(bucket.get(width, 0.0)) + float(qty or 0.0)
    return target


def load_mixed_non_footwear_footwear_inventory(
    csv_path: Path,
    existing_inventory: Dict[str, InventoryAggregate] | None = None,
) -> Dict[str, InventoryAggregate]:
    """Pull footwear-category rows out of a non-footwear export.

    Only rows whose Category column is "footwear" are read.  Rows already
    represented in *existing_inventory* (same style/size/width with positive
    quantity) are skipped so that merging the result does not double-count.
    """
    aggregates: Dict[str, InventoryAggregate] = {}
    with csv_path.open("r", encoding="utf-8-sig", newline="") as handle:
        reader = csv.DictReader(handle)
        for row in reader:
            category = str(row.get("Category", "") or "").strip().lower()
            if category != "footwear":
                continue
            raw_style = str(row.get("Style", "")).strip()
            if not raw_style or not raw_style[0].isdigit():
                continue
            norm_style = normalize_style(raw_style)
            size = None
            width_key = None
            if row.get("StyleWdSize"):
                parsed_style, parsed_size, parsed_width = parse_style_wd_size(row["StyleWdSize"])
                if parsed_style:
                    norm_style = parsed_style
                    size = parsed_size
                    width_key = parsed_width

            existing = (existing_inventory or {}).get(norm_style)
            if existing is not None:
                # The main footwear export already covers this exact size/width.
                if size is not None and width_key and float(existing.size_map.get(size, {}).get(width_key, 0.0)) > 0:
                    continue
                if size is None or width_key is None:
                    # If the main footwear export already has this style but the stray row
                    # cannot be tied to a specific size/width, skip it to avoid doubling totals.
                    continue

            qty = parse_powerbi_number(row.get("Qty", 0))
            agg = aggregates.setdefault(norm_style, InventoryAggregate(norm_style=norm_style))
            agg.total_qty += qty
            agg.raw_styles.add(raw_style)
            agg.total_cost += parse_currency(row.get("Cost", ""))
            agg.total_ext_cost += parse_currency(row.get("ExtCost", ""))
            if size is not None and width_key:
                current = agg.size_map.setdefault(size, {})
                current[width_key] = float(current.get(width_key, 0.0)) + qty
    return aggregates


def load_inventory_totals(csv_path: Path) -> Dict[str, float]:
    """Sum the Qty column per normalized style for one inventory CSV."""
    totals: Dict[str, float] = {}
    with csv_path.open("r", encoding="utf-8-sig", newline="") as handle:
        for row in csv.DictReader(handle):
            raw_style = str(row.get("Style", "")).strip()
            if not raw_style or not raw_style[0].isdigit():
                continue
            key = normalize_style(raw_style)
            totals[key] = float(totals.get(key, 0.0)) + parse_powerbi_number(row.get("Qty", 0))
    return totals


def load_inventory_sizes(csv_path: Path) -> Dict[str, Dict[float, Dict[str, float]]]:
    """Build a style -> size -> width -> qty breakdown from one inventory CSV.

    Rows whose StyleWdSize column cannot be parsed into a size and width are
    skipped entirely; they contribute nothing to the breakdown.
    """
    out: Dict[str, Dict[float, Dict[str, float]]] = {}
    with csv_path.open("r", encoding="utf-8-sig", newline="") as handle:
        for row in csv.DictReader(handle):
            raw_style = str(row.get("Style", "")).strip()
            if not raw_style or not raw_style[0].isdigit():
                continue
            norm_style = normalize_style(raw_style)
            size = width_key = None
            style_wd_size = row.get("StyleWdSize")
            if style_wd_size:
                parsed_style, parsed_size, parsed_width = parse_style_wd_size(style_wd_size)
                if parsed_style:
                    norm_style, size, width_key = parsed_style, parsed_size, parsed_width
            if size is None or width_key is None:
                continue
            qty = parse_powerbi_number(row.get("Qty", 0))
            width_map = out.setdefault(norm_style, {}).setdefault(float(size), {})
            width_map[width_key] = float(width_map.get(width_key, 0.0)) + qty
    return out


def inventory_files_with_dates() -> List[Tuple[datetime, Path]]:
    """List every date-stamped footwear export, newest stamp first.

    Files whose names lack a YYYYMMDD stamp are ignored.
    """
    stamped = [
        (datetime.strptime(match.group(1), "%Y%m%d"), path)
        for path in INVENTORY_DIR.rglob("*On_Hand_Footwear*.csv")
        if (match := DATE_RE.search(path.name))
    ]
    stamped.sort(key=lambda entry: entry[0], reverse=True)
    return stamped


def build_diff_payload(features: Dict[str, StyleFeature]) -> dict:
    """Compare the newest inventory export with one from roughly a week earlier.

    Picks the most recent dated CSV as "current" and the newest file at least
    seven days older as "previous" (falling back to the second-newest file).
    Emits per-style sold/incoming quantities plus a size/width delta map.

    Args:
        features: normalized style -> StyleFeature, for names/brands/URLs.
    """
    files = inventory_files_with_dates()
    if len(files) < 2:
        return {"available": False, "message": "Not enough dated inventory CSVs found."}

    current_dt, current_path = files[0]
    target_dt = current_dt - timedelta(days=7)
    previous_dt = None
    previous_path = None
    for dt, path in files[1:]:
        if dt <= target_dt:
            previous_dt = dt
            previous_path = path
            break
    if previous_path is None:
        # No export is a full week old yet; diff against the next-newest file.
        previous_dt, previous_path = files[1]

    current_totals = load_inventory_totals(current_path)
    previous_totals = load_inventory_totals(previous_path)
    current_sizes = load_inventory_sizes(current_path)
    previous_sizes = load_inventory_sizes(previous_path)

    rows: List[dict] = []
    # Numeric styles sort numerically and ahead of alphanumeric ones.  The
    # previous int-or-str key raised TypeError when both kinds were present
    # (normalize_style can yield values like "123ABC").
    for norm_style in sorted(
        set(current_totals) | set(previous_totals),
        key=lambda value: (0, int(value), "") if value.isdigit() else (1, 0, value),
    ):
        prev_qty = float(previous_totals.get(norm_style, 0.0))
        curr_qty = float(current_totals.get(norm_style, 0.0))
        sold_qty = prev_qty - curr_qty
        if abs(sold_qty) < 1e-9:
            # No net movement for this style; skip it entirely.
            continue

        sold_parts: List[str] = []
        incoming_parts: List[str] = []
        size_delta_map: Dict[str, float] = {}
        prev_size_map = previous_sizes.get(norm_style, {})
        curr_size_map = current_sizes.get(norm_style, {})
        for size in sorted(set(prev_size_map) | set(curr_size_map)):
            prev_widths = prev_size_map.get(size, {})
            curr_widths = curr_size_map.get(size, {})
            for width in sorted(set(prev_widths) | set(curr_widths)):
                # Positive delta = stock decreased (sold); negative = incoming.
                delta = float(prev_widths.get(width, 0.0)) - float(curr_widths.get(width, 0.0))
                if abs(delta) < 1e-9:
                    continue
                label = f"{format_size(size)} {WIDTH_DISPLAY.get(width, width)}"
                size_delta_map[label] = delta
                if delta > 0:
                    sold_parts.append(f"{label}:{delta:g}")
                else:
                    incoming_parts.append(f"{label}:{abs(delta):g}")

        feature = features.get(norm_style)
        rows.append(
            {
                "style": norm_style,
                "name": feature.name if feature else "Heritage",
                "brand": feature.brand if feature else "HERITAGE",
                "soldQty": sold_qty,
                "prevQty": prev_qty,
                "currQty": curr_qty,
                "soldSizes": sold_parts,
                "incomingSizes": incoming_parts,
                "sizeDeltaMap": size_delta_map,
                "productUrl": feature.url if feature else "",
            }
        )

    # Biggest absolute movement first; style number breaks ties.
    rows.sort(key=lambda row: (abs(float(row["soldQty"])), row["style"]), reverse=True)
    return {
        "available": True,
        "currentFile": current_path.name,
        "currentDate": current_dt.date().isoformat(),
        "previousFile": previous_path.name,
        "previousDate": previous_dt.date().isoformat() if previous_dt else "",
        "rows": rows,
        "fileHistory": [
            {"date": dt.date().isoformat(), "file": path.name}
            for dt, path in files
        ],
    }


def build_voucher_payload() -> dict:
    """Summarize the cached voucher scan for inclusion in the payload.

    Reads the scanner's JSON cache and exposes counts plus capped previews;
    reports unavailability when the cache file does not exist.
    """
    if not VOUCHER_SCAN_PATH.is_file():
        return {"available": False, "message": "Voucher scan cache not found."}
    cache = json.loads(VOUCHER_SCAN_PATH.read_text(encoding="utf-8"))
    terms = list(cache.get("completed_terms") or [])
    accounts = list(cache.get("seen_accounts") or [])
    return {
        "available": True,
        "completedTermCount": len(terms),
        "completedTermsPreview": terms[:120],
        "accountCount": len(accounts),
        "accountPreview": accounts[:120],
        "sourceFile": VOUCHER_SCAN_PATH.name,
        "notes": "The source folder currently exposes scraper/cache artifacts, not the voucher markdown records used by the desktop preview pane.",
    }


def format_size(value: float) -> str:
    """Render a shoe size compactly: whole sizes without a decimal point.

    Rounds before truncating so values sitting a hair *below* a whole number
    within the 1e-9 tolerance (e.g. 10.9999999995) render as "11" — the old
    bare int() truncated them to "10".
    """
    if abs(value - round(value)) < 1e-9:
        return str(int(round(value)))
    return f"{value:g}"


def format_size_map(size_map: Dict[float, Dict[str, float]]) -> str:
    """Render a size->width->qty map as "size:W1,W2" chunks joined by two spaces.

    Widths appear in canonical WIDTH_ORDER and only when their quantity is
    positive; sizes with no stocked canonical width are omitted entirely.
    """
    chunks: List[str] = []
    for size, widths in sorted(size_map.items()):
        stocked = [WIDTH_DISPLAY.get(width, width) for width in WIDTH_ORDER if widths.get(width, 0) > 0]
        if stocked:
            chunks.append(f"{format_size(size)}:{','.join(stocked)}")
    return "  ".join(chunks)


def serialize_size_map(size_map: Dict[float, Dict[str, float]]) -> Dict[str, Dict[str, float]]:
    """Produce a JSON-friendly copy of a size map with string size keys.

    Canonical widths (WIDTH_ORDER) come first in order; any unrecognized
    width codes with positive quantity follow in their original order.
    Zero/negative quantities are dropped.
    """
    serialized: Dict[str, Dict[str, float]] = {}
    for size, widths in sorted(size_map.items()):
        canonical = {width: widths[width] for width in WIDTH_ORDER if widths.get(width, 0) > 0}
        leftovers = {width: qty for width, qty in widths.items() if width not in canonical and qty > 0}
        serialized[format_size(size)] = {**canonical, **leftovers}
    return serialized


def build_validation_payload(warnings: List[dict], limit: int = 25) -> dict:
    """Normalize raw warning dicts into the payload's validation section.

    Non-dict entries and entries without a message are dropped.  At most
    max(1, limit) warnings are included in the output list, although
    warningCount always reflects every kept warning.
    """
    kept: List[dict] = []
    for entry in warnings:
        if not isinstance(entry, dict):
            continue
        message = str(entry.get("message") or "").strip()
        if not message:
            continue
        severity = str(entry.get("severity") or "warning").strip() or "warning"
        kept.append(
            {
                "severity": severity,
                "code": str(entry.get("code") or "").strip(),
                "style": str(entry.get("style") or "").strip(),
                "message": message,
            }
        )
    return {"warningCount": len(kept), "warnings": kept[: max(1, limit)]}


def build_payload() -> dict:
    """Assemble the complete inventory-data JSON payload.

    Combines the latest footwear export (plus any footwear rows hiding in the
    newest non-footwear export), scraped style features with manual overrides,
    validation warnings, a voucher-scan summary, and a week-over-week diff.
    """
    inventory_csv = find_latest_inventory_csv()
    features, feature_names = load_features()
    inventory = load_inventory(inventory_csv)
    non_footwear_inventory_csv = find_latest_non_footwear_inventory_csv()
    if non_footwear_inventory_csv:
        # Footwear rows occasionally land in the non-footwear export; merge any
        # that the main export does not already account for.
        inventory = merge_inventory_aggregate_map(
            inventory,
            load_mixed_non_footwear_footwear_inventory(non_footwear_inventory_csv, inventory),
        )

    inventory_date_match = DATE_RE.search(inventory_csv.name)
    inventory_as_of = None
    if inventory_date_match:
        inventory_as_of = datetime.strptime(inventory_date_match.group(1), "%Y%m%d").date().isoformat()

    # Numeric styles sort numerically and ahead of alphanumeric ones.  The
    # previous int-or-str key raised TypeError when both kinds were present
    # (normalize_style can yield values like "123ABC").
    all_styles = sorted(
        set(features) | set(inventory),
        key=lambda value: (0, int(value), "") if value.isdigit() else (1, 0, value),
    )
    styles: List[dict] = []
    inventory_count = 0
    zero_qty_count = 0
    validation_warnings: List[dict] = []

    for norm_style in all_styles:
        feature = features.get(norm_style)
        agg = inventory.get(norm_style, InventoryAggregate(norm_style=norm_style))
        if agg.total_qty > 0:
            inventory_count += 1
        else:
            zero_qty_count += 1
        # Sanity check: the per-size/width breakdown should sum to the style total.
        size_total = sum(float(qty or 0.0) for width_map in agg.size_map.values() for qty in width_map.values())
        if agg.size_map and abs(size_total - float(agg.total_qty or 0.0)) > 0.001:
            validation_warnings.append(
                {
                    "code": "inventory_size_total_mismatch",
                    "style": norm_style,
                    "message": f"Size map total {size_total:g} does not match style quantity {agg.total_qty:g}.",
                }
            )
        if agg.total_qty > 0 and feature is None:
            validation_warnings.append(
                {
                    "code": "inventory_missing_scrape_details",
                    "style": norm_style,
                    "message": "Positive-qty style is missing scrape details and is falling back to Heritage placeholder data.",
                }
            )
        if agg.total_qty > 0 and feature is not None and not feature.url:
            validation_warnings.append(
                {
                    "code": "inventory_missing_url",
                    "style": norm_style,
                    "message": "Positive-qty style is missing a product URL.",
                }
            )
        if agg.total_qty > 0 and feature is not None and not feature.image:
            validation_warnings.append(
                {
                    "code": "inventory_missing_image",
                    "style": norm_style,
                    "message": "Positive-qty style is missing a product image.",
                }
            )
        styles.append(
            {
                "style": norm_style,
                # Styles without scrape details fall back to Heritage placeholders.
                "name": feature.name if feature else "Heritage",
                "brand": feature.brand if feature else "HERITAGE",
                "productUrl": feature.url if feature else "",
                "image": feature.image if feature else "",
                "price": feature.price if feature else 0.0,
                "quantity": agg.total_qty,
                "rawStyles": sorted(agg.raw_styles),
                "sizesDisplay": format_size_map(agg.size_map),
                "sizeMap": serialize_size_map(agg.size_map),
                "features": feature.features if feature else {},
                "isHeritage": feature is None,
                "source": "inventory+scrape" if feature and norm_style in inventory else ("scrape" if feature else "inventory"),
            }
        )

    return {
        "meta": {
            "inventoryCsv": inventory_csv.name,
            "inventoryAsOf": inventory_as_of,
            "sourceRoot": str(SOURCE_ROOT),
            "generatedAt": datetime.now().isoformat(timespec="seconds"),
            "counts": {
                "styles": len(styles),
                "inventoryStyles": inventory_count,
                "zeroQtyStyles": zero_qty_count,
            },
            "validation": build_validation_payload(validation_warnings),
        },
        "config": {
            "widthOrder": WIDTH_ORDER,
            "widthDisplay": WIDTH_DISPLAY,
            "featureNames": feature_names,
            "heightOptions": HEIGHT_OPTIONS,
            "heritageLabel": HERITAGE_LABEL,
            # Feature groups the frontend treats as OR-filters rather than AND.
            "orGroups": {
                "toe": ["Any Safety Toe", "Soft Toe", "Steel Toe", "Non-Metallic Toe", "Aluminum Toe"],
                "brand": ["Red Wing", "Irish Setter", "Worx", "Heritage"],
                "gender": ["Male", "Female"],
                "height": HEIGHT_OPTIONS,
            },
        },
        "styles": styles,
        "voucher": build_voucher_payload(),
        "inventoryDiff": build_diff_payload(features),
    }


def main() -> None:
    """Build the payload and write it to OUTPUT_PATH as pretty-printed JSON."""
    OUTPUT_PATH.parent.mkdir(parents=True, exist_ok=True)
    payload = build_payload()
    OUTPUT_PATH.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    style_count = payload["meta"]["counts"]["styles"]
    print(f"Wrote {OUTPUT_PATH} with {style_count} styles")


# Run the export only when executed directly so the module stays importable
# (e.g. for save_feature_override) without side effects.
if __name__ == "__main__":
    main()
