// Shared primitives. Uses Tailwind via CDN.

function Card({ children, className = '', ...rest }) {
  return (
    <div className={`bg-white border border-neutral-200 rounded-xl ${className}`} {...rest}>
      {children}
    </div>
  );
}

function CardHeader({ title, subtitle }) {
  return (
    <div className="px-5 pt-5 pb-3">
      <div className="text-[15px] font-semibold tracking-tight text-neutral-900 leading-snug">{title}</div>
      {subtitle && <div className="text-[13px] text-neutral-500 mt-0.5 leading-snug">{subtitle}</div>}
    </div>
  );
}

function ContactLinks() {
  return (
    <div className="flex flex-wrap gap-2">
      <a href="mailto:sales@smith3d.com"
        className="inline-flex items-center gap-1.5 h-9 pl-2.5 pr-3 rounded-full border border-neutral-200 bg-white hover:border-neutral-300 hover:bg-neutral-50 transition text-[12.5px] font-medium text-neutral-800 active:scale-[0.98]">
        <span className="w-5 h-5 rounded-full bg-neutral-100 grid place-items-center">
          <IconMail size={12} className="text-neutral-600"/>
        </span>
        sales@smith3d.com
      </a>
      <a href="https://api.whatsapp.com/send/?phone=60103443128&text&type=phone_number&app_absent=0"
        target="_blank" rel="noopener noreferrer"
        className="inline-flex items-center gap-1.5 h-9 pl-2.5 pr-3 rounded-full border border-neutral-200 bg-white hover:border-neutral-300 hover:bg-neutral-50 transition text-[12.5px] font-medium text-neutral-800 active:scale-[0.98]">
        <span className="w-5 h-5 rounded-full bg-neutral-100 grid place-items-center">
          <IconWhatsapp size={12} className="text-[#25D366]"/>
        </span>
        WhatsApp us
      </a>
    </div>
  );
}

function Button({ variant = 'primary', size = 'md', className = '', children, disabled, ...rest }) {
  const variants = {
    primary: 'bg-[var(--s3-orange)] hover:bg-[var(--s3-orange-600)] text-white shadow-sm shadow-orange-900/10',
    secondary: 'bg-white border border-neutral-300 hover:border-neutral-400 text-neutral-900',
    ghost: 'hover:bg-neutral-100 text-neutral-700',
    subtle: 'bg-neutral-100 hover:bg-neutral-200 text-neutral-800',
  };
  const sizes = {
    sm: 'h-8 px-3 text-[13px]',
    md: 'h-10 px-4 text-[14px]',
    lg: 'h-12 px-5 text-[15px]',
    xl: 'h-14 px-6 text-[16px]',
  };
  return (
    <button
      disabled={disabled}
      className={`inline-flex items-center justify-center gap-2 rounded-lg font-medium transition-all
        disabled:opacity-50 disabled:cursor-not-allowed disabled:shadow-none focus:outline-none
        focus-visible:ring-2 focus-visible:ring-[var(--s3-orange)]/40 active:scale-[0.98]
        ${variants[variant]} ${sizes[size]} ${className}`}
      {...rest}
    >{children}</button>
  );
}

function Label({ children, htmlFor, required, hint }) {
  return (
    <label htmlFor={htmlFor} className="block text-[13px] font-medium text-neutral-800 mb-1.5">
      {children}
      {required && <span className="text-[var(--s3-orange)] ml-0.5">*</span>}
      {hint && <span className="ml-2 font-normal text-neutral-400">{hint}</span>}
    </label>
  );
}

function Input({ mono, className = '', invalid, ...rest }) {
  return (
    <input
      className={`w-full h-11 px-3 rounded-lg border bg-white
        ${invalid ? 'border-red-400 focus:border-red-500 focus:ring-red-200' : 'border-neutral-300 focus:border-[var(--s3-orange)] focus:ring-orange-200'}
        focus:ring-4 focus:outline-none
        text-[15px] text-neutral-900 placeholder:text-neutral-400
        transition-colors
        ${mono ? 'font-mono tracking-wider' : ''}
        ${className}`}
      {...rest}
    />
  );
}

function Select({ className = '', children, ...rest }) {
  return (
    <div className="relative">
      <select
        className={`w-full h-11 px-3 pr-9 rounded-lg border border-neutral-300 bg-white appearance-none
          focus:border-[var(--s3-orange)] focus:ring-4 focus:ring-orange-200 focus:outline-none
          text-[15px] text-neutral-900 ${className}`}
        {...rest}
      >{children}</select>
      <IconChevronDown size={16} className="absolute right-3 top-1/2 -translate-y-1/2 text-neutral-400 pointer-events-none"/>
    </div>
  );
}

// ─────────────────────────────────────────────────────────────
// Photo capture tile — stores the underlying File object so the
// form submit can append it to FormData.
// ─────────────────────────────────────────────────────────────
// Hard cap on photos a single PhotoTile accepts (default for its `max` prop).
const MAX_PHOTOS = 5;
// Monotonic id source for photo items; module-level so ids stay unique even
// if a PhotoTile unmounts and remounts.
let _photoUid = 0;

function PhotoTile({ accept = 'image/*', capture = 'environment', label, helper, max = MAX_PHOTOS, onChange }) {
  const [items, setItems] = React.useState([]);
  const [expandedIdx, setExpandedIdx] = React.useState(null);
  const inputRef = React.useRef(null);

  React.useEffect(() => {
    const done = items.filter(i => i.state === 'done');
    onChange?.(done.length ? done : null);
  }, [items]);

  const handleFiles = (fileList) => {
    const files = Array.from(fileList || []);
    if (!files.length) return;
    const room = max - items.length;
    const accepted = files.slice(0, room);
    const fresh = accepted.map(f => ({
      id: ++_photoUid,
      file: f,
      name: f.name, size: f.size, mime: f.type,
      preview: URL.createObjectURL(f),
      progress: 0, state: 'uploading',
    }));
    setItems(prev => [...prev, ...fresh]);
    fresh.forEach(fi => simulateReady(fi.id));
  };

  // We don't actually upload yet — the file goes along with the form submit.
  // Run a short "readying" animation so the UX matches the design.
  const simulateReady = (id) => {
    let p = 0;
    const tick = () => {
      p = Math.min(100, p + 22 + Math.random() * 18);
      setItems(prev => prev.map(it => it.id === id
        ? { ...it, progress: Math.round(p), state: p >= 100 ? 'done' : 'uploading' }
        : it));
      if (p < 100) setTimeout(tick, 80);
    };
    setTimeout(tick, 80);
  };

  const removeItem = (id) => {
    setItems(prev => prev.filter(it => it.id !== id));
  };

  const fmtSize = (n) => n > 1e6 ? `${(n/1e6).toFixed(1)} MB` : `${Math.round(n/1e3)} KB`;
  const truncate = (s, n=22) => s.length > n ? s.slice(0, n-3) + '...' : s;
  const canAddMore = items.length < max;
  const doneCount = items.filter(i => i.state === 'done').length;

  return (
    <>
      <input ref={inputRef} type="file" accept={accept} capture={capture} multiple className="hidden"
        onChange={(e) => { handleFiles(e.target.files); if (inputRef.current) inputRef.current.value = ''; }}/>

      {items.length === 0 ? (
        <button type="button"
          onClick={() => inputRef.current?.click()}
          className="group w-full rounded-xl border-[1.5px] border-dashed border-neutral-300 hover:border-[var(--s3-orange)] hover:bg-orange-50/40
            transition-colors p-5 flex flex-col items-center gap-2 text-center">
          <div className="w-11 h-11 rounded-full bg-neutral-100 group-hover:bg-[var(--s3-orange)]/10 grid place-items-center transition-colors">
            <IconCamera size={20} className="text-neutral-500 group-hover:text-[var(--s3-orange)] transition-colors"/>
          </div>
          <div className="text-[14px] font-medium text-neutral-800">{label}</div>
          {helper && <div className="text-[12px] text-neutral-500">{helper}</div>}
          <div className="text-[11px] text-neutral-400 mt-0.5">Up to {max} · tap to add multiple</div>
        </button>
      ) : (
        <div className="space-y-2">
          <div className="grid grid-cols-3 gap-2">
            {items.map((it, idx) => (
              <div key={it.id} className="relative rounded-lg overflow-hidden border border-neutral-200 bg-neutral-100 aspect-square group">
                <button type="button" className="block w-full h-full"
                  onClick={() => it.state === 'done' && setExpandedIdx(idx)}>
                  {it.mime && it.mime.startsWith('image/') ? (
                    <img src={it.preview} alt="" className="w-full h-full object-cover"/>
                  ) : (
                    <div className="w-full h-full bg-gradient-to-br from-neutral-50 to-neutral-200 grid place-items-center">
                      <IconFileText size={28} className="text-neutral-400"/>
                    </div>
                  )}
                </button>

                {it.state === 'uploading' && (
                  <>
                    <div className="absolute inset-0 bg-black/30 grid place-items-center">
                      <svg className="animate-spin text-white" width="20" height="20" viewBox="0 0 24 24" fill="none">
                        <circle cx="12" cy="12" r="10" stroke="currentColor" strokeOpacity="0.3" strokeWidth="3"/>
                        <path d="M12 2a10 10 0 0 1 10 10" stroke="currentColor" strokeWidth="3" strokeLinecap="round"/>
                      </svg>
                    </div>
                    <div className="absolute inset-x-0 bottom-0 h-1 bg-black/30">
                      <div className="h-full bg-[var(--s3-orange)] transition-all" style={{ width: `${it.progress}%` }}/>
                    </div>
                  </>
                )}

                <button type="button"
                  onClick={(e) => { e.stopPropagation(); removeItem(it.id); }}
                  className="absolute top-1 right-1 w-6 h-6 rounded-full bg-black/65 backdrop-blur text-white grid place-items-center hover:bg-black/85">
                  <IconX size={12} sw={2.8}/>
                </button>

                <div className="absolute bottom-1 left-1 text-[9.5px] font-mono font-semibold px-1.5 py-0.5 rounded bg-black/60 text-white backdrop-blur">
                  {idx + 1}
                </div>
              </div>
            ))}

            {canAddMore && (
              <button type="button"
                onClick={() => inputRef.current?.click()}
                className="rounded-lg border-[1.5px] border-dashed border-neutral-300 hover:border-[var(--s3-orange)] hover:bg-orange-50/40
                  transition-colors aspect-square flex flex-col items-center justify-center gap-1 text-neutral-500 hover:text-[var(--s3-orange)]">
                <IconCamera size={18}/>
                <span className="text-[11px] font-medium">Add more</span>
              </button>
            )}
          </div>

          <div className="flex items-center justify-between px-1 text-[11.5px] text-neutral-500">
            <span>
              {doneCount === items.length
                ? `${doneCount} of ${max} photo${doneCount === 1 ? '' : 's'}`
                : `${doneCount}/${items.length} uploaded`}
            </span>
            {!canAddMore && <span className="text-neutral-400">Max {max} reached</span>}
          </div>
        </div>
      )}

      {expandedIdx !== null && items[expandedIdx] && (
        <div className="fixed inset-0 z-50 bg-black/90 grid place-items-center p-6" onClick={() => setExpandedIdx(null)}>
          {items[expandedIdx].mime?.startsWith('image/') ? (
            <img src={items[expandedIdx].preview} className="max-w-full max-h-full object-contain rounded-lg" alt=""/>
          ) : (
            <div className="bg-white rounded-lg p-10 max-w-md text-neutral-800 text-center">
              <IconFileText size={48} className="text-neutral-400 mx-auto"/>
              <div className="mt-3 font-medium">{items[expandedIdx].name}</div>
              <a href={items[expandedIdx].preview} target="_blank" rel="noopener noreferrer"
                className="inline-block mt-3 text-[13px] text-[var(--s3-orange)] underline">Open file</a>
            </div>
          )}
          {expandedIdx > 0 && (
            <button onClick={(e) => { e.stopPropagation(); setExpandedIdx(i => i - 1); }}
              className="absolute left-4 top-1/2 -translate-y-1/2 w-10 h-10 rounded-full bg-white/10 text-white grid place-items-center hover:bg-white/20">
              <IconChevronLeft size={22}/>
            </button>
          )}
          {expandedIdx < items.length - 1 && (
            <button onClick={(e) => { e.stopPropagation(); setExpandedIdx(i => i + 1); }}
              className="absolute right-4 top-1/2 -translate-y-1/2 w-10 h-10 rounded-full bg-white/10 text-white grid place-items-center hover:bg-white/20">
              <IconChevronRight size={22}/>
            </button>
          )}
          <button onClick={() => setExpandedIdx(null)}
            className="absolute top-4 right-4 w-10 h-10 rounded-full bg-white/10 text-white grid place-items-center">
            <IconX size={20}/>
          </button>
          <div className="absolute bottom-6 left-1/2 -translate-x-1/2 flex items-center gap-2 px-3 py-1.5 rounded-full bg-black/60 text-white text-[12px] font-medium">
            <span className="font-mono">{expandedIdx + 1} / {items.length}</span>
            <span className="text-white/50">·</span>
            <span className="truncate max-w-[180px]">{truncate(items[expandedIdx].name, 28)}</span>
            <span className="text-white/50">·</span>
            <span>{fmtSize(items[expandedIdx].size)}</span>
          </div>
        </div>
      )}
    </>
  );
}

// ─────────────────────────────────────────────────────────────
// Scanner modal — live camera barcode scanner.
//   - Prefers native BarcodeDetector (Android Chrome / Edge).
//   - Falls back to ZXing UMD from CDN (iOS Safari, older browsers).
//   - Calls onDetect(serialString) once, then closes.
// Whatever the decoder reads, we accept. The user can still edit the SN
// field if the scan picked up the wrong code (e.g. a wifi QR on the sticker).
// ─────────────────────────────────────────────────────────────

function loadZXing() {
  // Lazy-load the ZXing UMD bundle from CDN; resolves with the global ZXing
  // namespace. The in-flight promise is cached on window so concurrent
  // callers share one <script> injection.
  if (window.ZXing?.BrowserMultiFormatReader) return Promise.resolve(window.ZXing);
  if (window.__zxPromise) return window.__zxPromise;
  window.__zxPromise = new Promise((resolve, reject) => {
    const s = document.createElement('script');
    s.src = 'https://unpkg.com/@zxing/library@0.20.0/umd/index.min.js';
    s.async = true;
    s.onload = () => window.ZXing ? resolve(window.ZXing) : reject(new Error('zxing not loaded'));
    s.onerror = () => reject(new Error('zxing script failed to load'));
    document.head.appendChild(s);
  });
  // Fix: previously a failed load cached the rejected promise forever, so a
  // transient network error poisoned every future call. Drop the cache on
  // rejection so the next call can retry. (Callers still see the rejection
  // via the promise returned below.)
  window.__zxPromise.catch(() => { window.__zxPromise = null; });
  return window.__zxPromise;
}

function ScannerModal({ open, onClose, onDetect }) {
  const videoRef = React.useRef(null);
  const streamRef = React.useRef(null);
  const zxReaderRef = React.useRef(null);
  const stoppedRef = React.useRef(false);
  const snapFnRef = React.useRef(null); // set by the scanner effect; called by the Snap button
  const fileDecodeFnRef = React.useRef(null); // set by the scanner effect; called by file-upload fallback
  const fileInputRef = React.useRef(null);
  const guideRef = React.useRef(null); // the on-screen aim box; used to compute which region of the photo to decode
  const [snapBusy, setSnapBusy] = React.useState(false);
  // URL of the most recent captured still, shown as a thumbnail so the user
  // can see whether the phone actually focused. If the preview is blurry,
  // no decoder will rescue it — the user needs to move farther away or use
  // the file-upload path (native camera has better autofocus).
  const [snapPreview, setSnapPreview] = React.useState(null);

  const [torch, setTorch] = React.useState(false);
  const [torchSupported, setTorchSupported] = React.useState(false);
  const [flashGreen, setFlashGreen] = React.useState(false);
  const [scanline, setScanline] = React.useState(0);
  const [status, setStatus] = React.useState('starting'); // starting | scanning | error
  const [error, setError] = React.useState('');
  const [engine, setEngine] = React.useState(''); // 'native' | 'zxing' | ''
  const [lastSeen, setLastSeen] = React.useState(''); // raw text of the most recent detection
  const [logs, setLogs] = React.useState([]); // on-screen debug log
  const [showLogs, setShowLogs] = React.useState(true);
  const detectCountRef = React.useRef(0);

  const dlog = React.useCallback((...args) => {
    const msg = args.map(a => {
      if (a instanceof Error) return `${a.name}: ${a.message}`;
      if (typeof a === 'object') { try { return JSON.stringify(a); } catch { return String(a); } }
      return String(a);
    }).join(' ');
    const ts = new Date().toISOString().slice(11, 23);
    try { console.log('[scanner]', ...args); } catch {}
    setLogs(prev => [...prev.slice(-19), `${ts} ${msg}`]);
  }, []);

  // Scanline animation
  React.useEffect(() => {
    if (!open) return;
    let raf;
    const start = performance.now();
    const tick = (t) => {
      setScanline(((t - start) / 1800) % 1);
      raf = requestAnimationFrame(tick);
    };
    raf = requestAnimationFrame(tick);
    return () => cancelAnimationFrame(raf);
  }, [open]);

  // Camera + detection lifecycle
  React.useEffect(() => {
    if (!open) return;
    stoppedRef.current = false;
    setError('');
    setStatus('starting');
    setTorch(false);
    setTorchSupported(false);
    setFlashGreen(false);
    setLogs([]);
    setLastSeen('');
    setEngine('');
    setSnapPreview(prev => { if (prev) { try { URL.revokeObjectURL(prev); } catch {} } return null; });
    detectCountRef.current = 0;
    dlog('open: ua=', navigator.userAgent.slice(0, 80));
    dlog('BarcodeDetector=', 'BarcodeDetector' in window, 'mediaDevices=', !!navigator.mediaDevices);

    let rafId = 0;

    // Phantom-decode guard: require the SAME text to be seen twice in a row
    // before accepting. Single-frame reads off patterns/moiré/shadows won't
    // repeat; a real barcode held in frame decodes identically tick-to-tick.
    let lastCandidate = '';
    let candidateHits = 0;

    const handleResult = (raw) => {
      if (stoppedRef.current) return;
      const text = String(raw || '').trim();
      if (!text) return;
      detectCountRef.current += 1;
      setLastSeen(text.slice(0, 64));

      // Phantoms from blurry frames decode to tiny fragments like "C0B" or
      // "x4". Real product barcodes are always 6+ chars. Reject anything
      // shorter outright — it's noise, not a successful scan.
      if (text.length < 6) {
        dlog(`reject too-short (${text.length}): "${text}"`);
        return;
      }

      if (text === lastCandidate) {
        candidateHits += 1;
      } else {
        lastCandidate = text;
        candidateHits = 1;
      }
      dlog(`detect #${detectCountRef.current} (hit ${candidateHits}/2): "${text}"`);
      if (candidateHits < 2) return; // wait for a second matching read

      stoppedRef.current = true;
      setFlashGreen(true);
      try { navigator.vibrate && navigator.vibrate(80); } catch {}
      setTimeout(() => {
        onDetect?.(text);
        onClose?.();
      }, 280);
    };

    (async () => {
      if (!navigator.mediaDevices?.getUserMedia) {
        dlog('FATAL: no getUserMedia');
        setError('Camera not available in this browser — please type the SN manually.');
        setStatus('error');
        return;
      }
      let stream;
      try {
        dlog('requesting camera…');
        stream = await navigator.mediaDevices.getUserMedia({
          video: {
            facingMode: { ideal: 'environment' },
            width: { ideal: 1920 },
            height: { ideal: 1080 },
          },
          audio: false,
        });
        dlog('camera OK:', stream.getVideoTracks()[0]?.label || '(no label)');
      } catch (e) {
        dlog('getUserMedia failed:', e);
        const msg =
          e?.name === 'NotAllowedError' ? 'Camera permission denied. Allow camera access or type the SN manually.'
          : e?.name === 'NotFoundError' ? 'No camera found on this device.'
          : e?.name === 'NotReadableError' ? 'Camera is busy — close other apps and try again.'
          : 'Could not start camera. Please type the SN manually.';
        setError(msg);
        setStatus('error');
        return;
      }
      if (stoppedRef.current) { stream.getTracks().forEach(t => t.stop()); return; }
      streamRef.current = stream;

      const track = stream.getVideoTracks()[0];
      try {
        const caps = track.getCapabilities?.() || {};
        dlog('track caps:', { torch: !!caps.torch, focusMode: caps.focusMode, zoom: caps.zoom, w: caps.width?.max, h: caps.height?.max });
        if (caps.torch) setTorchSupported(true);
        // Request continuous autofocus + modest zoom — dramatically improves
        // small-sticker barcode decode on modern phones.
        const advanced = [];
        if (Array.isArray(caps.focusMode) && caps.focusMode.includes('continuous')) {
          advanced.push({ focusMode: 'continuous' });
        }
        if (caps.zoom && typeof caps.zoom.min === 'number' && typeof caps.zoom.max === 'number') {
          const z = Math.min(2, Math.max(caps.zoom.min, Math.min(caps.zoom.max, 1.6)));
          advanced.push({ zoom: z });
        }
        if (advanced.length) {
          try { await track.applyConstraints({ advanced }); dlog('applied constraints:', advanced); }
          catch (ce) { dlog('applyConstraints failed:', ce); }
        }
      } catch (e) { dlog('getCapabilities failed:', e); }

      const video = videoRef.current;
      if (!video) { dlog('FATAL: no video element'); return; }
      // Prep attributes only — do NOT set srcObject or call play() yet.
      // zxing's decodeFromStream() needs to be the one that fires the video's
      // "playing" event, otherwise its internal playVideoOnLoadAsync never
      // resolves and decodeContinuously is never reached.
      video.setAttribute('playsinline', 'true');
      video.setAttribute('autoplay', 'true');
      video.muted = true;
      setStatus('scanning');

      // Prefer ZXing everywhere — its Code 128 / Data Matrix decoders are
      // significantly more tolerant than Chrome's native BarcodeDetector
      // (which misses small / glossy / angled Bambu SN stickers).
      try {
        dlog('loading zxing…');
        const ZX = await loadZXing();
        if (stoppedRef.current) return;
        dlog('zxing loaded:', {
          hasBMFR: !!ZX?.BrowserMultiFormatReader,
          hasHintType: !!ZX?.DecodeHintType,
          hasFormats: !!ZX?.BarcodeFormat,
        });
        if (!ZX?.BrowserMultiFormatReader) throw new Error('ZXing.BrowserMultiFormatReader missing');
        setEngine('zxing');
        const hints = new Map();
        if (ZX.DecodeHintType && ZX.BarcodeFormat) {
          hints.set(ZX.DecodeHintType.POSSIBLE_FORMATS, [
            ZX.BarcodeFormat.CODE_128,
            ZX.BarcodeFormat.CODE_39,
            ZX.BarcodeFormat.CODE_93,
            ZX.BarcodeFormat.DATA_MATRIX,
            ZX.BarcodeFormat.QR_CODE,
            ZX.BarcodeFormat.EAN_13,
            ZX.BarcodeFormat.EAN_8,
            ZX.BarcodeFormat.ITF,
            ZX.BarcodeFormat.CODABAR,
          ]);
          hints.set(ZX.DecodeHintType.TRY_HARDER, true);
        }
        let reader;
        try { reader = new ZX.BrowserMultiFormatReader(hints, 120); }
        catch (e) { dlog('BMFR(hints,120) ctor failed, retrying no-args:', e); reader = new ZX.BrowserMultiFormatReader(); }
        zxReaderRef.current = reader;

        // Snap-to-decode: capture a real high-res still (not a blurry video
        // frame) and decode ONLY the on-screen aim box.
        //
        // Capture (highest-fidelity first):
        //  1. ImageCapture.takePhoto() — camera shutter at sensor resolution
        //  2. ImageCapture.grabFrame() — ImageBitmap of the current frame
        //  3. canvas.drawImage(video) — last resort
        //
        // Decode: compute the aim-box rect in 0..1 coords (DOM-measured, so
        // it tracks the actual orange guide on screen regardless of
        // object-cover cropping), then decode that rect + two safety-padded
        // expansions. Each rect runs 4 variants: raw, B/W threshold, 2×
        // upscale, 2× upscale + threshold — 12 decode attempts total, all
        // inside what the user framed. Decoding the full photo (old code)
        // caused false matches on surrounding text / noise.
        // Convert a canvas to pure black/white by thresholding luminance.
        // Dense Code-128 often decodes reliably in B/W when the original
        // grayscale frame is too noisy (glare, compression artifacts).
        const thresholdCanvas = (src) => {
          const cv = document.createElement('canvas');
          cv.width = src.width; cv.height = src.height;
          const cx = cv.getContext('2d');
          cx.drawImage(src, 0, 0);
          const img = cx.getImageData(0, 0, cv.width, cv.height);
          const d = img.data;
          // Otsu-ish: sample mean luminance and threshold around it. Pure 128
          // cutoff fails on dim images; adaptive mean tracks exposure.
          let sum = 0;
          const N = d.length / 4;
          for (let i = 0; i < d.length; i += 4) {
            sum += (d[i] * 0.299 + d[i+1] * 0.587 + d[i+2] * 0.114);
          }
          const T = Math.max(60, Math.min(200, (sum / N) | 0));
          for (let i = 0; i < d.length; i += 4) {
            const g = (d[i] * 0.299 + d[i+1] * 0.587 + d[i+2] * 0.114);
            const v = g < T ? 0 : 255;
            d[i] = v; d[i+1] = v; d[i+2] = v;
          }
          cx.putImageData(img, 0, 0);
          return cv;
        };

        // Upscale with nearest-neighbor — keeps bar edges crisp so ZXing's
        // edge detector doesn't smear adjacent bars into one wide module.
        const upscaleCanvas = (src, scale) => {
          const cv = document.createElement('canvas');
          cv.width = Math.round(src.width * scale);
          cv.height = Math.round(src.height * scale);
          const cx = cv.getContext('2d');
          cx.imageSmoothingEnabled = false;
          cx.drawImage(src, 0, 0, cv.width, cv.height);
          return cv;
        };

        // Low-level MultiFormatReader used for Snap-decodes. We avoid
        // BrowserMultiFormatReader.decodeFromImageUrl because in the 0.20.0
        // UMD it silently returns NotFoundException on perfectly-clean
        // barcodes (verified in scanner-test.html: 0/10 via that path,
        // 7/10 via HTMLCanvasElementLuminanceSource + MultiFormatReader).
        // Going directly through the luminance source works reliably.
        const mfReader = ZX.MultiFormatReader ? new ZX.MultiFormatReader() : null;
        if (mfReader) {
          try { mfReader.setHints(hints); } catch (e) { dlog('mfReader.setHints failed', e); }
        } else {
          dlog('WARN: ZX.MultiFormatReader missing — Snap decode will be limited');
        }

        const decodeCanvasLowLevel = (cv) => {
          if (!mfReader || !ZX.HTMLCanvasElementLuminanceSource || !ZX.HybridBinarizer || !ZX.BinaryBitmap) return null;
          try {
            const lum = new ZX.HTMLCanvasElementLuminanceSource(cv);
            const bin = new ZX.BinaryBitmap(new ZX.HybridBinarizer(lum));
            const r = mfReader.decode(bin);
            mfReader.reset();
            return r ? r.getText() : null;
          } catch (e) {
            mfReader.reset();
            const n = e?.name || '';
            if (!/NotFound/i.test(n) && n.length > 2 && e?.message) {
              dlog(`snap[${n}] zxing —`, e.message);
            }
            return null;
          }
        };

        const tryDecodeCanvas = async (cv, label) => {
          // try ZXing (low-level path — see decodeCanvasLowLevel comment)
          const text = decodeCanvasLowLevel(cv);
          if (text) { dlog(`snap[${label}] zxing OK`); return text; }

          // try native BarcodeDetector
          if ('BarcodeDetector' in window) {
            try {
              const det = new window.BarcodeDetector();
              const codes = await det.detect(cv);
              if (codes && codes.length) {
                dlog(`snap[${label}] native OK`);
                return codes[0].rawValue;
              }
            } catch (ne) { /* quiet */ }
          }
          return null;
        };

        // Compute the on-screen aim box as 0..1 relative coords within the
        // video's full native frame. The <video> element uses object-cover,
        // meaning it crops the video to fill the viewport; so a point on the
        // guide is NOT a simple percentage of the photo — we have to back out
        // the object-cover scaling. Returning relative coords lets us apply
        // the same rect to any captured image (takePhoto, grabFrame, canvas)
        // assuming matching field of view.
        const computeAimRelative = () => {
          const v = videoRef.current;
          const g = guideRef.current;
          if (!v || !g) return null;
          const vw = v.videoWidth, vh = v.videoHeight;
          if (!vw || !vh) return null;
          const vRect = v.getBoundingClientRect();
          const gRect = g.getBoundingClientRect();
          if (!vRect.width || !vRect.height) return null;

          const containerAspect = vRect.width / vRect.height;
          const contentAspect = vw / vh;
          let scale, offX = 0, offY = 0;
          if (contentAspect > containerAspect) {
            // video content is wider than viewport → fits height, cropped L/R
            scale = vRect.height / vh;
            offX = (vw * scale - vRect.width) / 2;
          } else {
            // video content is taller than viewport → fits width, cropped T/B
            scale = vRect.width / vw;
            offY = (vh * scale - vRect.height) / 2;
          }
          // Map guide screen rect → video-native px
          const x = (gRect.left - vRect.left + offX) / scale;
          const y = (gRect.top - vRect.top + offY) / scale;
          const w = gRect.width / scale;
          const h = gRect.height / scale;
          // Normalize, clamp
          return {
            rx: Math.max(0, Math.min(1, x / vw)),
            ry: Math.max(0, Math.min(1, y / vh)),
            rw: Math.max(0, Math.min(1, w / vw)),
            rh: Math.max(0, Math.min(1, h / vh)),
          };
        };

        // Expand a relative rect outward, clamped to [0,1]. Used as a safety
        // net in case the user's aim is slightly off or the photo's FoV
        // differs from the video stream's.
        const padRect = (r, pad) => ({
          rx: Math.max(0, r.rx - r.rw * pad),
          ry: Math.max(0, r.ry - r.rh * pad),
          rw: Math.min(1 - Math.max(0, r.rx - r.rw * pad), r.rw * (1 + 2 * pad)),
          rh: Math.min(1 - Math.max(0, r.ry - r.rh * pad), r.rh * (1 + 2 * pad)),
        });

        // Decode one crop (rect in 0..1 relative coords) through the 4
        // pixel-processing variants. Returns text on first hit, else null.
        const decodeRect = async (bitmap, width, height, rel, label) => {
          const x = Math.max(0, Math.round(rel.rx * width));
          const y = Math.max(0, Math.round(rel.ry * height));
          const w = Math.max(1, Math.round(rel.rw * width));
          const h = Math.max(1, Math.round(rel.rh * height));
          const base = document.createElement('canvas');
          base.width = w; base.height = h;
          base.getContext('2d').drawImage(bitmap, x, y, w, h, 0, 0, w, h);
          const variants = [
            { label, cv: base },
            { label: `${label}+thr`, cv: thresholdCanvas(base) },
            { label: `${label}+2x`, cv: upscaleCanvas(base, 2) },
            { label: `${label}+2x+thr`, cv: thresholdCanvas(upscaleCanvas(base, 2)) },
          ];
          for (const v of variants) {
            const text = await tryDecodeCanvas(v.cv, v.label);
            if (text) return text;
          }
          dlog(`snap[${label}] no hit (${w}x${h})`);
          return null;
        };

        // Decode ONLY inside the on-screen aim box (plus small/large safety
        // pads in case the aim is slightly off or photo FoV differs from
        // video). This is what the user actually framed — decoding the full
        // photo causes false positives on surrounding print / noise.
        const decodeAim = async (bitmap, width, height) => {
          const aim = computeAimRelative();
          if (!aim) {
            // Video metrics unavailable — use a fixed center band instead.
            dlog('snap: no aim rect (video not ready) — falling back to center band');
            return decodeRect(bitmap, width, height, { rx: 0.11, ry: 0.35, rw: 0.78, rh: 0.30 }, 'center-band');
          }
          dlog(`snap: aim = ${aim.rx.toFixed(2)},${aim.ry.toFixed(2)} ${aim.rw.toFixed(2)}x${aim.rh.toFixed(2)}`);
          // Exact aim box first, then progressively padded versions.
          const candidates = [
            [aim, 'aim'],
            [padRect(aim, 0.15), 'aim+15%'],
            [padRect(aim, 0.35), 'aim+35%'],
          ];
          for (const [rect, label] of candidates) {
            const text = await decodeRect(bitmap, width, height, rect, label);
            if (text) return text;
          }
          return null;
        };

        // Broader fallback: used for file-uploaded photos, where we have no
        // aim box (the user shot the barcode with their native camera).
        const decodeBroad = async (bitmap, width, height) => {
          // Widest-to-narrowest sweep over likely barcode regions.
          const regions = [
            ['full', { rx: 0, ry: 0, rw: 1, rh: 1 }],
            ['center60', { rx: 0.2, ry: 0.2, rw: 0.6, rh: 0.6 }],
            ['center-band', { rx: 0.11, ry: 0.35, rw: 0.78, rh: 0.30 }],
          ];
          for (const [label, rect] of regions) {
            const text = await decodeRect(bitmap, width, height, rect, label);
            if (text) return text;
          }
          return null;
        };

        // Render a canvas/ImageBitmap to a blob URL so we can show it as a
        // preview thumbnail. If a rel-rect is given, crop to that region
        // first — letting the user see exactly what the decoder analyzed.
        // If the preview looks blurry or clipped, no decoder will rescue it.
        const bitmapToPreviewURL = async (bitmap, rel) => {
          try {
            const bw = bitmap.width || 400;
            const bh = bitmap.height || 300;
            let sx = 0;
            let sy = 0;
            let sw = bw;
            let sh = bh;
            if (rel) {
              sx = Math.max(0, Math.round(rel.rx * bw));
              sy = Math.max(0, Math.round(rel.ry * bh));
              sw = Math.max(1, Math.round(rel.rw * bw));
              sh = Math.max(1, Math.round(rel.rh * bh));
            }
            // Downscale previews so they don't balloon memory — 400px wide
            // is plenty to eyeball focus quality.
            const scale = Math.min(1, 400 / sw);
            const cv = document.createElement('canvas');
            cv.width = Math.max(1, Math.round(sw * scale));
            cv.height = Math.max(1, Math.round(sh * scale));
            cv.getContext('2d').drawImage(bitmap, sx, sy, sw, sh, 0, 0, cv.width, cv.height);
            return await new Promise((resolve) => {
              cv.toBlob((blob) => resolve(blob ? URL.createObjectURL(blob) : null), 'image/jpeg', 0.8);
            });
          } catch {
            // Preview is cosmetic only — never let it break the decode path.
            return null;
          }
        };

        // Track the most recent preview URL so we can revoke the old one
        // before overwriting (avoid leaking object URLs).
        let lastPreviewURL = null;
        const setPreview = (url) => {
          const previous = lastPreviewURL;
          lastPreviewURL = url;
          if (previous) {
            try { URL.revokeObjectURL(previous); } catch {}
          }
          setSnapPreview(url);
        };

        // "Snap to decode": capture the sharpest still available, show the
        // user the crop we analyze, then decode only the aim region.
        // Capture quality fallbacks, in order: takePhoto (real shutter) →
        // grabFrame (stream frame) → drawing the <video> element to canvas.
        snapFnRef.current = async () => {
          if (stoppedRef.current) return;
          const v = videoRef.current;
          const r = zxReaderRef.current;
          if (!v || !r) { dlog('snap: not ready'); return; }
          setSnapBusy(true);
          try {
            let bitmap = null;
            let bw = 0, bh = 0;
            const track = streamRef.current?.getVideoTracks?.()[0];

            // 1. Try ImageCapture.takePhoto — actual camera shutter
            if (track && typeof window.ImageCapture === 'function') {
              try {
                const ic = new window.ImageCapture(track);
                const blob = await ic.takePhoto();
                bitmap = await createImageBitmap(blob);
                bw = bitmap.width; bh = bitmap.height;
                dlog(`snap: takePhoto OK ${bw}x${bh}`);
              } catch (e) {
                // Common on desktop webcams — fall through to grabFrame.
                dlog('snap: takePhoto failed —', e?.name || e?.message);
              }
            }
            // 2. Fall back to grabFrame
            if (!bitmap && track && typeof window.ImageCapture === 'function') {
              try {
                const ic = new window.ImageCapture(track);
                bitmap = await ic.grabFrame();
                bw = bitmap.width; bh = bitmap.height;
                dlog(`snap: grabFrame OK ${bw}x${bh}`);
              } catch (e) {
                dlog('snap: grabFrame failed —', e?.name || e?.message);
              }
            }
            // 3. Fall back to canvas draw from <video>
            if (!bitmap) {
              // Zero dimensions means the stream isn't producing frames yet;
              // nothing to capture (finally still clears the busy flag).
              if (!v.videoWidth || !v.videoHeight) { dlog('snap: video dims 0'); return; }
              const cv = document.createElement('canvas');
              cv.width = v.videoWidth;
              cv.height = v.videoHeight;
              cv.getContext('2d').drawImage(v, 0, 0, cv.width, cv.height);
              bitmap = cv;
              bw = cv.width; bh = cv.height;
              dlog(`snap: video-frame fallback ${bw}x${bh}`);
            }

            // Preview the aim-area crop (what the decoder actually looks at).
            // If this looks blurry or mis-framed, no decode will rescue it —
            // user knows to adjust aim or try Upload photo.
            const aim = computeAimRelative();
            const url = await bitmapToPreviewURL(bitmap, aim || undefined);
            if (url) setPreview(url);

            const text = await decodeAim(bitmap, bw, bh);
            if (text) {
              dlog('snap: decoded —', text.slice(0, 80));
              if (!stoppedRef.current) {
                // Latch stopped so the live scan loop can't double-fire.
                stoppedRef.current = true;
                setLastSeen(text.slice(0, 64));
                setFlashGreen(true);
                try { navigator.vibrate && navigator.vibrate(80); } catch {}
                // Short delay so the green flash is visible before closing.
                setTimeout(() => { onDetect?.(text); onClose?.(); }, 280);
              }
              return;
            }
            dlog('snap: NO BARCODE found in any region');
          } finally {
            setSnapBusy(false);
          }
        };

        // File upload fallback: native camera UI has real autofocus and
        // tap-to-focus, so a photo the user takes with it is usually sharper
        // than our webcam-stream grab. Decode path is identical.
        fileDecodeFnRef.current = async (file) => {
          if (!file || stoppedRef.current) return;
          if (!zxReaderRef.current) { dlog('file: not ready'); return; }
          setSnapBusy(true);
          try {
            let bitmap;
            try {
              bitmap = await createImageBitmap(file);
            } catch (e) {
              dlog('file: createImageBitmap failed —', e?.name || e?.message);
              return;
            }
            const bw = bitmap.width;
            const bh = bitmap.height;
            dlog(`file: loaded ${bw}x${bh} (${(file.size / 1024) | 0}KB)`);
            // Show the whole photo as the preview — there is no aim box here.
            const url = await bitmapToPreviewURL(bitmap);
            if (url) setPreview(url);
            const text = await decodeBroad(bitmap, bw, bh);
            if (!text) {
              dlog('file: NO BARCODE found in any region');
              return;
            }
            dlog('file: decoded —', text.slice(0, 80));
            if (!stoppedRef.current) {
              // Latch stopped so no other decode path can double-fire.
              stoppedRef.current = true;
              setLastSeen(text.slice(0, 64));
              setFlashGreen(true);
              try { navigator.vibrate && navigator.vibrate(80); } catch {}
              // Short delay so the green flash is visible before closing.
              setTimeout(() => { onDetect?.(text); onClose?.(); }, 280);
            }
          } finally {
            setSnapBusy(false);
          }
        };

        // zxing's NotFoundException gets minified to various short names
        // depending on the build (N, NotFoundException, NotFoundException2, t,
        // etc.). We treat any error with no message as "no barcode in frame"
        // — it's just the normal "kept looking" signal, not a failure.
        const isNoBarcode = (err) => {
          if (!err) return false;
          const name = err.name || '';
          if (/NotFound/i.test(name)) return true;
          if (name.length <= 2) return true;
          return !err.message;
        };
        // Per-frame callback from zxing's continuous decode loop: forward
        // hits to handleResult, log only unexpected errors.
        const tickCb = (result, err) => {
          if (stoppedRef.current) return;
          if (result) {
            handleResult(result.getText());
            return;
          }
          if (err && !isNoBarcode(err)) {
            dlog('zxing err:', err.name || '?', err.message || '');
          }
        };

        // decodeFromStream takes the MediaStream directly and lets zxing own
        // the video's play lifecycle — essential, because its internal
        // playVideoOnLoadAsync subscribes to the "playing" event AFTER
        // addStreamSource, so the video must not be pre-playing.
        // Pick the best continuous-decode API this zxing build exposes.
        const method =
          typeof reader.decodeFromStream === 'function' ? 'decodeFromStream'
          : typeof reader.decodeFromVideoElementContinuously === 'function' ? 'decodeFromVideoElementContinuously'
          : 'decodeFromVideoElement';
        dlog('zxing method:', method);
        try {
          const p = method === 'decodeFromStream'
            ? reader.decodeFromStream(stream, video, tickCb)
            : reader[method](video, tickCb);
          // Not every build returns a promise — when one does, log its
          // settlement so failures show up in the on-screen debug log.
          if (p && typeof p.then === 'function') {
            p.then(r => dlog(`${method} resolved`, r ? r.getText?.() || '(ok)' : ''))
             .catch(e => dlog(`${method} rejected:`, e?.name || e?.message || e));
          }
        } catch (e) {
          dlog(`${method} threw:`, e);
        }
      } catch (e) {
        dlog('zxing path failed, falling back to native:', e);
        // Fallback: native BarcodeDetector if available (Android Chrome).
        if ('BarcodeDetector' in window) {
          setEngine('native');
          // Native path needs us to wire the stream + play the video.
          try {
            video.srcObject = stream;
            await video.play();
            dlog('native: video playing', video.videoWidth, 'x', video.videoHeight);
          } catch (pe) { dlog('native: video.play failed', pe); }
          // Wish-list of symbologies; trimmed to what the browser reports,
          // but kept as-is if the intersection comes back empty.
          let formats = ['code_128', 'data_matrix', 'qr_code', 'code_39', 'code_93', 'ean_13', 'ean_8', 'codabar', 'itf'];
          try {
            const supported = await window.BarcodeDetector.getSupportedFormats();
            dlog('native supported formats:', supported);
            const filtered = formats.filter(f => supported.includes(f));
            if (filtered.length) formats = filtered;
          } catch (ee) { dlog('getSupportedFormats failed:', ee); }
          let detector;
          try { detector = new window.BarcodeDetector({ formats }); }
          catch (ee) { dlog('BarcodeDetector(opts) ctor failed:', ee); detector = new window.BarcodeDetector(); }
          dlog('native detector ready, looping…');
          // rAF-paced detection loop; readyState >= 2 means a frame is
          // available to sample. NOTE(review): rafId is declared in the
          // enclosing effect scope (above this excerpt) so cleanup can
          // cancel the loop.
          const loop = async () => {
            if (stoppedRef.current || !video) return;
            try {
              if (video.readyState >= 2) {
                const codes = await detector.detect(video);
                if (codes && codes.length) handleResult(codes[0].rawValue);
              }
            } catch (ee) { /* frequent */ }
            if (!stoppedRef.current) rafId = requestAnimationFrame(loop);
          };
          loop();
        } else {
          dlog('FATAL: no scanner engine available');
          setError('Could not load the barcode scanner. Please type the SN manually.');
          setStatus('error');
        }
      }
    })();

    // Effect teardown: make every async path above a no-op, then release
    // the decoder, camera tracks, and any preview object URL.
    return () => {
      stoppedRef.current = true;
      snapFnRef.current = null;
      fileDecodeFnRef.current = null;
      // Functional update lets us revoke the current preview URL without
      // capturing a stale closure value.
      setSnapPreview(prev => { if (prev) { try { URL.revokeObjectURL(prev); } catch {} } return null; });
      if (rafId) cancelAnimationFrame(rafId);
      if (zxReaderRef.current) {
        try { zxReaderRef.current.reset(); } catch {}
        zxReaderRef.current = null;
      }
      if (streamRef.current) {
        // Stop all camera tracks so the OS camera-in-use indicator clears.
        streamRef.current.getTracks().forEach(t => { try { t.stop(); } catch {} });
        streamRef.current = null;
      }
      if (videoRef.current) {
        try { videoRef.current.srcObject = null; } catch {}
      }
    };
  }, [open]);

  // Torch (flashlight) toggle — re-apply the constraint whenever the user
  // flips it while the camera is open.
  React.useEffect(() => {
    if (!open || !torchSupported) return;
    const stream = streamRef.current;
    if (!stream) return;
    const [track] = stream.getVideoTracks();
    if (!track) return;
    // Best effort: some devices reject the constraint even after reporting
    // torch support, so swallow the rejection.
    track.applyConstraints({ advanced: [{ torch }] }).catch(() => {});
  }, [torch, open, torchSupported]);

  // Tap-to-focus: nudge the camera to refocus by briefly switching focus
  // modes. Silently does nothing on tracks without focusMode capabilities.
  const tapToFocus = React.useCallback(async () => {
    const track = streamRef.current?.getVideoTracks?.()[0];
    if (!track) return;
    const { focusMode } = track.getCapabilities?.() || {};
    if (!Array.isArray(focusMode)) return;
    try {
      if (focusMode.includes('single-shot')) {
        // One-shot refocus, then hand control back to continuous AF shortly.
        await track.applyConstraints({ advanced: [{ focusMode: 'single-shot' }] });
        setTimeout(() => track.applyConstraints({ advanced: [{ focusMode: 'continuous' }] }).catch(() => {}), 800);
      } else if (focusMode.includes('manual')) {
        await track.applyConstraints({ advanced: [{ focusMode: 'manual' }] });
      }
      dlog('tap-to-focus triggered');
    } catch (e) {
      dlog('focus trigger failed:', e);
    }
  }, [dlog]);

  // Nothing to render while closed — camera setup/teardown is driven by the
  // same `open` flag in the effect above.
  if (!open) return null;
  return (
    <div className="fixed inset-0 z-40 flex flex-col bg-black">
      {/* Live camera feed — tap anywhere on the video to refocus */}
      <video ref={videoRef} autoPlay playsInline muted
        onClick={tapToFocus}
        className="absolute inset-0 w-full h-full object-cover"
        style={{ background: '#000' }}/>
      {/* Vignette overlay */}
      <div className="absolute inset-0 pointer-events-none" style={{
        background: 'radial-gradient(ellipse at center, transparent 35%, rgba(0,0,0,0.55) 100%)'
      }}/>

      {/* Top bar: close button, title, torch toggle (empty div keeps the
          title centered when torch is unsupported) */}
      <div className="relative z-10 flex items-center justify-between p-4 pt-[max(1rem,env(safe-area-inset-top))]">
        <button onClick={onClose}
          className="w-10 h-10 rounded-full bg-black/50 backdrop-blur text-white grid place-items-center active:scale-95">
          <IconX size={20}/>
        </button>
        <div className="text-white/90 text-[14px] font-medium drop-shadow">Scan serial number</div>
        {torchSupported ? (
          <button onClick={() => setTorch(t => !t)}
            className={`w-10 h-10 rounded-full grid place-items-center active:scale-95
              ${torch ? 'bg-yellow-300 text-neutral-900' : 'bg-black/50 backdrop-blur text-white'}`}>
            {torch ? <IconZap size={18}/> : <IconFlashOff size={18}/>}
          </button>
        ) : <div className="w-10 h-10"/>}
      </div>

      {/* Aim box: four pulsing corner brackets + animated scanline; turns
          green via flashGreen when a barcode is decoded */}
      <div className="relative z-10 flex-1 grid place-items-center">
        <div ref={guideRef} className="relative" style={{ width: '78%', aspectRatio: '3/1' }}>
          {[['top-0 left-0', 'border-t-[3px] border-l-[3px] rounded-tl-xl'],
            ['top-0 right-0', 'border-t-[3px] border-r-[3px] rounded-tr-xl'],
            ['bottom-0 left-0', 'border-b-[3px] border-l-[3px] rounded-bl-xl'],
            ['bottom-0 right-0', 'border-b-[3px] border-r-[3px] rounded-br-xl']].map(([pos, cls], i) => (
              <div key={i} className={`absolute ${pos} w-8 h-8 ${cls} transition-colors`}
                style={{ borderColor: flashGreen ? '#22c55e' : 'var(--s3-orange)',
                  animation: 'pulseCorner 1.8s ease-in-out infinite' }}/>
          ))}
          {status === 'scanning' && (
            <div className="absolute left-2 right-2 h-[2px] rounded"
              style={{
                top: `${scanline * 100}%`,
                background: `linear-gradient(90deg, transparent, ${flashGreen ? '#22c55e' : '#FF6A1A'}, transparent)`,
                boxShadow: `0 0 12px ${flashGreen ? '#22c55e' : '#FF6A1A'}`,
                opacity: 0.85,
              }}/>
          )}
        </div>
        {/* Status / guidance text beneath the aim box */}
        <div className="absolute top-[calc(50%+80px)] text-center px-8 max-w-sm">
          {status === 'error' ? (
            <div className="text-white text-[14px] leading-snug bg-red-500/20 border border-red-400/30 rounded-lg px-4 py-3 backdrop-blur">
              {error}
            </div>
          ) : status === 'starting' ? (
            <div className="text-white/80 text-[14px]">Starting camera…</div>
          ) : (
            <>
              <div className="text-white text-[15px] mb-1 drop-shadow">Point at the barcode on your printer's SN sticker.</div>
              <div className="text-white/70 text-[13px] drop-shadow">Hold steady · tap screen to focus · try the torch in dim light.</div>
            </>
          )}
        </div>
      </div>

      {/* Bottom control strip: snap preview, last-seen text, action buttons,
          hidden file input, and the debug log */}
      <div className="relative z-10 p-4 pb-[max(1rem,env(safe-area-inset-bottom))] flex flex-col items-stretch gap-2">
        {snapPreview && (
          <div className="self-center flex items-center gap-2 px-2 py-1.5 rounded-lg bg-black/70 backdrop-blur border border-white/15">
            <img src={snapPreview} alt="captured"
              className="w-20 h-14 object-contain rounded bg-black"/>
            <div className="text-white/80 text-[10px] leading-tight font-mono max-w-[180px]">
              <div className="text-white/95 mb-0.5">aim area (what we decoded)</div>
              <div className="text-white/60">blurry / barcode off? re-aim or Upload photo</div>
            </div>
            <button
              onClick={() => setSnapPreview(prev => { if (prev) { try { URL.revokeObjectURL(prev); } catch {} } return null; })}
              className="ml-1 w-6 h-6 rounded-full bg-white/10 text-white/80 grid place-items-center text-[11px]"
              aria-label="Dismiss preview">×</button>
          </div>
        )}
        {lastSeen && (
          <div className="self-center max-w-full px-3 py-1.5 rounded-md bg-black/60 backdrop-blur text-white/90 text-[11px] font-mono truncate">
            last seen: {lastSeen}
          </div>
        )}
        <div className="flex items-center justify-center gap-2 flex-wrap">
          <button
            onClick={() => snapFnRef.current?.()}
            disabled={snapBusy || status !== 'scanning'}
            className="px-5 h-11 rounded-full bg-[var(--s3-orange)] text-white text-[14px] font-semibold active:scale-95 disabled:opacity-50">
            {snapBusy ? 'Reading…' : 'Snap to decode'}
          </button>
          <button
            onClick={() => fileInputRef.current?.click()}
            disabled={snapBusy || status !== 'scanning'}
            className="px-4 h-11 rounded-full bg-blue-500 text-white text-[13px] font-semibold active:scale-95 disabled:opacity-50"
            title="Take a photo with native camera — better autofocus on close-up barcodes">
            Upload photo
          </button>
          <button onClick={onClose}
            className="px-4 h-11 rounded-full bg-white/95 text-neutral-900 text-[14px] font-medium active:scale-95">
            Type it in
          </button>
          <button
            onClick={() => {
              const u = new URL(window.location.href);
              u.searchParams.set('_v', Date.now());
              window.location.replace(u.toString());
            }}
            className="px-4 h-11 rounded-full bg-yellow-300 text-neutral-900 text-[13px] font-semibold active:scale-95"
            title="Force reload to clear cached JS">
            ↻ Force refresh
          </button>
          {/* Hidden file input: capture="environment" opens the native rear
              camera on mobile; value is reset so re-picking the same file
              still fires onChange */}
          <input
            ref={fileInputRef}
            type="file"
            accept="image/*"
            capture="environment"
            className="hidden"
            onChange={(e) => {
              const f = e.target.files?.[0];
              if (f) fileDecodeFnRef.current?.(f);
              e.target.value = '';
            }}
          />
        </div>
        <div className="flex items-center justify-center gap-3 text-white/50 text-[10px] font-mono">
          <span>engine: {engine || '—'}</span>
          <span>·</span>
          <span>detections: {detectCountRef.current}</span>
          <span>·</span>
          <button onClick={() => setShowLogs(s => !s)} className="underline underline-offset-2">
            {showLogs ? 'hide' : 'show'} log
          </button>
        </div>
        {showLogs && (
          <div className="max-h-[30vh] overflow-auto rounded-lg bg-black/70 backdrop-blur border border-white/10 p-2 font-mono text-[10px] leading-snug text-white/85">
            {logs.length === 0
              ? <div className="text-white/40">(no events yet)</div>
              : logs.map((l, i) => <div key={i} className="whitespace-pre-wrap break-words">{l}</div>)
            }
          </div>
        )}
      </div>

      <style>{`
        @keyframes pulseCorner {
          0%, 100% { opacity: 1; transform: scale(1); }
          50% { opacity: 0.6; transform: scale(0.95); }
        }
      `}</style>
    </div>
  );
}

function BottomSheet({ open, onClose, title, children }) {
  if (!open) return null;
  return (
    <div className="fixed inset-0 z-30" onClick={onClose}>
      <div className="absolute inset-0 bg-black/50" style={{animation:'fadeIn 180ms ease'}}/>
      <div
        onClick={e => e.stopPropagation()}
        className="absolute left-0 right-0 bottom-0 bg-white rounded-t-2xl max-h-[85%] flex flex-col shadow-2xl"
        style={{ animation: 'slideUp 260ms cubic-bezier(0.22, 1, 0.36, 1)' }}
      >
        <div className="flex items-center justify-center pt-2 pb-1">
          <div className="w-10 h-1 rounded-full bg-neutral-300"/>
        </div>
        <div className="flex items-center justify-between px-5 py-3 border-b border-neutral-100">
          <div className="font-semibold text-[16px] tracking-tight">{title}</div>
          <button onClick={onClose} className="w-8 h-8 rounded-full hover:bg-neutral-100 grid place-items-center">
            <IconX size={18}/>
          </button>
        </div>
        <div className="overflow-auto px-5 py-4 text-[14px] text-neutral-700 leading-relaxed">
          {children}
        </div>
      </div>
    </div>
  );
}

function FilamentIllustration({ size }) {
  const s = size || 52;
  return (
    <svg width={s} height={s} viewBox="0 0 52 52">
      <defs>
        <radialGradient id="spool" cx="0.3" cy="0.3">
          <stop offset="0" stopColor="#FF8A3D"/>
          <stop offset="1" stopColor="#E55500"/>
        </radialGradient>
      </defs>
      <circle cx="26" cy="26" r="20" fill="url(#spool)"/>
      <circle cx="26" cy="26" r="8" fill="#fff"/>
      <circle cx="26" cy="26" r="3" fill="#FF6A1A"/>
      <g stroke="#fff" strokeWidth="0.6" opacity="0.35" fill="none">
        <circle cx="26" cy="26" r="11"/>
        <circle cx="26" cy="26" r="14"/>
        <circle cx="26" cy="26" r="17"/>
      </g>
    </svg>
  );
}

// Expose the shared primitives on window — this file is loaded as a plain
// <script> (no bundler/module system), so globals are the export mechanism.
const sharedPrimitives = {
  Card, CardHeader, Button, Label, Input, Select,
  PhotoTile, ScannerModal, BottomSheet, ContactLinks, FilamentIllustration,
};
Object.assign(window, sharedPrimitives);
