import { useEffect, useState } from "react";
import { useVLMContext } from "../context/useVLMContext";
import GlassContainer from "./GlassContainer";
import { GLASS_EFFECTS } from "../constants";

interface LoadingScreenProps {
  onComplete: () => void;
}

export default function LoadingScreen({ onComplete }: LoadingScreenProps) {
  const [progress, setProgress] = useState(0);
  const [currentStep, setCurrentStep] = useState("Initializing...");
  const [isError, setIsError] = useState(false);
  const [hasStartedLoading, setHasStartedLoading] = useState(false);
  const { loadModel, isLoaded, isLoading } = useVLMContext();

  useEffect(() => {
    // Prevent multiple loading attempts
    if (hasStartedLoading || isLoading || isLoaded) return;

    const loadModelAndProgress = async () => {
      setHasStartedLoading(true);
      try {
        setCurrentStep("Checking WebGPU support...");
        setProgress(5);

        // Check for WebGPU support first
        if (!navigator.gpu) {
          setCurrentStep("WebGPU not available in this browser");
          setIsError(true);
          return;
        }

        // Load the actual AI model
        await loadModel((message) => {
          setCurrentStep(message);
          if (message.includes("Loading processor")) {
            setProgress(10);
          } else if (message.includes("Processor loaded")) {
            setProgress(20);
          } else if (message.includes("Model loaded")) {
            setProgress(80);
          }
        });

        setCurrentStep("Ready to start!");
        setProgress(100);

        // Small delay before completing
        await new Promise((resolve) => setTimeout(resolve, 300));
        onComplete();
      } catch (error) {
        console.error("Error loading model:", error);
        setCurrentStep(`Error loading model: ${error instanceof Error ? error.message : String(error)}`);
        setIsError(true);
      }
    };

    loadModelAndProgress();
  }, [hasStartedLoading, isLoading, isLoaded, loadModel, onComplete]);

  // Handle case where model is already loaded
  useEffect(() => {
    if (isLoaded && !hasStartedLoading) {
      setProgress(100);
      setCurrentStep("Model already loaded!");
      setTimeout(onComplete, 300);
    }
  }, [isLoaded, hasStartedLoading, onComplete]);

  return (
    <div className="min-h-screen flex items-center justify-center text-white">
      {/* Minimal loading UI sketch: the container markup, class names, and GlassContainer props are assumptions. */}
      <GlassContainer className="flex flex-col items-center gap-4 p-8 rounded-2xl">
        <p className={isError ? "text-red-400" : "text-gray-200"}>{currentStep}</p>
        {/* Simple progress bar driven by the `progress` state */}
        <div className="w-64 h-2 bg-white/10 rounded-full overflow-hidden">
          <div className="h-full bg-white/70 transition-all duration-300" style={{ width: `${progress}%` }} />
        </div>
        <p className="text-sm text-gray-400">{Math.round(progress)}% complete</p>
      </GlassContainer>
    </div>
  );
}