# Final app.py using FastAPI wrapper

from fastapi import FastAPI
import gradio as gr
import tensorflow as tf
from huggingface_hub import hf_hub_download
import numpy as np
from PIL import Image

# --- 1. Load the Model ---
try:
    model_path = hf_hub_download(
        repo_id="skibi11/leukolook-eye-detector", 
        filename="MobileNetV1_best.keras"
    )
    model = tf.keras.models.load_model(model_path)
    print("--- MODEL LOADED SUCCESSFULLY! ---")
except Exception as e:
    print(f"--- ERROR LOADING MODEL: {e} ---")
    raise RuntimeError(f"Failed to load model: {e}")

# --- 2. Pre-processing & Prediction Logic ---
def preprocess_image(img_pil):
    """Convert a PIL image into a normalized (1, 224, 224, 3) batch for the model."""
    img = img_pil.resize((224, 224))
    img_array = np.array(img)
    # Grayscale image: replicate the single channel into 3 RGB channels
    if img_array.ndim == 2:
        img_array = np.stack((img_array,) * 3, axis=-1)
    # RGBA image: drop the alpha channel
    if img_array.shape[-1] == 4:
        img_array = img_array[..., :3]
    img_array = img_array / 255.0  # scale pixel values to [0, 1]
    img_array = np.expand_dims(img_array, axis=0)  # add the batch dimension
    return img_array

def predict(image_from_gradio):
    """Run the model on a numpy image from Gradio and return {label: confidence}."""
    if not isinstance(image_from_gradio, np.ndarray):
        return {"error": "Invalid input type. Expected an image."}
    try:
        pil_image = Image.fromarray(image_from_gradio)
        processed_image = preprocess_image(pil_image)
        prediction = model.predict(processed_image)
        # Generic placeholder labels; replace with the real class names if they are known.
        labels = [f"Class_{i}" for i in range(prediction.shape[1])]
        confidences = {label: float(score) for label, score in zip(labels, prediction[0])}
        return confidences
    except Exception as e:
        return {"error": f"Error during prediction: {e}"}

# --- 3. Create the Gradio Interface (without launching) ---
gradio_interface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="numpy"),
    outputs=gr.JSON(),
    api_name="predict"
)

# --- 4. Create the FastAPI app and mount the Gradio app to it ---
app = FastAPI()
app = gr.mount_gradio_app(app, gradio_interface, path="/")
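
# --- 5. Optional local entry point (a minimal sketch, not part of the original file) ---
# Hugging Face Spaces normally starts this app with its own server process; the block
# below is only an assumed convenience for running the same app locally via `python app.py`,
# and can be removed if the platform already launches the server. Port 7860 is the
# conventional Gradio/Spaces port, chosen here as an assumption.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)

# Example client call once the server is running (assumes a recent `gradio_client`
# package; the image filename is hypothetical):
#   from gradio_client import Client, handle_file
#   client = Client("http://localhost:7860/")
#   result = client.predict(handle_file("eye_photo.jpg"), api_name="/predict")
#   print(result)  # e.g. {"Class_0": 0.93, "Class_1": 0.07}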