skibi11 committed on
Commit ac1b201 · verified · 1 Parent(s): 449f537

creates a FastAPI server and "mounts" our Gradio app inside
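In other words, app.py no longer calls demo.launch(); it builds a plain FastAPI application and attaches the Gradio UI to it with gr.mount_gradio_app (see the diff below). Something then has to serve that app object with an ASGI server. A minimal sketch of how that might look, assuming the Space (or a local test run) uses uvicorn and the usual Spaces port 7860 — neither the entry point nor the port is part of this commit:

# Hypothetical local entry point, not part of this commit.
# Serves the FastAPI `app` object defined in app.py, which has the
# Gradio interface mounted at the root path "/".
import uvicorn

if __name__ == "__main__":
    uvicorn.run("app:app", host="0.0.0.0", port=7860)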

Files changed (1): app.py (+19 -23)
app.py CHANGED
@@ -1,5 +1,6 @@
-# Final robust app.py for your Hugging Face Space
+# Final app.py using FastAPI wrapper
 
+from fastapi import FastAPI
 import gradio as gr
 import tensorflow as tf
 from huggingface_hub import hf_hub_download
@@ -8,7 +9,6 @@ from PIL import Image
 import os
 
 # --- 1. Load the Model ---
-model = None
 try:
     model_path = hf_hub_download(
         repo_id="skibi11/leukolook-eye-detector",
@@ -18,43 +18,39 @@ try:
     print("--- MODEL LOADED SUCCESSFULLY! ---")
 except Exception as e:
     print(f"--- ERROR LOADING MODEL: {e} ---")
-    raise gr.Error(f"Failed to load model: {e}")
+    raise RuntimeError(f"Failed to load model: {e}")
 
-# --- 2. Pre-processing Logic ---
+# --- 2. Pre-processing & Prediction Logic (remains the same) ---
 def preprocess_image(img_pil):
     img = img_pil.resize((224, 224))
     img_array = np.array(img)
-    if img_array.ndim == 2:
-        img_array = np.stack((img_array,)*3, axis=-1)
-    if img_array.shape[-1] == 4:
-        img_array = img_array[..., :3]
+    if img_array.ndim == 2: img_array = np.stack((img_array,)*3, axis=-1)
+    if img_array.shape[-1] == 4: img_array = img_array[..., :3]
     img_array = img_array / 255.0
     img_array = np.expand_dims(img_array, axis=0)
     return img_array
 
-# --- 3. Prediction Logic ---
 def predict(image_from_gradio):
     if not isinstance(image_from_gradio, np.ndarray):
         return {"error": "Invalid input type. Expected an image."}
-
     try:
         pil_image = Image.fromarray(image_from_gradio)
         processed_image = preprocess_image(pil_image)
         prediction = model.predict(processed_image)
-
         labels = [f"Class_{i}" for i in range(prediction.shape[1])]
         confidences = {label: float(score) for label, score in zip(labels, prediction[0])}
         return confidences
     except Exception as e:
-        raise gr.Error(f"Error during prediction: {e}")
-
-# --- 4. Create Gradio Interface using gr.Blocks for stability ---
-with gr.Blocks() as demo:
-    gr.Interface(
-        fn=predict,
-        inputs=gr.Image(type="numpy"),
-        outputs=gr.JSON(),
-        api_name="predict"
-    )
-
-demo.launch()
+        return {"error": f"Error during prediction: {e}"}
+
+# --- 3. Create the Gradio Interface (without launching) ---
+gradio_interface = gr.Interface(
+    fn=predict,
+    inputs=gr.Image(type="numpy"),
+    outputs=gr.JSON(),
+    api_name="predict"
+)
+
+# --- 4. Create the FastAPI app and mount the Gradio app to it ---
+app = FastAPI()
+app = gr.mount_gradio_app(app, gradio_interface, path="/")
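Because the interface sets api_name="predict", the mounted app still exposes Gradio's programmatic API alongside the web UI. A hedged usage sketch with gradio_client — the Space URL and image path are placeholders, not taken from this commit, and the exact call shape can vary with the installed gradio_client version:

# Hypothetical client-side call; the URL and file path are placeholders.
from gradio_client import Client, handle_file

client = Client("https://your-space-url.hf.space")  # replace with the real Space URL
result = client.predict(
    handle_file("eye_photo.jpg"),  # image input passed to predict()
    api_name="/predict",           # matches api_name="predict" in app.py
)
print(result)  # dict of Class_i -> confidence, or an "error" entry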