skibi11 committed
Commit a4d01ce · verified · 1 Parent(s): c052030

removed the Gradio UI

Files changed (1):
  1. app.py +3 -35
app.py CHANGED
@@ -1,18 +1,14 @@
-# Final, Complete, and Corrected app.py for Hugging Face Space
+# Final, API-only app.py for Hugging Face Space
 
 import os
 import cv2
 import tempfile
 import numpy as np
 import uvicorn
-import requests
-import base64
-import io
 from PIL import Image
 from inference_sdk import InferenceHTTPClient
 from fastapi import FastAPI, File, UploadFile
 from fastapi.responses import JSONResponse
-import gradio as gr
 import tensorflow as tf
 from huggingface_hub import hf_hub_download
 
@@ -52,7 +48,6 @@ def detect_eyes_roboflow(image_path, raw_image):
         crops.append(crop)
     return crops
 
-# --- ADDED MISSING FUNCTION ---
 def get_largest_iris_prediction(eye_crop):
     is_success, buffer = cv2.imencode(".jpg", eye_crop)
     if not is_success: return None
@@ -61,7 +56,7 @@ def get_largest_iris_prediction(eye_crop):
     return max(preds, key=lambda p: p["width"] * p["height"]) if preds else None
 
 def run_leukocoria_prediction(iris_crop):
-    if leuko_model is None: return {"error": "Leukocoria model not loaded"}
+    if leuko_model is None: return {"error": "Leukocoria model not loaded"}, 0.0
 
     img_pil = Image.fromarray(cv2.cvtColor(iris_crop, cv2.COLOR_BGR2RGB))
     enh = enhance_image_unsharp_mask(np.array(img_pil))
@@ -115,33 +110,6 @@ async def full_detection_pipeline(image: UploadFile = File(...)):
     finally:
         os.remove(temp_image_path)
 
-# --- 4. Create and Mount the Gradio UI for a professional homepage ---
-def gradio_wrapper(image_array):
-    """A wrapper function to call our own FastAPI endpoint from the Gradio UI."""
-    try:
-        pil_image = Image.fromarray(image_array)
-        with io.BytesIO() as buffer:
-            pil_image.save(buffer, format="JPEG")
-            files = {'image': ('image.jpg', buffer.getvalue(), 'image/jpeg')}
-            response = requests.post("http://127.0.0.1:7860/detect/", files=files)
-
-        if response.status_code == 200:
-            return response.json()
-        else:
-            return {"error": f"API Error {response.status_code}", "details": response.text}
-    except Exception as e:
-        return {"error": str(e)}
-
-gradio_ui = gr.Interface(
-    fn=gradio_wrapper,
-    inputs=gr.Image(type="numpy", label="Upload an eye image to test the full pipeline"),
-    outputs=gr.JSON(label="Analysis Results"),
-    title="LeukoLook Eye Detector",
-    description="A demonstration of the LeukoLook detection model pipeline."
-)
-
-app = gr.mount_gradio_app(app, gradio_ui, path="/")
-
-# --- 5. Run the server ---
+# --- 4. Run the Server ---
 if __name__ == "__main__":
     uvicorn.run(app, host="0.0.0.0", port=7860)
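
With the Gradio homepage removed, the Space is called directly over HTTP. A minimal client sketch, assuming the server from app.py is running on port 7860 and that /detect/ still accepts a multipart field named 'image', as the removed gradio_wrapper did:

# hypothetical standalone client for the /detect/ endpoint (not part of app.py)
import requests

def detect(image_path):
    # POST the file as the 'image' multipart field, mirroring the removed gradio_wrapper
    with open(image_path, "rb") as f:
        files = {"image": (image_path, f, "image/jpeg")}
        response = requests.post("http://127.0.0.1:7860/detect/", files=files, timeout=60)
    response.raise_for_status()  # surface HTTP errors instead of parsing an error page
    return response.json()

if __name__ == "__main__":
    print(detect("eye.jpg"))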