Update app.py
app.py
CHANGED
@@ -4,30 +4,123 @@ import os
 
 BACKEND_URL = os.environ.get("BACKEND_URL", "").strip()
 
-
-try:
-
-    demo = gr.Interface.load(
-        name=BACKEND_URL,
-        title="PrimateFace Detection, Pose & Gaze Demo",
-        description="Connected to GPU backend server"
-    )
-    # Add header to bypass ngrok warning
-    demo.headers = {"ngrok-skip-browser-warning": "true"}
-except Exception as e:
-
-
-
-
-
-
-    )
-
-
-        fn=lambda x: "Backend URL not configured in Space settings.",
-        inputs=gr.Textbox(),
-        outputs=gr.Textbox(),
-        title="PrimateFace Demo - Not Configured"
-    )
+# Create persistent client
+try:
+    client = Client(BACKEND_URL, headers={"ngrok-skip-browser-warning": "true"})
+    backend_available = True
+except:
+    client = None
+    backend_available = False
+
+def forward_to_backend(fn_name, *args):
+    """Generic function to forward any call to backend"""
+    if not client:
+        return [gr.update() for _ in range(7)]
+    try:
+        return client.predict(*args, api_name=fn_name)
+    except Exception as e:
+        print(f"Error calling {fn_name}: {e}")
+        return [gr.update() for _ in range(7)]
+
+# Wrapper functions
+def handle_file_upload_preview(file_obj):
+    return forward_to_backend("/handle_file_upload_preview", file_obj)
+
+def handle_webcam_capture(snapshot):
+    return forward_to_backend("/handle_webcam_capture", snapshot)
+
+def process_media(file_obj, webcam_img, model_type, conf_thresh, max_dets, task_type):
+    return forward_to_backend("/process_media", file_obj, webcam_img, model_type, conf_thresh, max_dets, task_type)
+
+def clear_all_media_and_outputs():
+    return forward_to_backend("/clear_all_media_and_outputs")
+
+def handle_example_select(evt: gr.SelectData):
+    """Handle example selection locally and update input_file"""
+    # Extract the image path from the dataset
+    if isinstance(evt.value, dict) and 'image' in evt.value:
+        return evt.value['image']
+    return None
+
+# Build the interface
+with gr.Blocks(theme=gr.themes.Soft()) as demo:
+    gr.Markdown("<center><h1>PrimateFace Detection, Pose Estimation, and Gaze Estimation Demo</h1></center>")
+
+    if not backend_available:
+        gr.Markdown("### 🔴 GPU Server Offline - Please check back later")
+    else:
+        gr.Markdown("Upload an image/video or use your webcam. For webcam, press 'Enter' to take a snapshot.")
+        gr.Markdown("Click 'Detect Faces' for results.")
+
+        with gr.Row():
+            with gr.Column(scale=1):
+                with gr.Tabs():
+                    with gr.TabItem("Upload File"):
+                        input_file = gr.File(label="Upload Image or Video Here", file_types=["image", ".mp4", ".avi", ".mov", ".mkv", ".webm", ".gif"])
+                        display_raw_image_file = gr.Image(label="Raw Image Preview", type="pil", interactive=False, visible=False)
+                        display_raw_video_file = gr.Video(label="Raw Video Preview", interactive=False, visible=False)
+
+                    with gr.TabItem("Webcam"):
+                        gr.Markdown("**Using the Webcam:** Click on feed or press Enter to capture")
+                        input_webcam = gr.Image(sources=["webcam"], type="pil", label="Live Webcam")
+                        display_raw_image_webcam = gr.Image(label="Captured Snapshot Preview", type="pil", interactive=False, visible=False)
+
+                clear_all_button = gr.Button("Clear All Inputs & Outputs")
+
+            with gr.Column(scale=1):
+                gr.Markdown("### Processed Output")
+                display_processed_image = gr.Image(label="Processed Image", type="pil", interactive=False, visible=False)
+                display_processed_video = gr.Video(label="Processed Video", interactive=False, visible=False)
+
+        # Example images - host them on HF Space
+        example_paths = [
+            "images/allocebus_000003.jpeg",
+            "images/tarsius_000120.jpeg",
+            "images/nasalis_proboscis-monkey.png",
+            "images/macaca_000032.jpeg",
+            "images/mandrillus_000011.jpeg",
+            "images/pongo_000006.jpeg"
+        ]
+
+        example_dataset = gr.Dataset(
+            components=["image"],
+            samples=[[path] for path in example_paths],
+            label="Example Images (Click to use)",
+            samples_per_page=6
+        )
+
+        submit_button = gr.Button("Detect Faces", variant="primary", scale=2)
+
+        with gr.Column():
+            gr.Markdown("### Detection Controls")
+            model_choice_radio = gr.Radio(choices=["MMDetection"], value="MMDetection", label="Inferencer", visible=False)
+            task_type_dropdown = gr.Dropdown(
+                choices=["Face Detection", "Face Pose Estimation", "Gaze Estimation [experimental]"],
+                value="Face Detection",
+                label="Select Task"
+            )
+            conf_slider = gr.Slider(minimum=0.05, maximum=0.95, value=0.25, step=0.05, label="Confidence Threshold")
+            max_det_slider = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="Max Detections")
+
+        # Define outputs
+        file_preview_outputs = [display_raw_image_file, display_raw_video_file, input_file, display_processed_image, display_processed_video]
+        webcam_outputs = [display_raw_image_webcam, input_webcam, display_processed_image, display_processed_video]
+        process_outputs = [display_raw_image_file, display_raw_video_file, display_raw_image_webcam, display_processed_image, display_processed_video]
+        clear_outputs = [input_file, input_webcam, display_raw_image_file, display_raw_video_file, display_raw_image_webcam, display_processed_image, display_processed_video]
+
+        # Wire events
+        input_file.change(handle_file_upload_preview, inputs=[input_file], outputs=file_preview_outputs)
+        input_webcam.change(handle_webcam_capture, inputs=[input_webcam], outputs=webcam_outputs)
+
+        # Handle example selection
+        example_dataset.select(handle_example_select, outputs=[input_file])
+
+        submit_button.click(
+            process_media,
+            inputs=[input_file, display_raw_image_webcam, model_choice_radio, conf_slider, max_det_slider, task_type_dropdown],
+            outputs=process_outputs
+        )
+
+        clear_all_button.click(clear_all_media_and_outputs, outputs=clear_outputs)
 
 demo.launch()
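
The hunk starts at line 4, so the file's imports are not shown; the hunk header only confirms "import os". Below is a minimal sketch of the client-side forwarding pattern the new code relies on, assuming the top of app.py also imports Client from gradio_client (and gradio as gr for the Blocks UI). The sample image path is a placeholder, not a value from the commit.

# Sketch (not part of the commit): forwarding a call from the Space to the
# GPU backend's Gradio API with gradio_client. Assumes these imports exist
# near the top of app.py; the sample file path below is illustrative only.
import os
from gradio_client import Client

BACKEND_URL = os.environ.get("BACKEND_URL", "").strip()

# The extra header skips ngrok's browser-warning interstitial, as in the commit.
client = Client(BACKEND_URL, headers={"ngrok-skip-browser-warning": "true"})

# Each api_name matches an endpoint exposed by the backend's own Gradio app.
result = client.predict("sample.jpeg", api_name="/handle_file_upload_preview")
print(result)

The fallback return value in forward_to_backend, [gr.update() for _ in range(7)], appears sized to the seven components in clear_outputs, so a failed backend call leaves every output component unchanged instead of raising.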