update app v2
app.py
CHANGED
@@ -1,261 +1,303 @@
import gradio as gr
import cv2
import pandas as pd
import os
import tempfile
-import
-
-import
-import numpy as np
-
-# Global variables to store detector
-detector = None
-detector_loaded = False
-
-def load_video_detector():
-    """Load FER detector with error handling"""
-    global detector, detector_loaded
-
-    if detector_loaded:
-        return detector
-
-    try:
-        from fer import FER
-        detector = FER(mtcnn=True)
-        detector_loaded = True
-        return detector
-    except ImportError as e:
-        raise Exception(f"Failed to import FER: {e}")
-    except Exception as e:
-        raise Exception(f"Failed to initialize FER detector: {e}")

    try:
    except Exception as e:

-            return "Could not open video file", None, None, None
-
-        emotions = []
-        frame_rate = int(cap.get(cv2.CAP_PROP_FPS))
-
-        # Handle cases where frame rate detection fails
-        if frame_rate <= 0:
-            frame_rate = 30

-            progress(progress_val, desc=f"Analyzing frame {frame_count}/{total_frames}")

            try:
            except Exception as e:
-                print(f"
                continue

-        error_msg = f"Error during video analysis: {e}\nTraceback: {traceback.format_exc()}"
-        return error_msg, None, None, None

-    if video_file is None:
-        return "Please upload a video file to analyze facial emotions.", None
-
-    # Get file info
-    file_size = os.path.getsize(video_file) / (1024 * 1024)  # MB
-    file_info = f"File uploaded successfully: {os.path.basename(video_file)}\nFile size: {file_size:.2f} MB\n\nAnalyzing facial emotions in video... This may take a few minutes.\n\n"
-
-    try:
-        results_text, plot, emotion_counts, dominant_emotion = analyze_video_emotions(video_file)
-        return file_info + results_text, plot
-
-    except Exception as e:
-        error_msg = f"Analysis failed: {e}\nPlease try with a different video file or check the file format."
-        return file_info + error_msg, None

def create_interface():
-    ""
-        margin-bottom: 2rem;
-    }
-    """
-
-    with gr.Blocks(css=css, title="Video Emotion Detection", theme=gr.themes.Soft()) as iface:
-
-        # Header
-        gr.HTML("""
-        <div class="main-header">
-            <h1>🎭 Video Emotion Detection</h1>
-            <p>Upload a video file to analyze facial emotions using advanced AI</p>
-        </div>
-        """)

-        with gr.
-                process_btn = gr.Button("Analyze Video", variant="primary", size="lg")
-
-                # Info section
-                with gr.Accordion("ℹ️ About this app", open=False):
-                    gr.Markdown("""
-                    ### How it works:
-                    - **Facial Detection**: Uses MTCNN for face detection
-                    - **Emotion Recognition**: Analyzes facial expressions using FER (Facial Emotion Recognition)
-                    - **Sampling**: Analyzes frames every 2 seconds for efficiency
-                    - **Supported Formats**: MP4, AVI, MOV

-                # Results section
-                results_output = gr.Textbox(
-                    label="Analysis Results",
-                    lines=15,
-                    max_lines=20,
-                    interactive=False,
-                    placeholder="Upload a video and click 'Analyze Video' to see results here..."
                )

-            fn=process_video,
-            inputs=[video_input],
-            outputs=[results_output, plot_output],
-            show_progress=True
-        )

-        #
        )
-
-        # Footer
-        gr.HTML("""
-        <div style="text-align: center; margin-top: 2rem; padding: 1rem; background-color: #f8f9fa; border-radius: 0.5rem;">
-            <p><strong>Video Emotion Detection App</strong> - Powered by FER and MTCNN</p>
-            <p>Upload your video files and get detailed emotion analysis with visualizations</p>
-        </div>
-        """)
-
-    return iface
-
-def main():
-    """Main function to launch the app"""
-    # Initialize detector on startup
-    try:
-        print("Loading emotion detection model...")
-        load_video_detector()
-        print("Model loaded successfully!")
-    except Exception as e:
-        print(f"Warning: Could not pre-load detector: {e}")
-        print("Detector will be loaded when first video is processed.")
-
-    # Create and launch interface
-    iface = create_interface()

-
-    iface.launch(
-        server_name="0.0.0.0",  # Allow external access
-        server_port=7860,  # Default Gradio port
-        share=False,  # Set to True to create public link
-        debug=False,
-        show_error=True,
-        inbrowser=True  # Auto-open in browser
-    )

if __name__ == "__main__":
-    main()
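The removed version was built on the fer package. For orientation, a minimal sketch of the API it relied on (illustrative only, not the removed implementation; the sample image path is hypothetical):

import cv2
from fer import FER

fer_detector = FER(mtcnn=True)

# Hypothetical sample frame; any BGR image loaded with OpenCV works.
frame = cv2.imread("sample_frame.jpg")

# detect_emotions returns one dict per detected face: a bounding "box" plus
# an "emotions" dict scoring angry, disgust, fear, happy, sad, surprise, neutral.
faces = fer_detector.detect_emotions(frame)

# top_emotion is a convenience wrapper returning only the strongest label.
label, score = fer_detector.top_emotion(frame)
print(label, score, len(faces))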
import gradio as gr
import cv2
+import numpy as np
import pandas as pd
+import matplotlib.pyplot as plt
+from deepface import DeepFace
import os
import tempfile
+from PIL import Image
+import io
+import base64

+class EmotionDetector:
+    def __init__(self):
+        self.emotions = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
+
+    def detect_emotions_image(self, image):
+        """Detect emotions in a single image"""
        try:
+            if image is None:
+                return None, "No image provided"
+
+            # Convert PIL Image to numpy array if needed
+            if isinstance(image, Image.Image):
+                image = np.array(image)
+
+            # Convert RGB to BGR for OpenCV
+            if len(image.shape) == 3 and image.shape[2] == 3:
+                image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+            else:
+                image_bgr = image
+
+            # Save temporary image for DeepFace
+            with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as tmp_file:
+                cv2.imwrite(tmp_file.name, image_bgr)
+                temp_path = tmp_file.name
+
+            try:
+                # Analyze emotions using DeepFace
+                result = DeepFace.analyze(
+                    img_path=temp_path,
+                    actions=['emotion'],
+                    enforce_detection=False,
+                    detector_backend='opencv'
+                )
+
+                # Handle both single face and multiple faces results
+                if isinstance(result, list):
+                    emotions_data = result[0]['emotion']
+                else:
+                    emotions_data = result['emotion']
+
+                # Create emotion chart
+                emotion_df = pd.DataFrame(list(emotions_data.items()),
+                                          columns=['Emotion', 'Confidence'])
+                emotion_df = emotion_df.sort_values('Confidence', ascending=True)
+
+                # Create matplotlib plot
+                plt.figure(figsize=(10, 6))
+                bars = plt.barh(emotion_df['Emotion'], emotion_df['Confidence'])
+                plt.xlabel('Confidence (%)')
+                plt.title('Emotion Detection Results')
+                plt.grid(axis='x', alpha=0.3)
+
+                # Color bars based on emotion
+                colors = {
+                    'happy': '#FFD700',
+                    'sad': '#4169E1',
+                    'angry': '#DC143C',
+                    'fear': '#800080',
+                    'surprise': '#FF8C00',
+                    'disgust': '#228B22',
+                    'neutral': '#708090'
+                }
+
+                for bar, emotion in zip(bars, emotion_df['Emotion']):
+                    bar.set_color(colors.get(emotion, '#708090'))
+
+                plt.tight_layout()
+
+                # Save plot to bytes
+                img_buffer = io.BytesIO()
+                plt.savefig(img_buffer, format='png', dpi=150, bbox_inches='tight')
+                img_buffer.seek(0)
+                plt.close()
+
+                # Convert to PIL Image
+                chart_image = Image.open(img_buffer)
+
+                # Get dominant emotion
+                dominant_emotion = max(emotions_data, key=emotions_data.get)
+                confidence = emotions_data[dominant_emotion]
+
+                result_text = f"**Dominant Emotion:** {dominant_emotion.title()}\n"
+                result_text += f"**Confidence:** {confidence:.1f}%\n\n"
+                result_text += "**All Emotions:**\n"
+
+                for emotion, conf in sorted(emotions_data.items(), key=lambda x: x[1], reverse=True):
+                    result_text += f"• {emotion.title()}: {conf:.1f}%\n"
+
+                return chart_image, result_text
+
+            finally:
+                # Clean up temporary file
+                if os.path.exists(temp_path):
+                    os.unlink(temp_path)
+
        except Exception as e:
+            error_msg = f"Error analyzing image: {str(e)}"
+            print(error_msg)  # For debugging
+            return None, error_msg

+    def detect_emotions_video(self, video_path, sample_rate=30):
+        """Detect emotions in video by sampling frames"""
+        try:
+            if video_path is None:
+                return None, "No video provided"

+            cap = cv2.VideoCapture(video_path)
+            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+            fps = int(cap.get(cv2.CAP_PROP_FPS))
+
+            if frame_count == 0:
+                return None, "Invalid video file"
+
+            # Sample frames every 'sample_rate' frames
+            frame_indices = range(0, frame_count, sample_rate)
+            emotions_over_time = []
+
+            for frame_idx in frame_indices:
+                cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
+                ret, frame = cap.read()

+                if not ret:
+                    continue

                try:
+                    # Save frame temporarily
+                    with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as tmp_file:
+                        cv2.imwrite(tmp_file.name, frame)
+                        temp_path = tmp_file.name
+
+                    # Analyze frame
+                    result = DeepFace.analyze(
+                        img_path=temp_path,
+                        actions=['emotion'],
+                        enforce_detection=False,
+                        detector_backend='opencv'
+                    )
+
+                    if isinstance(result, list):
+                        emotions_data = result[0]['emotion']
+                    else:
+                        emotions_data = result['emotion']
+
+                    # Add timestamp
+                    timestamp = frame_idx / fps
+                    emotions_data['timestamp'] = timestamp
+                    emotions_over_time.append(emotions_data)
+
+                    # Clean up
+                    os.unlink(temp_path)
+
                except Exception as e:
+                    print(f"Error processing frame {frame_idx}: {e}")
                    continue

+            cap.release()
+
+            if not emotions_over_time:
+                return None, "No emotions detected in video"
+
+            # Create DataFrame for plotting
+            df = pd.DataFrame(emotions_over_time)
+
+            # Plot emotions over time
+            plt.figure(figsize=(12, 8))
+
+            for emotion in self.emotions:
+                if emotion in df.columns:
+                    plt.plot(df['timestamp'], df[emotion], label=emotion.title(), linewidth=2)
+
+            plt.xlabel('Time (seconds)')
+            plt.ylabel('Confidence (%)')
+            plt.title('Emotions Over Time')
+            plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
+            plt.grid(True, alpha=0.3)
+            plt.tight_layout()
+
+            # Save plot
+            img_buffer = io.BytesIO()
+            plt.savefig(img_buffer, format='png', dpi=150, bbox_inches='tight')
+            img_buffer.seek(0)
+            plt.close()
+
+            chart_image = Image.open(img_buffer)
+
+            # Calculate average emotions
+            avg_emotions = df[self.emotions].mean().sort_values(ascending=False)
+
+            result_text = f"**Video Analysis Complete**\n"
+            result_text += f"**Frames Analyzed:** {len(emotions_over_time)}\n"
+            result_text += f"**Duration:** {df['timestamp'].max():.1f} seconds\n\n"
+            result_text += "**Average Emotions:**\n"
+
+            for emotion, confidence in avg_emotions.items():
+                result_text += f"• {emotion.title()}: {confidence:.1f}%\n"
+
+            return chart_image, result_text
+
+        except Exception as e:
+            return None, f"Error processing video: {str(e)}"

+# Initialize detector
+detector = EmotionDetector()

+# Create Gradio interface
def create_interface():
+    with gr.Blocks(title="Emotion Detection App", theme=gr.themes.Soft()) as demo:
+        gr.Markdown(
+            """
+            # 🎭 Emotion Detection App
+
+            Upload an image or video to detect emotions using AI. This app uses DeepFace for accurate emotion recognition.
+
+            **Supported emotions:** Happy, Sad, Angry, Fear, Surprise, Disgust, Neutral
+            """
+        )

+        with gr.Tabs():
+            # Image Analysis Tab
+            with gr.Tab("📸 Image Analysis"):
+                with gr.Row():
+                    with gr.Column():
+                        image_input = gr.Image(
+                            label="Upload Image",
+                            type="pil"
+                        )
+                        image_button = gr.Button("Analyze Emotions", variant="primary")

+                    with gr.Column():
+                        image_chart = gr.Image(label="Emotion Chart")
+                        image_results = gr.Markdown(label="Results")
+
+                image_button.click(
+                    fn=detector.detect_emotions_image,
+                    inputs=[image_input],
+                    outputs=[image_chart, image_results]
                )
+
+            # Video Analysis Tab
+            with gr.Tab("🎥 Video Analysis"):
+                with gr.Row():
+                    with gr.Column():
+                        video_input = gr.Video(label="Upload Video")
+                        with gr.Row():
+                            sample_rate = gr.Slider(
+                                minimum=10,
+                                maximum=60,
+                                value=30,
+                                step=5,
+                                label="Frame Sampling Rate"
+                            )
+                        video_button = gr.Button("Analyze Video", variant="primary")
+
+                    with gr.Column():
+                        video_chart = gr.Image(label="Emotions Over Time")
+                        video_results = gr.Markdown(label="Results")

+                video_button.click(
+                    fn=detector.detect_emotions_video,
+                    inputs=[video_input, sample_rate],
+                    outputs=[video_chart, video_results]
+                )

+        # Examples
+        gr.Markdown("### 📋 Instructions")
+        gr.Markdown(
+            """
+            **For Images:**
+            - Upload any image with visible faces
+            - The app will detect and analyze emotions
+            - Results show confidence percentages for each emotion
+
+            **For Videos:**
+            - Upload video files (MP4, AVI, MOV, etc.)
+            - Adjust frame sampling rate to balance speed vs accuracy
+            - Lower values = more frames analyzed = more accurate but slower
+            - Higher values = fewer frames analyzed = faster but less detailed
+
+            **Tips:**
+            - Ensure faces are clearly visible and well-lit
+            - The app works best with front-facing faces
+            - Multiple faces in one image/video are supported
+            """
        )

+    return demo

+# Launch the app
if __name__ == "__main__":
+    demo = create_interface()
+    demo.launch()
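The new EmotionDetector class can also be exercised outside the Gradio UI. A minimal sketch (the sample file names are hypothetical; it assumes the file above is saved as app.py and that deepface, opencv-python, matplotlib, pandas and Pillow are installed):

from PIL import Image
from app import EmotionDetector  # "app" is the module name of the file above

det = EmotionDetector()

# Single image: returns (PIL chart image, markdown summary), or (None, error text).
chart, summary = det.detect_emotions_image(Image.open("sample.jpg"))
print(summary)

# Video: sample_rate is a frame stride, so 30 keeps roughly one frame per second
# of a 30 fps clip; a 60-second clip then triggers about 60 DeepFace calls.
chart, summary = det.detect_emotions_video("sample.mp4", sample_rate=30)
print(summary)
if chart is not None:
    chart.save("emotions_over_time.png")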
|