Xalphinions committed on
Commit 945fdb4 · verified · 1 Parent(s): 83d5b74

Upload folder using huggingface_hub

Files changed (1)
  1. app.py +282 -150
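The commit message above refers to huggingface_hub's folder-upload workflow rather than a manual git push. For context, a commit like this is typically produced with `HfApi.upload_folder`; the sketch below is illustrative only, and the `repo_id` shown is a placeholder rather than a value taken from this page:

from huggingface_hub import HfApi

api = HfApi()
# Upload the local folder (including app.py) as a single commit to the target repo.
api.upload_folder(
    folder_path=".",                     # local folder to push
    repo_id="<namespace>/<space-name>",  # placeholder, not stated on this page
    repo_type="space",
    commit_message="Upload folder using huggingface_hub",
)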
app.py CHANGED
@@ -78,165 +78,297 @@ def app_process_audio_data(waveform, sample_rate):
  # Similarly for images, but let's import the original one
  from preprocess import process_image_data

- # Define prediction function
- def predict_sweetness(audio, image, model_path):
- """Predict sweetness of a watermelon from audio and image input"""
- try:
- # Now check CUDA availability inside the GPU-decorated function
- if torch.cuda.is_available():
- device = torch.device("cuda")
- print(f"\033[92mINFO\033[0m: CUDA is available. Using device: {device}")
- else:
- device = torch.device("cpu")
- print(f"\033[92mINFO\033[0m: CUDA is not available. Using device: {device}")
-
- # Load model inside the function to ensure it's on the correct device
- model = WatermelonModel().to(device)
- model.load_state_dict(torch.load(model_path, map_location=device))
- model.eval()
- print(f"\033[92mINFO\033[0m: Loaded model from {model_path}")
-
- # Debug information about input types
- print(f"\033[92mDEBUG\033[0m: Audio input type: {type(audio)}")
- print(f"\033[92mDEBUG\033[0m: Audio input shape/length: {len(audio)}")
- print(f"\033[92mDEBUG\033[0m: Image input type: {type(image)}")
- if isinstance(image, np.ndarray):
- print(f"\033[92mDEBUG\033[0m: Image input shape: {image.shape}")
-
- # Handle different audio input formats
- if isinstance(audio, tuple) and len(audio) == 2:
- # Standard Gradio format: (sample_rate, audio_data)
- sample_rate, audio_data = audio
- print(f"\033[92mDEBUG\033[0m: Audio sample rate: {sample_rate}")
- print(f"\033[92mDEBUG\033[0m: Audio data shape: {audio_data.shape}")
- elif isinstance(audio, tuple) and len(audio) > 2:
- # Sometimes Gradio returns (sample_rate, audio_data, other_info...)
- sample_rate, audio_data = audio[0], audio[-1]
- print(f"\033[92mDEBUG\033[0m: Audio sample rate: {sample_rate}")
- print(f"\033[92mDEBUG\033[0m: Audio data shape: {audio_data.shape}")
- elif isinstance(audio, str):
- # Direct path to audio file
- audio_data, sample_rate = torchaudio.load(audio)
- print(f"\033[92mDEBUG\033[0m: Loaded audio from path with shape: {audio_data.shape}")
- else:
- return f"Error: Unsupported audio format. Got {type(audio)}"
-
- # Create a temporary file path for the audio and image
- temp_dir = "temp"
- os.makedirs(temp_dir, exist_ok=True)
-
- temp_audio_path = os.path.join(temp_dir, "temp_audio.wav")
- temp_image_path = os.path.join(temp_dir, "temp_image.jpg")
-
- # Import necessary libraries
- from PIL import Image
-
- # Audio handling - direct processing from the data in memory
- if isinstance(audio_data, np.ndarray):
- # Convert numpy array to tensor
- print(f"\033[92mDEBUG\033[0m: Converting numpy audio with shape {audio_data.shape} to tensor")
- audio_tensor = torch.tensor(audio_data).float()
-
- # Handle different audio dimensions
- if audio_data.ndim == 1:
- # Single channel audio
- audio_tensor = audio_tensor.unsqueeze(0)
- elif audio_data.ndim == 2:
- # Ensure channels are first dimension
- if audio_data.shape[0] > audio_data.shape[1]:
- # More rows than columns, probably (samples, channels)
- audio_tensor = torch.tensor(audio_data.T).float()
- else:
- # Already a tensor
- audio_tensor = audio_data.float()
-
- print(f"\033[92mDEBUG\033[0m: Audio tensor shape before processing: {audio_tensor.shape}")
-
- # Skip saving/loading and process directly
- mfcc = app_process_audio_data(audio_tensor, sample_rate)
- print(f"\033[92mDEBUG\033[0m: MFCC tensor shape after processing: {mfcc.shape if mfcc is not None else None}")
-
- # Image handling
- if isinstance(image, np.ndarray):
- print(f"\033[92mDEBUG\033[0m: Converting numpy image with shape {image.shape} to PIL")
- pil_image = Image.fromarray(image)
- pil_image.save(temp_image_path)
- print(f"\033[92mDEBUG\033[0m: Saved image to {temp_image_path}")
- elif isinstance(image, str):
- # If image is already a path
- temp_image_path = image
- print(f"\033[92mDEBUG\033[0m: Using provided image path: {temp_image_path}")
- else:
- return f"Error: Unsupported image format. Got {type(image)}"
-
- # Process image
- print(f"\033[92mDEBUG\033[0m: Loading and preprocessing image from {temp_image_path}")
- image_tensor = torchvision.io.read_image(temp_image_path)
- print(f"\033[92mDEBUG\033[0m: Loaded image shape: {image_tensor.shape}")
- image_tensor = image_tensor.float()
- processed_image = process_image_data(image_tensor)
- print(f"\033[92mDEBUG\033[0m: Processed image shape: {processed_image.shape if processed_image is not None else None}")
-
- # Add batch dimension for inference and move to device
- if mfcc is not None:
- mfcc = mfcc.unsqueeze(0).to(device)
- print(f"\033[92mDEBUG\033[0m: Final MFCC shape with batch dimension: {mfcc.shape}")
-
- if processed_image is not None:
- processed_image = processed_image.unsqueeze(0).to(device)
- print(f"\033[92mDEBUG\033[0m: Final image shape with batch dimension: {processed_image.shape}")
-
- # Run inference
- print(f"\033[92mDEBUG\033[0m: Running inference on device: {device}")
- if mfcc is not None and processed_image is not None:
- with torch.no_grad():
- sweetness = model(mfcc, processed_image)
- print(f"\033[92mDEBUG\033[0m: Prediction successful: {sweetness.item()}")
- else:
- return "Error: Failed to process inputs. Please check the debug logs."
-
- # Format the result
- if sweetness is not None:
- result = f"Predicted Sweetness: {sweetness.item():.2f}/13"
-
- # Add a qualitative description
- if sweetness.item() < 9:
- result += "\n\nThis watermelon is not very sweet. You might want to choose another one."
- elif sweetness.item() < 10:
- result += "\n\nThis watermelon has moderate sweetness."
- elif sweetness.item() < 11:
- result += "\n\nThis watermelon is sweet! A good choice."
  else:
- result += "\n\nThis watermelon is very sweet! Excellent choice!"

- return result
- else:
- return "Error: Could not predict sweetness. Please try again with different inputs."

- except Exception as e:
- import traceback
- error_msg = f"Error: {str(e)}\n\n"
- error_msg += traceback.format_exc()
- print(f"\033[91mERR!\033[0m: {error_msg}")
- return error_msg
-
- # Apply GPU decorator if available in Gradio Spaces environment
- if HAS_SPACES:
- predict_sweetness_gpu = spaces.GPU(predict_sweetness)
- print("\033[92mINFO\033[0m: GPU optimization enabled for prediction function")
  else:
- predict_sweetness_gpu = predict_sweetness

  def create_app(model_path):
  """Create and launch the Gradio interface"""
  # Define the prediction function with model path
  def predict_fn(audio, image):
- if HAS_SPACES:
- # Use GPU-optimized function if available
- return predict_sweetness_gpu(audio, image, model_path)
- else:
- # Use regular function otherwise
- return predict_sweetness(audio, image, model_path)

  # Create Gradio interface
  with gr.Blocks(title="Watermelon Sweetness Predictor", theme=gr.themes.Soft()) as interface:

  # Similarly for images, but let's import the original one
  from preprocess import process_image_data

+ # Apply GPU decorator directly to the function if available
+ if HAS_SPACES:
+ # Using the decorator directly on the function definition
+ @spaces.GPU
+ def predict_sweetness(audio, image, model_path):
+ """Function with GPU acceleration"""
+ try:
+ # Now check CUDA availability inside the GPU-decorated function
+ if torch.cuda.is_available():
+ device = torch.device("cuda")
+ print(f"\033[92mINFO\033[0m: CUDA is available. Using device: {device}")
+ else:
+ device = torch.device("cpu")
+ print(f"\033[92mINFO\033[0m: CUDA is not available. Using device: {device}")
+
+ # Load model inside the function to ensure it's on the correct device
+ model = WatermelonModel().to(device)
+ model.load_state_dict(torch.load(model_path, map_location=device))
+ model.eval()
+ print(f"\033[92mINFO\033[0m: Loaded model from {model_path}")
+
+ # Debug information about input types
+ print(f"\033[92mDEBUG\033[0m: Audio input type: {type(audio)}")
+ print(f"\033[92mDEBUG\033[0m: Audio input shape/length: {len(audio)}")
+ print(f"\033[92mDEBUG\033[0m: Image input type: {type(image)}")
+ if isinstance(image, np.ndarray):
+ print(f"\033[92mDEBUG\033[0m: Image input shape: {image.shape}")
+
+ # Handle different audio input formats
+ if isinstance(audio, tuple) and len(audio) == 2:
+ # Standard Gradio format: (sample_rate, audio_data)
+ sample_rate, audio_data = audio
+ print(f"\033[92mDEBUG\033[0m: Audio sample rate: {sample_rate}")
+ print(f"\033[92mDEBUG\033[0m: Audio data shape: {audio_data.shape}")
+ elif isinstance(audio, tuple) and len(audio) > 2:
+ # Sometimes Gradio returns (sample_rate, audio_data, other_info...)
+ sample_rate, audio_data = audio[0], audio[-1]
+ print(f"\033[92mDEBUG\033[0m: Audio sample rate: {sample_rate}")
+ print(f"\033[92mDEBUG\033[0m: Audio data shape: {audio_data.shape}")
+ elif isinstance(audio, str):
+ # Direct path to audio file
+ audio_data, sample_rate = torchaudio.load(audio)
+ print(f"\033[92mDEBUG\033[0m: Loaded audio from path with shape: {audio_data.shape}")
  else:
+ return f"Error: Unsupported audio format. Got {type(audio)}"
+
+ # Create a temporary file path for the audio and image
+ temp_dir = "temp"
+ os.makedirs(temp_dir, exist_ok=True)
+
+ temp_audio_path = os.path.join(temp_dir, "temp_audio.wav")
+ temp_image_path = os.path.join(temp_dir, "temp_image.jpg")
+
+ # Import necessary libraries
+ from PIL import Image
+
+ # Audio handling - direct processing from the data in memory
+ if isinstance(audio_data, np.ndarray):
+ # Convert numpy array to tensor
+ print(f"\033[92mDEBUG\033[0m: Converting numpy audio with shape {audio_data.shape} to tensor")
+ audio_tensor = torch.tensor(audio_data).float()

+ # Handle different audio dimensions
+ if audio_data.ndim == 1:
+ # Single channel audio
+ audio_tensor = audio_tensor.unsqueeze(0)
+ elif audio_data.ndim == 2:
+ # Ensure channels are first dimension
+ if audio_data.shape[0] > audio_data.shape[1]:
+ # More rows than columns, probably (samples, channels)
+ audio_tensor = torch.tensor(audio_data.T).float()
+ else:
+ # Already a tensor
+ audio_tensor = audio_data.float()
+
+ print(f"\033[92mDEBUG\033[0m: Audio tensor shape before processing: {audio_tensor.shape}")
+
+ # Skip saving/loading and process directly
+ mfcc = app_process_audio_data(audio_tensor, sample_rate)
+ print(f"\033[92mDEBUG\033[0m: MFCC tensor shape after processing: {mfcc.shape if mfcc is not None else None}")
+
+ # Image handling
+ if isinstance(image, np.ndarray):
+ print(f"\033[92mDEBUG\033[0m: Converting numpy image with shape {image.shape} to PIL")
+ pil_image = Image.fromarray(image)
+ pil_image.save(temp_image_path)
+ print(f"\033[92mDEBUG\033[0m: Saved image to {temp_image_path}")
+ elif isinstance(image, str):
+ # If image is already a path
+ temp_image_path = image
+ print(f"\033[92mDEBUG\033[0m: Using provided image path: {temp_image_path}")
+ else:
+ return f"Error: Unsupported image format. Got {type(image)}"
+
+ # Process image
+ print(f"\033[92mDEBUG\033[0m: Loading and preprocessing image from {temp_image_path}")
+ image_tensor = torchvision.io.read_image(temp_image_path)
+ print(f"\033[92mDEBUG\033[0m: Loaded image shape: {image_tensor.shape}")
+ image_tensor = image_tensor.float()
+ processed_image = process_image_data(image_tensor)
+ print(f"\033[92mDEBUG\033[0m: Processed image shape: {processed_image.shape if processed_image is not None else None}")
+
+ # Add batch dimension for inference and move to device
+ if mfcc is not None:
+ mfcc = mfcc.unsqueeze(0).to(device)
+ print(f"\033[92mDEBUG\033[0m: Final MFCC shape with batch dimension: {mfcc.shape}")
+
+ if processed_image is not None:
+ processed_image = processed_image.unsqueeze(0).to(device)
+ print(f"\033[92mDEBUG\033[0m: Final image shape with batch dimension: {processed_image.shape}")
+
+ # Run inference
+ print(f"\033[92mDEBUG\033[0m: Running inference on device: {device}")
+ if mfcc is not None and processed_image is not None:
+ with torch.no_grad():
+ sweetness = model(mfcc, processed_image)
+ print(f"\033[92mDEBUG\033[0m: Prediction successful: {sweetness.item()}")
+ else:
+ return "Error: Failed to process inputs. Please check the debug logs."
+
+ # Format the result
+ if sweetness is not None:
+ result = f"Predicted Sweetness: {sweetness.item():.2f}/13"
+
+ # Add a qualitative description
+ if sweetness.item() < 9:
+ result += "\n\nThis watermelon is not very sweet. You might want to choose another one."
+ elif sweetness.item() < 10:
+ result += "\n\nThis watermelon has moderate sweetness."
+ elif sweetness.item() < 11:
+ result += "\n\nThis watermelon is sweet! A good choice."
+ else:
+ result += "\n\nThis watermelon is very sweet! Excellent choice!"
+
+ return result
+ else:
+ return "Error: Could not predict sweetness. Please try again with different inputs."
+
+ except Exception as e:
+ import traceback
+ error_msg = f"Error: {str(e)}\n\n"
+ error_msg += traceback.format_exc()
+ print(f"\033[91mERR!\033[0m: {error_msg}")
+ return error_msg

+ print("\033[92mINFO\033[0m: GPU-accelerated prediction function created with @spaces.GPU decorator")
  else:
+ # Regular version without GPU decorator for non-Spaces environments
+ def predict_sweetness(audio, image, model_path):
+ """Predict sweetness of a watermelon from audio and image input"""
+ try:
+ # Check for device - will be CPU in this case
+ device = torch.device("cpu")
+ print(f"\033[92mINFO\033[0m: Using device: {device}")
+
+ # Load model inside the function
+ model = WatermelonModel().to(device)
+ model.load_state_dict(torch.load(model_path, map_location=device))
+ model.eval()
+ print(f"\033[92mINFO\033[0m: Loaded model from {model_path}")
+
+ # Rest of function identical - processing code
+ # Debug information about input types
+ print(f"\033[92mDEBUG\033[0m: Audio input type: {type(audio)}")
+ print(f"\033[92mDEBUG\033[0m: Audio input shape/length: {len(audio)}")
+ print(f"\033[92mDEBUG\033[0m: Image input type: {type(image)}")
+ if isinstance(image, np.ndarray):
+ print(f"\033[92mDEBUG\033[0m: Image input shape: {image.shape}")
+
+ # Handle different audio input formats
+ if isinstance(audio, tuple) and len(audio) == 2:
+ # Standard Gradio format: (sample_rate, audio_data)
+ sample_rate, audio_data = audio
+ print(f"\033[92mDEBUG\033[0m: Audio sample rate: {sample_rate}")
+ print(f"\033[92mDEBUG\033[0m: Audio data shape: {audio_data.shape}")
+ elif isinstance(audio, tuple) and len(audio) > 2:
+ # Sometimes Gradio returns (sample_rate, audio_data, other_info...)
+ sample_rate, audio_data = audio[0], audio[-1]
+ print(f"\033[92mDEBUG\033[0m: Audio sample rate: {sample_rate}")
+ print(f"\033[92mDEBUG\033[0m: Audio data shape: {audio_data.shape}")
+ elif isinstance(audio, str):
+ # Direct path to audio file
+ audio_data, sample_rate = torchaudio.load(audio)
+ print(f"\033[92mDEBUG\033[0m: Loaded audio from path with shape: {audio_data.shape}")
+ else:
+ return f"Error: Unsupported audio format. Got {type(audio)}"
+
+ # Create a temporary file path for the audio and image
+ temp_dir = "temp"
+ os.makedirs(temp_dir, exist_ok=True)
+
+ temp_audio_path = os.path.join(temp_dir, "temp_audio.wav")
+ temp_image_path = os.path.join(temp_dir, "temp_image.jpg")
+
+ # Import necessary libraries
+ from PIL import Image
+
+ # Audio handling - direct processing from the data in memory
+ if isinstance(audio_data, np.ndarray):
+ # Convert numpy array to tensor
+ print(f"\033[92mDEBUG\033[0m: Converting numpy audio with shape {audio_data.shape} to tensor")
+ audio_tensor = torch.tensor(audio_data).float()
+
+ # Handle different audio dimensions
+ if audio_data.ndim == 1:
+ # Single channel audio
+ audio_tensor = audio_tensor.unsqueeze(0)
+ elif audio_data.ndim == 2:
+ # Ensure channels are first dimension
+ if audio_data.shape[0] > audio_data.shape[1]:
+ # More rows than columns, probably (samples, channels)
+ audio_tensor = torch.tensor(audio_data.T).float()
+ else:
+ # Already a tensor
+ audio_tensor = audio_data.float()
+
+ print(f"\033[92mDEBUG\033[0m: Audio tensor shape before processing: {audio_tensor.shape}")
+
+ # Skip saving/loading and process directly
+ mfcc = app_process_audio_data(audio_tensor, sample_rate)
+ print(f"\033[92mDEBUG\033[0m: MFCC tensor shape after processing: {mfcc.shape if mfcc is not None else None}")
+
+ # Image handling
+ if isinstance(image, np.ndarray):
+ print(f"\033[92mDEBUG\033[0m: Converting numpy image with shape {image.shape} to PIL")
+ pil_image = Image.fromarray(image)
+ pil_image.save(temp_image_path)
+ print(f"\033[92mDEBUG\033[0m: Saved image to {temp_image_path}")
+ elif isinstance(image, str):
+ # If image is already a path
+ temp_image_path = image
+ print(f"\033[92mDEBUG\033[0m: Using provided image path: {temp_image_path}")
+ else:
+ return f"Error: Unsupported image format. Got {type(image)}"
+
+ # Process image
+ print(f"\033[92mDEBUG\033[0m: Loading and preprocessing image from {temp_image_path}")
+ image_tensor = torchvision.io.read_image(temp_image_path)
+ print(f"\033[92mDEBUG\033[0m: Loaded image shape: {image_tensor.shape}")
+ image_tensor = image_tensor.float()
+ processed_image = process_image_data(image_tensor)
+ print(f"\033[92mDEBUG\033[0m: Processed image shape: {processed_image.shape if processed_image is not None else None}")
+
+ # Add batch dimension for inference and move to device
+ if mfcc is not None:
+ mfcc = mfcc.unsqueeze(0).to(device)
+ print(f"\033[92mDEBUG\033[0m: Final MFCC shape with batch dimension: {mfcc.shape}")
+
+ if processed_image is not None:
+ processed_image = processed_image.unsqueeze(0).to(device)
+ print(f"\033[92mDEBUG\033[0m: Final image shape with batch dimension: {processed_image.shape}")
+
+ # Run inference
+ print(f"\033[92mDEBUG\033[0m: Running inference on device: {device}")
+ if mfcc is not None and processed_image is not None:
+ with torch.no_grad():
+ sweetness = model(mfcc, processed_image)
+ print(f"\033[92mDEBUG\033[0m: Prediction successful: {sweetness.item()}")
+ else:
+ return "Error: Failed to process inputs. Please check the debug logs."
+
+ # Format the result
+ if sweetness is not None:
+ result = f"Predicted Sweetness: {sweetness.item():.2f}/13"
+
+ # Add a qualitative description
+ if sweetness.item() < 9:
+ result += "\n\nThis watermelon is not very sweet. You might want to choose another one."
+ elif sweetness.item() < 10:
+ result += "\n\nThis watermelon has moderate sweetness."
+ elif sweetness.item() < 11:
+ result += "\n\nThis watermelon is sweet! A good choice."
+ else:
+ result += "\n\nThis watermelon is very sweet! Excellent choice!"
+
+ return result
+ else:
+ return "Error: Could not predict sweetness. Please try again with different inputs."
+
+ except Exception as e:
+ import traceback
+ error_msg = f"Error: {str(e)}\n\n"
+ error_msg += traceback.format_exc()
+ print(f"\033[91mERR!\033[0m: {error_msg}")
+ return error_msg

  def create_app(model_path):
  """Create and launch the Gradio interface"""
  # Define the prediction function with model path
  def predict_fn(audio, image):
+ return predict_sweetness(audio, image, model_path)

  # Create Gradio interface
  with gr.Blocks(title="Watermelon Sweetness Predictor", theme=gr.themes.Soft()) as interface:
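
Taken together, the diff replaces the post-hoc `predict_sweetness_gpu = spaces.GPU(predict_sweetness)` wrapper with decoration at definition time, keeping a plain CPU version for non-Spaces environments. The condensed sketch below illustrates that structure only; the helper `_run_prediction` and the `try`/`except` import are illustrative stand-ins for code not shown in this hunk, not the actual contents of app.py:

import torch

try:
    import spaces  # provided on Hugging Face Spaces (ZeroGPU)
    HAS_SPACES = True
except ImportError:
    HAS_SPACES = False

def _run_prediction(audio, image, model_path, device):
    # Stand-in for the shared preprocessing + inference body shown in the diff.
    ...

if HAS_SPACES:
    @spaces.GPU  # GPU is attached for the duration of each call
    def predict_sweetness(audio, image, model_path):
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        return _run_prediction(audio, image, model_path, device)
else:
    def predict_sweetness(audio, image, model_path):
        # Non-Spaces environments run on CPU.
        return _run_prediction(audio, image, model_path, torch.device("cpu"))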