jsbeaudry committed · verified
Commit e69b450 · 1 Parent(s): 865151e

Update app.py

Files changed (1)
  app.py  +5 -67
app.py CHANGED
@@ -300,62 +300,6 @@ async def demo_conversation():
     # Initialize all models
     await ai_conversation.initialize_models()
 
-    # # Example usage
-    # audio_file = "/content/Recording 2.wav"  # Replace with your audio file path
-    # system_prompt = "You are a helpful assistant. Please provide clear and concise responses."
-
-    # # Process the conversation
-    # result = await ai_conversation.process_conversation(audio_file, system_prompt)
-
-    # if "error" in result:
-    #     print(f"Error: {result['error']}")
-    # else:
-    #     print(f"Transcribed: {result['transcribed_text']}")
-    #     print(f"Thinking: {result['thinking']}")
-    #     print(f"Response: {result['response_text']}")
-    #     print(f"Audio saved to: {result['output_audio']}")
-    #     print(f"Processing times: {result['processing_times']}")
-
-# async def demo_batch_processing():
-#     """Demonstration of batch processing"""
-#     ai_conversation = AsyncAIConversation()
-#     await ai_conversation.initialize_models()
-
-#     # Example batch processing
-#     audio_files = [
-#         "/content/Recording 1.wav",
-#         "/content/Recording 2.wav",
-#         "/content/Recording 3.wav"
-#     ]
-
-#     results = await ai_conversation.batch_process(audio_files)
-
-#     for i, result in enumerate(results):
-#         print(f"File {i+1}: {result}")
-
-# Additional utility function for testing Kokoro TTS standalone
-# async def test_kokoro_tts():
-#     """Test Kokoro TTS functionality standalone"""
-#     try:
-#         tts_synthesizer = KPipeline(lang_code='a')
-
-#         test_text = "Hello, this is a test of the Kokoro text-to-speech system."
-
-#         # Generate audio
-#         generator = tts_synthesizer(test_text, voice='af_heart')
-
-#         for i, (gs, ps, audio) in enumerate(generator):
-#             output_path = f"kokoro_test_{i}.wav"
-#             sf.write(output_path, audio, 24000)
-#             print(f"Test audio {i} saved to: {output_path}")
-
-#             # Only process first chunk for testing
-#             if i == 0:
-#                 break
-
-#     except Exception as e:
-#         print(f"Error testing Kokoro TTS: {e}")
-
 
 
 # Create the async function wrapper for Gradio
@@ -411,19 +355,13 @@ with gr.Blocks() as demo:
         outputs=[status_output, response_text_output, response_audio_output, processing_times_output]
     )
 
-    # Launch the Gradio interface
-    # We need to run the Gradio app within an async context if we're using await inside the handler.
-    # However, Gradio's launch already handles the async loop for the button clicks.
-    # The key is that ai_conversation.initialize_models() must be awaited *before* launching Gradio.
-
-    # Since the notebook already executed the initialization:
-    # ai_conversation = AsyncAIConversation()
-    # await ai_conversation.initialize_models()
-    # We can directly launch the demo.
 
 if __name__ == "__main__":
-
-    awit demo_conversation()
+
+    def initiate():
+        asyncio.run(demo_conversation())
+
+    initiate()
 
     # Gradio launch itself runs an event loop.
     # Ensure ai_conversation is initialized in the notebook before this cell is run.
 
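For context, the net effect of this commit is to drop the commented-out demo/batch/Kokoro test code and to replace the broken "awit demo_conversation()" line with a small wrapper that drives the coroutine through asyncio.run() before Gradio takes over. Below is a minimal, self-contained sketch of that startup pattern. It is an illustration based only on the names visible in this diff (demo_conversation, ai_conversation.initialize_models, demo); the AsyncAIConversation body, the Blocks contents, and the final demo.launch() call are assumptions, not something the diff itself shows.

import asyncio

import gradio as gr


class AsyncAIConversation:
    """Stand-in for the app's model wrapper; only the init hook is sketched here."""

    async def initialize_models(self):
        # Placeholder for the real async model loading (STT, LLM, Kokoro TTS, ...).
        await asyncio.sleep(0)


ai_conversation = AsyncAIConversation()


async def demo_conversation():
    # Initialize all models before the UI starts handling requests.
    await ai_conversation.initialize_models()


with gr.Blocks() as demo:
    # Placeholder UI; the real app wires up inputs, outputs, and button handlers here.
    gr.Markdown("Demo UI goes here.")


if __name__ == "__main__":

    def initiate():
        # asyncio.run() creates a fresh event loop, runs the coroutine to
        # completion, and closes the loop, so initialization is finished
        # before Gradio starts its own event loop.
        asyncio.run(demo_conversation())

    initiate()

    # Assumed: the app launches the Gradio server afterwards.
    demo.launch()

Using asyncio.run() here, rather than a bare module-level await (which is what the deleted, misspelled line was attempting), keeps the async initialization separate from the event loop that demo.launch() starts.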