Futuresony committed on
Commit
85142b9
·
verified ·
1 Parent(s): c7321b9

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +62 -0
app.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from gradio_client import Client

# Client for the existing chat-model Space; all user messages are routed here.
# Call the existing chat model
chat_client = Client("Futuresony/Mr.Events")

# Load the Whisper model - using the public space as the specific model was not found
# You might need to adjust this if a specific space for whisper-large-v3-turbo becomes available or accessible
# NOTE(review): constructing a Client performs a network call at import time;
# if both Spaces are unreachable this raises and the app fails to start — confirm
# that is acceptable for the deployment target.
try:
    whisper_client = Client("openai/whisper-large-v3-turbo")
except Exception as e:
    # Best-effort fallback to the generic public Whisper Space.
    print(f"Could not load 'openai/whisper-large-v3-turbo', falling back to 'openai/whisper'. Error: {e}")
    whisper_client = Client("openai/whisper")
14
+
15
def chat_with_model(user_input):
    """Forward *user_input* to the remote chat Space and return its reply.

    Uses the module-level ``chat_client`` and the Space's ``/chat`` endpoint.
    The remote response is returned unchanged.
    """
    reply = chat_client.predict(query=user_input, api_name="/chat")
    return reply
22
+
23
def transcribe_audio(audio_input):
    """Transcribe *audio_input* with the remote Whisper Space.

    Returns an empty string when no audio was provided. Remote failures are
    reported as a returned message (never raised) so the UI keeps working.
    """
    # Guard clause: nothing recorded/uploaded yet.
    if audio_input is None:
        return ""

    try:
        # The API name might vary depending on the specific Whisper space
        # Common API names are "/predict", "/run"
        return whisper_client.predict(audio_input, api_name="/predict")
    except Exception as e:
        print(f"Error during transcription: {e}")
        return f"Transcription failed: {e}"
35
+
36
def send_message(transcribed_text, manual_text_input):
    """Choose an input source and return the chat model's response.

    Transcribed audio takes priority over manually typed text. Inputs that
    are ``None``, empty, or whitespace-only are treated as absent, so stray
    spaces (or a blank transcription) are never sent to the model — the
    original truthiness check would forward e.g. "   " as a real message.

    Returns the model response string, or a prompt asking the user for
    input when both fields are effectively empty.
    """
    # Normalize both sources: tolerate None and ignore whitespace-only text.
    message = (transcribed_text or "").strip() or (manual_text_input or "").strip()
    if message:
        return chat_with_model(message)
    return "Please provide audio or text input."
44
+
45
# Build the Gradio UI: audio/text input on the left, model reply on the right.
with gr.Blocks() as demo:
    gr.Markdown("## 💬 Test the ABSA Model Chat with Audio Input")

    with gr.Row():
        with gr.Column(scale=3):
            mic_audio = gr.Audio(sources=["microphone", "upload"], label="Audio Input")
            # Read-only: filled automatically by the transcription callback.
            transcript_box = gr.Textbox(label="Transcribed Text", lines=3, interactive=False)
            typed_box = gr.Textbox(label="Or type your message here", lines=3)
            send_button = gr.Button("Send")
        with gr.Column(scale=5):
            # Read-only: shows the chat model's reply.
            reply_box = gr.Textbox(label="Model Response", lines=6, interactive=False)

    # Wire events: new audio triggers transcription; Send dispatches to the model.
    mic_audio.change(fn=transcribe_audio, inputs=mic_audio, outputs=transcript_box)
    send_button.click(fn=send_message, inputs=[transcript_box, typed_box], outputs=reply_box)

# Launch the Gradio application
demo.launch(share=True)  # share=True is required for Colab