import gradio as gr
from gradio_client import Client, handle_file

# Client for the existing chat model Space
chat_client = Client("Futuresony/Mr.Events")

# Load a Whisper Space for transcription. A dedicated whisper-large-v3-turbo Space was
# not found, so fall back to the public "openai/whisper" Space if it is unavailable.
# Adjust this if a specific whisper-large-v3-turbo Space becomes accessible.
try:
    whisper_client = Client("openai/whisper-large-v3-turbo")
except Exception as e:
    print(f"Could not load 'openai/whisper-large-v3-turbo', falling back to 'openai/whisper'. Error: {e}")
    whisper_client = Client("openai/whisper")
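
# Sketch (optional debugging aid, not required for the app to work): Client.view_api()
# prints a Space's callable endpoints and their expected parameters. Printing both here
# is an easy way to confirm the api_name values used below ("/chat" for the chat Space,
# and the transcription endpoint name, which varies between Whisper Spaces).
print("Chat Space endpoints:")
chat_client.view_api()
print("Whisper Space endpoints:")
whisper_client.view_api()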

def chat_with_model(user_input):
    """Sends text input to the chat model and returns the response."""
    result = chat_client.predict(
        query=user_input,
        api_name="/chat"
    )
    return result

def transcribe_audio(audio_input):
    """Transcribes audio input (a local file path) using the Whisper Space."""
    if audio_input is None:
        return ""
    try:
        # The API name can vary between Whisper Spaces; "/predict" and "/run" are common.
        # handle_file() wraps the local path so that newer gradio_client releases upload
        # it correctly; older releases also accepted a bare path string.
        transcript = whisper_client.predict(handle_file(audio_input), api_name="/predict")
        return transcript
    except Exception as e:
        print(f"Error during transcription: {e}")
        return f"Transcription failed: {e}"

def send_message(transcribed_text, manual_text_input):
    """Determines which input to send to the chat model and returns the response."""
    if transcribed_text:
        return chat_with_model(transcribed_text)
    elif manual_text_input:
        return chat_with_model(manual_text_input)
    else:
        return "Please provide audio or text input."

# Create the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## 💬 Test the ABSA Model Chat with Audio Input")
    with gr.Row():
        with gr.Column(scale=3):
            # type="filepath" makes the component return a local file path, which is what
            # transcribe_audio() forwards to the Whisper Space (the default "numpy" type
            # would return a (sample_rate, array) tuple instead).
            audio_input = gr.Audio(sources=["microphone", "upload"], type="filepath", label="Audio Input")
            transcribed_text = gr.Textbox(label="Transcribed Text", lines=3, interactive=False)  # Transcript is display-only
            manual_text_input = gr.Textbox(label="Or type your message here", lines=3)
            submit_btn = gr.Button("Send")
        with gr.Column(scale=5):
            output_text = gr.Textbox(label="Model Response", lines=6, interactive=False)  # Response is display-only

    # Connect interface elements to functions (inside the Blocks context)
    audio_input.change(fn=transcribe_audio, inputs=audio_input, outputs=transcribed_text)
    submit_btn.click(fn=send_message, inputs=[transcribed_text, manual_text_input], outputs=output_text)

# Launch the Gradio application
demo.launch(share=True)  # share=True is required for Colab
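
# Usage sketch (assumptions: the app is running, and Gradio's default endpoint naming
# exposes the click handler as "/send_message"; the share URL below is a placeholder,
# and client.view_api() should be used to confirm the real endpoint name). From a
# separate script, the chat flow could be exercised like this:
#
#   from gradio_client import Client
#   client = Client("https://xxxxxxxxxxxx.gradio.live")  # URL printed by demo.launch()
#   client.view_api()
#   print(client.predict("", "Hello!", api_name="/send_message"))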