# One-liner alternative: load the hosted model and launch its default UI directly.
# import gradio as gr
# gr.load("models/openai/whisper-large-v3-turbo").launch()

import gradio as gr

# Load the Whisper model from the Hugging Face Hub as a callable Gradio app.
model = gr.load("models/openai/whisper-large-v3-turbo")

# Process the model output and return only the transcription text.
def process_transcription(audio_input):
    result = model(audio_input)
    print(result)  # Inspect the raw model output while debugging
    # Extract the transcription text directly
    transcription = result["text"]
    return transcription

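# Optional sanity check (a sketch, not part of the app): call the wrapper
# directly on a local recording before launching the UI. "sample.wav" is a
# hypothetical path; substitute a real audio file if you want to try it.
# print(process_transcription("sample.wav"))
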
# Launch the interface
gr.Interface(
    fn=process_transcription,
    inputs="audio",
    outputs="text"
).launch()