Update app.py
app.py CHANGED
@@ -4,21 +4,18 @@ from transformers import pipeline
 # Load the Whisper model for speech recognition
 model = pipeline("automatic-speech-recognition", model="openai/whisper-medium")
 
-def transcribe_audio(audio_file, …):
-    # Transcribe the audio file
-    transcription = model(audio_file, …)
+def transcribe_audio(audio_file):
+    # Transcribe the audio file and automatically detect the language
+    transcription = model(audio_file)
     return transcription["text"]
 
 # Define the Gradio interface
 iface = gr.Interface(
     fn=transcribe_audio,
-    inputs=[
-        gr.Audio(type="filepath", label="Upload Audio File"),
-        gr.Dropdown(choices=["english", "spanish", "french", "german", "chinese", "japanese", "korean", "hindi"], label="Select Language", value="english")
-    ],
+    inputs=gr.Audio(type="filepath", label="Upload Audio File"),
     outputs=gr.Textbox(label="Transcription"),
-    title="…",
-    description="Upload an audio file and …",
+    title="Automatic Language Detection & Audio Transcription",
+    description="Upload an audio file, and the system will automatically detect the language and transcribe it."
 )
 
 # Launch the Gradio interface
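
For reference, the full app.py after this commit would look roughly like the sketch below. The hunk only covers lines 4 onward, so the import gradio as gr line and the closing iface.launch() call are assumptions, inferred from the gr. references and the trailing "# Launch the Gradio interface" comment rather than shown in the diff.

import gradio as gr
from transformers import pipeline

# Load the Whisper model for speech recognition
model = pipeline("automatic-speech-recognition", model="openai/whisper-medium")

def transcribe_audio(audio_file):
    # Transcribe the audio file; Whisper detects the language automatically
    transcription = model(audio_file)
    return transcription["text"]

# Define the Gradio interface
iface = gr.Interface(
    fn=transcribe_audio,
    inputs=gr.Audio(type="filepath", label="Upload Audio File"),
    outputs=gr.Textbox(label="Transcription"),
    title="Automatic Language Detection & Audio Transcription",
    description="Upload an audio file, and the system will automatically detect the language and transcribe it."
)

# Launch the Gradio interface
iface.launch()

Dropping the language Dropdown works because the transformers ASR pipeline lets Whisper pick the language on its own when none is specified; if a fixed language were still wanted, it could be forced per call via generate_kwargs, e.g. model(audio_file, generate_kwargs={"language": "french"}).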