Update app.py
app.py CHANGED
@@ -1,39 +1,25 @@
-# Install required libraries
-# Uncomment the line below if you are running locally to install dependencies
-# !pip install gradio openai-whisper
-
import gradio as gr
-import whisper
+from transformers import pipeline

-# Load Whisper model
-model =
+# Load the Whisper model for speech recognition
+model = pipeline("automatic-speech-recognition", model="openai/whisper-medium")

-
-
-    ""
-
-    """
-    try:
-        # Transcribe the audio
-        result = model.transcribe(audio)
-        transcription = result["text"]
-        return transcription
-    except Exception as e:
-        return f"Error transcribing audio: {str(e)}"
+def transcribe_audio(audio_file, language="english"):
+    # Transcribe the audio file
+    transcription = model(audio_file, generate_kwargs={"language": language})
+    return transcription["text"]

-# Gradio
-
+# Define the Gradio interface
+iface = gr.Interface(
    fn=transcribe_audio,
-    inputs=
-
-
-
-
-
-
-    theme="compact"
+    inputs=[
+        gr.Audio(type="filepath", label="Upload Audio File"),
+        gr.Dropdown(choices=["english", "spanish", "french", "german", "chinese", "japanese", "korean", "hindi"], label="Select Language", value="english")
+    ],
+    outputs=gr.Textbox(label="Transcription"),
+    title="Multi-Language Audio Transcription",
+    description="Upload an audio file and select the language to transcribe it."
)

-# Launch the interface
-
-interface.launch(share=True)
+# Launch the Gradio interface
+iface.launch()
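The update replaces the local openai-whisper model with the transformers pipeline, so the Space's dependency list changes accordingly. The requirements.txt file is not part of this diff; the following is only an assumed sketch of what it would need (a torch backend is needed for inference, and ffmpeg is generally required on the system so the pipeline can decode uploaded audio files):

# requirements.txt (assumed; not included in this diff)
gradio
transformers
torch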
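For reference, the new transcription path can be exercised outside Gradio. This is an illustrative sketch only; "sample.wav" is a placeholder path, and it assumes a transformers version that forwards generate_kwargs to Whisper's generate:

# Quick local check of the pipeline call used by the updated app.
# "sample.wav" is a placeholder, not a file in this repository.
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="openai/whisper-medium")
result = asr("sample.wav", generate_kwargs={"language": "english"})
print(result["text"])  # the "text" field is what the app returns to the Textbox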