Update app.py
Browse files
app.py
CHANGED
@@ -3,6 +3,8 @@ import gradio as gr
|
|
3 |
import os
|
4 |
import requests
|
5 |
from transformers import pipeline
|
|
|
|
|
6 |
|
7 |
# Set your FastAPI backend endpoint
|
8 |
BACKEND_URL = "https://asr-evaluation-backend.emergentai.ug/submit-feedback"
|
@@ -33,7 +35,7 @@ os.makedirs("responses", exist_ok=True)
|
|
33 |
|
34 |
# Transcription function
|
35 |
def transcribe(audio, language):
|
36 |
-
asr = pipeline("automatic-speech-recognition", model=model_map[language], device
|
37 |
text = asr(audio)["text"]
|
38 |
return text, audio
|
39 |
|
@@ -136,6 +138,6 @@ with gr.Blocks() as demo:
|
|
136 |
else:
|
137 |
return gr.update(visible=False)
|
138 |
|
139 |
-
|
140 |
# Launch the interface
|
141 |
demo.launch()
|
|
|
3 |
import os
|
4 |
import requests
|
5 |
from transformers import pipeline
|
6 |
+
import torch
|
7 |
+
device = 0 if torch.cuda.is_available() else -1
|
8 |
|
9 |
# Set your FastAPI backend endpoint
|
10 |
BACKEND_URL = "https://asr-evaluation-backend.emergentai.ug/submit-feedback"
|
|
|
35 |
|
36 |
# Transcription function
|
37 |
def transcribe(audio, language):
    """Run speech recognition on an audio input in the selected language.

    Parameters:
        audio: audio input passed straight to the transformers ASR pipeline
            (presumably a filepath handed over by the Gradio Audio component —
            confirm against the UI wiring).
        language: key into the module-level ``model_map`` choosing which
            model checkpoint to load.

    Returns:
        tuple: ``(text, audio)`` — the transcription string and the original
        audio input (echoed back so the UI can replay it).
    """
    # BUGFIX: the keyword form `device=device` is required here — a bare
    # positional `device` after the `model=` keyword is a SyntaxError.
    # `device` is the module-level GPU/CPU index (0 if CUDA is available,
    # else -1), the integer form the transformers pipeline accepts.
    # NOTE(review): this rebuilds the pipeline (re-loading the model) on
    # every call; consider caching per-language pipelines if latency matters.
    asr = pipeline(
        "automatic-speech-recognition",
        model=model_map[language],
        device=device,
    )
    text = asr(audio)["text"]
    return text, audio
|
41 |
|
|
|
138 |
else:
|
139 |
return gr.update(visible=False)
|
140 |
|
141 |
+
proceed_btn.click(fn=reveal_ui, inputs=[user_id], outputs=[main_ui])
|
142 |
# Launch the interface
|
143 |
demo.launch()
|