Beijuka committed on
Commit
d337e80
·
verified ·
1 Parent(s): 58be87c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -10
app.py CHANGED
@@ -4,7 +4,6 @@ import os
4
  import requests
5
  from transformers import pipeline
6
  import torch
7
- device = 0 if torch.cuda.is_available() else -1
8
 
9
  # Set your FastAPI backend endpoint
10
  BACKEND_URL = "https://asr-evaluation-backend.emergentai.ug/submit-feedback"
@@ -34,8 +33,11 @@ model_map = {
34
  os.makedirs("responses", exist_ok=True)
35
 
36
  # Transcription function
 
 
 
37
  def transcribe(audio, language):
38
- asr = pipeline("automatic-speech-recognition", model=model_map[language], device)
39
  text = asr(audio)["text"]
40
  return text, audio
41
 
@@ -124,14 +126,14 @@ with gr.Blocks() as demo:
124
  output_msg = gr.Textbox(interactive=False)
125
 
126
  save_btn.click(
127
- fn=save_feedback,
128
- inputs=[
129
- audio_input, transcribed_text, user_id, lang, env, device, domain, accuracy,
130
- transcript_edit, orthography, orthography_issues,
131
- meaning, meaning_loss, errors, error_examples, performance
132
- ],
133
- outputs=[output_msg]
134
- )
135
  def reveal_ui(user_input):
136
  if user_input.strip():
137
  return gr.update(visible=True)
 
4
  import requests
5
  from transformers import pipeline
6
  import torch
 
7
 
8
  # Set your FastAPI backend endpoint
9
  BACKEND_URL = "https://asr-evaluation-backend.emergentai.ug/submit-feedback"
 
33
  os.makedirs("responses", exist_ok=True)
34
 
35
  # Transcription function
36
+ device = 0 if torch.cuda.is_available() else -1
37
+ asr_pipelines = {lang: pipeline("automatic-speech-recognition", model=model_name, device=device) for lang, model_name in model_map.items()}
38
+
39
  def transcribe(audio, language):
40
+ asr = asr_pipelines[language]
41
  text = asr(audio)["text"]
42
  return text, audio
43
 
 
126
  output_msg = gr.Textbox(interactive=False)
127
 
128
  save_btn.click(
129
+ fn=save_feedback,
130
+ inputs=[
131
+ audio_input, transcribed_text, user_id, lang, env, device, domain, accuracy,
132
+ transcript_edit, orthography, orthography_issues,
133
+ meaning, meaning_loss, errors, error_examples, performance
134
+ ],
135
+ outputs=[output_msg]
136
+ )
137
  def reveal_ui(user_input):
138
  if user_input.strip():
139
  return gr.update(visible=True)