Update app.py
app.py
CHANGED
@@ -5,6 +5,8 @@ import requests
 from transformers import pipeline
 import torch
 
+HF_TOKEN = os.getenv("Hf_token")
+
 # Set your FastAPI backend endpoint
 BACKEND_URL = "https://asr-evaluation-backend.emergentai.ug/submit-feedback"
 
@@ -34,7 +36,15 @@ os.makedirs("responses", exist_ok=True)
 
 # Transcription function
 device = 0 if torch.cuda.is_available() else -1
-asr_pipelines = {
+asr_pipelines = {
+    lang: pipeline(
+        "automatic-speech-recognition",
+        model=model_name,
+        device=device,
+        token=HF_TOKEN
+    )
+    for lang, model_name in model_map.items()
+}
 
 def transcribe(audio, language):
     asr = asr_pipelines[language]
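For context: the net effect of this commit is that each per-language ASR pipeline is now built with a Hugging Face access token read from the environment, so gated or private Hub models can be downloaded. Below is a minimal, self-contained sketch of the same idea; the model id, audio path, and token value are placeholders and not taken from app.py.

import os
from transformers import pipeline

# Assumption: the token is stored as a Space secret and exposed as the
# "Hf_token" environment variable, matching os.getenv("Hf_token") in app.py.
os.environ.setdefault("Hf_token", "hf_xxx")  # placeholder value for illustration

asr = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-small",   # placeholder model id, not from the commit
    device=-1,                      # CPU here; app.py selects a GPU when available
    token=os.getenv("Hf_token"),    # authenticates model downloads from the Hub
)
print(asr("sample.wav")["text"])    # "sample.wav" is a placeholder audio file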