Update app.py
app.py CHANGED
@@ -3,8 +3,8 @@ import numpy as np
 import torch
 from datasets import load_dataset
 
-from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline
-
+# from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline
+from transformers import pipeline
 
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 
@@ -12,13 +12,19 @@ device = "cuda:0" if torch.cuda.is_available() else "cpu"
 asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)
 
 # load text-to-speech checkpoint and speaker embeddings
-processor = SpeechT5Processor.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl")
-
-model = SpeechT5ForTextToSpeech.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl").to(device)
-vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)
-
-embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
-speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
+# processor = SpeechT5Processor.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl")
+
+# model = SpeechT5ForTextToSpeech.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl").to(device)
+# vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)
+
+# embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
+# speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
+
+
+from transformers import VitsModel, VitsTokenizer
+
+model = VitsModel.from_pretrained("Matthijs/mms-tts-nld")
+tokenizer = VitsTokenizer.from_pretrained("Matthijs/mms-tts-nld")
 
 
 def translate(audio):
@@ -26,9 +32,17 @@ def translate(audio):
     return outputs["text"]
 
 
+# def synthesise(text):
+#     inputs = processor(text=text, return_tensors="pt")
+#     speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
+#     return speech.cpu()
+
 def synthesise(text):
-    inputs = processor(text=text, return_tensors="pt")
-    speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder)
+    inputs = tokenizer(text=text, return_tensors="pt")
+    input_ids = inputs["input_ids"]
+    with torch.no_grad():
+        outputs = model(input_ids)
+    speech = outputs.audio[0]
     return speech.cpu()
 
 
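Not shown in this diff is how the rest of app.py turns the translated text into playable audio. The sketch below is one plausible way to chain the unchanged translate step with the new VITS-based synthesise step; the speech_to_speech_translation name, the int16 scaling, and the Gradio wiring are assumptions about the surrounding demo, not part of this commit. MMS-TTS (VITS) checkpoints generate waveforms at 16 kHz, which is why 16000 is returned as the sampling rate.

import numpy as np
import gradio as gr

# Hypothetical glue code (not in this commit): run Whisper ASR/translation, then VITS TTS,
# and convert the float waveform in [-1, 1] to 16-bit PCM for a Gradio Audio output.
def speech_to_speech_translation(audio):
    translated_text = translate(audio)
    synthesised_speech = synthesise(translated_text)
    synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16)
    return 16000, synthesised_speech  # MMS-TTS models synthesise audio at 16 kHz

demo = gr.Interface(
    fn=speech_to_speech_translation,
    inputs=gr.Audio(type="filepath"),
    outputs=gr.Audio(label="Translated speech"),
)
demo.launch()

Because the pipeline stays in plain functions, synthesise can be swapped again later (as this commit does for SpeechT5) without touching the Gradio wiring.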