import torch
import gradio as gr
import soundfile as sf
import tempfile
import librosa

from transformers import (
    SpeechT5Processor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5HifiGan
)
from datasets import load_dataset

# Models: SpeechT5 voice-conversion checkpoint, its processor, and the HiFi-GAN vocoder
processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_vc")
model = SpeechT5ForSpeechToSpeech.from_pretrained("microsoft/speecht5_vc")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
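# Note (assumption, not in the original script): everything runs on CPU by default;
# if a GPU is available, moving the model and vocoder with .to("cuda") (and the input
# tensors likewise) would speed up generation.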

# Speaker embeddings: an x-vector that defines the target voice
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
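# Index 7306 is the x-vector used in the Hugging Face SpeechT5 examples (a US English
# speaker from CMU ARCTIC); any other row of the dataset could be substituted to change
# the target voice.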

# Main conversion function: resample the input, run the model, return a temp WAV path
def voice_conversion(audio_file):
    # Load and resample the uploaded audio to the 16 kHz rate SpeechT5 expects
    audio, _ = librosa.load(audio_file, sr=16000)
    inputs = processor(audio=audio, sampling_rate=16000, return_tensors="pt")
    # Generate the converted waveform using the target speaker embedding
    with torch.no_grad():
        speech = model.generate_speech(inputs["input_values"], speaker_embeddings, vocoder=vocoder)
    # Write the result to a temporary WAV file and hand its path back to Gradio
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
        sf.write(f.name, speech.numpy(), samplerate=16000)
        return f.name

# Gradio interface
interface = gr.Interface(
    fn=voice_conversion,
    inputs=gr.Audio(type="filepath", label="Upload an audio file (spoken voice)"),
    outputs=gr.Audio(type="filepath", label="Converted voice"),
    title="SpeechT5 Voice Conversion",
    description="Convert one spoken voice into another with Microsoft's SpeechT5"
)

interface.launch()
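
# Usage note (assumption, not part of the original script): run the script with Python
# and open the local URL that Gradio prints; interface.launch(share=True) would
# additionally create a temporary public link.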