practiceAI / app.py
import subprocess

# Install/upgrade dependencies at app startup (a requirements.txt is the usual
# approach on Hugging Face Spaces, but this keeps the script self-contained).
# check=True makes a failed install stop the app immediately.
subprocess.run(["python", "-m", "pip", "install", "--upgrade", "pip"], check=True)
subprocess.run(["pip", "install", "gradio", "--upgrade"], check=True)
subprocess.run(["pip", "install", "datasets"], check=True)
subprocess.run(["pip", "install", "transformers"], check=True)
subprocess.run(
    ["pip", "install", "torch", "torchvision", "torchaudio",
     "-f", "https://download.pytorch.org/whl/torch_stable.html"],
    check=True,
)
import gradio as gr
import numpy as np
import torch
import torchaudio
from transformers import WhisperProcessor, WhisperForConditionalGeneration

# Load model and processor
processor = WhisperProcessor.from_pretrained("openai/whisper-large")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large")
forced_decoder_ids = processor.get_decoder_prompt_ids(language="italian", task="transcribe")
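# Optional sketch (an assumption, not in the original Space): the whisper-large
# checkpoint is slow on CPU, so the model could be moved to a GPU when one is
# available; the input features would then also need .to(device) in transcribe_audio.
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model = model.to(device)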
# Custom preprocessing function: convert Gradio's (sample_rate, samples) tuple
# into the log-mel input features Whisper expects (mono, float32, 16 kHz)
def preprocess_audio(audio_data, target_sampling_rate=16_000):
    sample_rate, raw_audio = audio_data
    raw_speech = np.asarray(raw_audio, dtype=np.float32)
    if raw_speech.ndim > 1:  # collapse stereo to mono
        raw_speech = raw_speech.mean(axis=1)
    if np.abs(raw_speech).max() > 1.0:  # Gradio delivers int16 samples; scale to [-1, 1]
        raw_speech = raw_speech / 32768.0
    if sample_rate != target_sampling_rate:  # Whisper was trained on 16 kHz audio
        raw_speech = torchaudio.functional.resample(
            torch.from_numpy(raw_speech), sample_rate, target_sampling_rate
        ).numpy()
    return processor(
        raw_speech, sampling_rate=target_sampling_rate, return_tensors="pt"
    ).input_features


# Function to perform ASR on audio data
def transcribe_audio(audio_data):
    input_features = preprocess_audio(audio_data)  # shape: (1, 80, 3000)
    predicted_ids = model.generate(input_features, forced_decoder_ids=forced_decoder_ids)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
    return transcription[0]
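
# Quick sanity check (illustrative only, not part of the original app): feed one
# second of silence through the pipeline to confirm the model loads and a string
# comes back. Uncomment to run locally; the zeros array is a placeholder signal.
# silence = (16_000, np.zeros(16_000, dtype=np.float32))
# print(transcribe_audio(silence))
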
# Create Gradio interface
audio_input = gr.Audio(type="numpy")  # delivers a (sample_rate, samples) tuple
gr.Interface(fn=transcribe_audio, inputs=audio_input, outputs="text").launch()