import torch
import torchaudio  # βœ… Added torchaudio to handle audio resampling
import gradio as gr
import time  
import numpy as np
import scipy.io.wavfile
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline

# βœ… 1️⃣ Force Model to Run on CPU
device = "cpu"
torch_dtype = torch.float32  # Use CPU-friendly float type
MODEL_NAME = "openai/whisper-tiny"  # βœ… Switched to smallest model for fastest performance

# βœ… 2️⃣ Load Whisper Tiny Model on CPU (Removed `low_cpu_mem_usage=True`)
model = AutoModelForSpeechSeq2Seq.from_pretrained(
    MODEL_NAME, torch_dtype=torch_dtype, use_safetensors=True  # βœ… Removed low_cpu_mem_usage
)
model.to(device)

# βœ… 3️⃣ Load Processor & Pipeline
processor = AutoProcessor.from_pretrained(MODEL_NAME)

pipe = pipeline(
    task="automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    chunk_length_s=2,  # βœ… Process in 2-second chunks for ultra-low latency
    torch_dtype=torch_dtype,
    device=device,
    # βœ… `pipeline()` has no `sampling_rate` argument; the 16 kHz rate comes from
    # the feature extractor and is supplied per call with the audio dict
)
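
# Usage note (a hedged sketch, not part of the original file): the ASR pipeline
# accepts either a filename or a dict carrying the raw waveform and its rate:
#   result = pipe({"sampling_rate": 16000, "raw": np.zeros(16000, dtype=np.float32)})
#   print(result["text"])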

# βœ… 4️⃣ Real-Time Streaming Transcription (Microphone)
def stream_transcribe(stream, new_chunk):
    start_time = time.time()
    try:
        sr, y = new_chunk

        # βœ… Convert stereo to mono
        if y.ndim > 1:
            y = y.mean(axis=1)
            
        y = y.astype(np.float32)
        peak = np.max(np.abs(y))
        if peak > 0:  # βœ… Guard against division by zero on silent chunks
            y /= peak

        # βœ… Resample audio
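        # ── The file view is truncated here; the lines below are a hedged
        # sketch of the remaining steps, assuming torchaudio resampling to the
        # 16 kHz rate Whisper expects, chunk accumulation, and a pipeline call ──
        if sr != 16000:
            y = torchaudio.functional.resample(
                torch.from_numpy(y), orig_freq=sr, new_freq=16000
            ).numpy()
            sr = 16000

        # Accumulate chunks so each transcription covers the utterance so far
        stream = y if stream is None else np.concatenate([stream, y])

        text = pipe({"sampling_rate": sr, "raw": stream})["text"]
        print(f"Chunk transcribed in {time.time() - start_time:.2f}s")
        return stream, text
    except Exception as e:
        print(f"Transcription error: {e}")
        return stream, ""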