import asyncio
import io

import numpy as np
import soundfile as sf
import streamlit as st
import torch
import websockets
from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer

# Load the pre-trained model and tokenizer once at startup
tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")


async def recognize_speech(websocket):
    async for message in websocket:
        try:
            # Decode the incoming audio bytes into a waveform
            wf, samplerate = sf.read(io.BytesIO(message))

            # wav2vec2-base-960h was trained on 16 kHz mono audio;
            # warn if the client sends something else
            if samplerate != 16000:
                print(f"Warning: received {samplerate} Hz audio; "
                      "wav2vec2-base-960h expects 16 kHz input")
            if wf.ndim > 1:
                # Down-mix multi-channel audio to mono
                wf = wf.mean(axis=1)
            wf = wf.astype(np.float32)

            # Convert the waveform into model input tensors
            input_values = tokenizer(wf, return_tensors="pt").input_values

            # Run inference without tracking gradients
            with torch.no_grad():
                logits = model(input_values).logits

            # Greedy CTC decoding: take the most likely token per frame
            predicted_ids = torch.argmax(logits, dim=-1)
            transcription = tokenizer.decode(predicted_ids[0])

            # Send the transcription back to the client
            await websocket.send(transcription)
        except Exception as e:
            print(f"Error in recognize_speech: {e}")
            await websocket.send("Error processing audio data.")


async def main_logic():
    async with websockets.serve(recognize_speech, "localhost", 8000):
        await asyncio.Future()  # run forever


# Streamlit interface
st.title("Real-Time ASR with Transformers")

# WebSocket script for the frontend
st.markdown("""