|
""" |
|
Real-time Speech Translation Demo |
|
|
|
This demo performs the following: |
|
1. Accepts a 15-second audio recording from the microphone. |
|
2. Uses OpenAI’s Whisper model to transcribe the speech. |
|
3. Splits the transcription into segments (each roughly corresponding to a sentence). |
|
4. Translates each segment on-the-fly using Facebook’s M2M100 model (via Hugging Face Transformers). |
|
5. Streams the cumulative translation output to the user. |
|
|
|
Make sure to install all dependencies from requirements.txt. |
|
""" |

import gradio as gr
import torch
import whisper
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

# Load the Whisper speech-recognition model once at startup. "base" is a small
# multilingual checkpoint; "small" or "medium" trade speed for accuracy.
whisper_model = whisper.load_model("base")

# Load the M2M100 translation model and its tokenizer (418M-parameter checkpoint).
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
m2m100_model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
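
# Optional: inference is much faster on a CUDA GPU. A minimal sketch, assuming
# both models fit in GPU memory (whisper.load_model already picks CUDA when it
# is available):
#
#     device = "cuda" if torch.cuda.is_available() else "cpu"
#     m2m100_model.to(device)
#     # ...then move `encoded` to the same device before calling generate().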

# Display names shown in the UI, mapped to the ISO 639-1 codes M2M100 expects.
# M2M100 covers 100 languages, so further entries can be added here.
LANGUAGES = {
    "English": "en",
    "Spanish": "es",
    "French": "fr",
    "German": "de",
    "Chinese": "zh",
}
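
# For reference, M2M100 marks languages with special tokens of the form
# "__en__"; tokenizer.get_lang_id("fr") returns the token id for "__fr__".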


def translate_audio(audio, target_language):
    """
    Transcribe the input audio with Whisper and translate each segment into the
    chosen target language. Yields a cumulative translation string for streaming.

    Parameters:
        audio (str): Path to the recorded audio file.
        target_language (str): Display name of the target language (e.g., "English").

    Yields:
        str: The cumulative translated text after processing each segment.
    """
|
    if audio is None:
        yield "No audio provided."
        return

    # Transcribe with Whisper; fp16=False avoids a warning when running on CPU.
    result = whisper_model.transcribe(audio, fp16=False)
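
    # `result` is a dict with "text" (the full transcription), "language" (the
    # detected ISO 639-1 code), and "segments", a list of dicts each carrying
    # "start", "end", and "text" for one roughly sentence-sized chunk.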

    # Whisper reports the detected source language as an ISO 639-1 code, which
    # M2M100 also uses; fall back to English for anything M2M100 cannot handle.
    source_lang = result.get("language", "en")
    if source_lang not in tokenizer.lang_code_to_id:
        source_lang = "en"

    target_lang_code = LANGUAGES.get(target_language, "en")
    cumulative_translation = ""

    # Translate segment by segment, streaming the growing translation.
    for segment in result.get("segments", []):
        segment_text = segment.get("text", "").strip()
        if not segment_text:
            continue

        if source_lang == target_lang_code:
            # Source already matches the target; no translation needed.
            translated_segment = segment_text
        else:
            # Tell the tokenizer which language the input text is in.
            tokenizer.src_lang = source_lang
            encoded = tokenizer(segment_text, return_tensors="pt")

            # Force the decoder to start in the target language.
            with torch.no_grad():
                generated_tokens = m2m100_model.generate(
                    **encoded,
                    forced_bos_token_id=tokenizer.get_lang_id(target_lang_code),
                )
            translated_segment = tokenizer.batch_decode(
                generated_tokens, skip_special_tokens=True
            )[0]

        # Append this segment and emit the translation so far; Gradio streams
        # each yielded value to the output component.
        cumulative_translation += translated_segment + " "
        yield cumulative_translation.strip()
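
# The generator can also be exercised without the UI, e.g. (the path below is a
# placeholder for any local audio file):
#
#     for partial in translate_audio("sample.wav", "Spanish"):
#         print(partial)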


with gr.Blocks() as demo:
|
gr.Markdown("# Real-time Speech Translation Demo") |
|
gr.Markdown( |
|
"Speak into the microphone and your speech will be transcribed and translated " |
|
"segment-by-segment. (Recording is limited to 15 seconds.)" |
|
) |

    with gr.Row():
        # The 15-second limit is only stated in the label here; recent Gradio
        # releases can enforce it via gr.Audio's max_length parameter.
        audio_input = gr.Audio(
            sources=["microphone"],
            type="filepath",
            label="Record your speech (max 15 seconds)",
            elem_id="audio_input",
        )

        target_lang_dropdown = gr.Dropdown(
            choices=list(LANGUAGES.keys()),
            value="English",
            label="Select Target Language",
        )

    output_text = gr.Textbox(label="Translated Text", lines=10)

    # translate_audio is a generator, so this event streams each yielded value
    # into the textbox as it arrives.
    audio_input.change(
        fn=translate_audio,
        inputs=[audio_input, target_lang_dropdown],
        outputs=output_text,
    )


demo.launch()
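
# launch() accepts standard Gradio options, e.g. demo.launch(share=True) for a
# temporary public link, or demo.launch(server_name="0.0.0.0") to listen on
# all network interfaces.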
|
|