|
import gradio as gr |
|
from google import genai |
|
import nemo.collections.asr as nemo_asr |
|
from pydub import AudioSegment |
|
import os |
|
from huggingface_hub import login |
|
from hazm import Normalizer |
|
import numpy as np |
|
import re |
|
|
|
|
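# Credentials are read from environment variables (e.g., Space secrets when deployed on Hugging Face).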
|
HF_TOKEN = os.getenv("HF_TOKEN") |
|
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY") |
|
|
|
if not HF_TOKEN: |
|
raise ValueError("HF_TOKEN environment variable not set. Please provide a valid Hugging Face token.") |
|
|
|
if not GEMINI_API_KEY: |
|
raise ValueError("GEMINI_API_KEY environment variable not set. Please provide a valid GEMINI_API_KEY.") |
|
|
|
|
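# Authenticate with the Hugging Face Hub so the ASR checkpoint below can be downloaded.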
|
login(HF_TOKEN) |
|
|
|
|
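# Load the Persian Fast Conformer hybrid RNNT/CTC model; the checkpoint is fetched from the Hub on first run.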
|
try: |
|
asr_model = nemo_asr.models.EncDecHybridRNNTCTCBPEModel.from_pretrained( |
|
model_name="faimlab/stt_fa_fastconformer_hybrid_large_dataset_v30" |
|
) |
|
except Exception as e: |
|
    raise RuntimeError(f"Failed to load ASR model: {e}") from e
|
|
|
|
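# hazm's Normalizer standardizes Persian text (spacing, character variants) in the final transcript.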
|
normalizer = Normalizer() |
|
|
|
|
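# Decode the uploaded file with pydub (ffmpeg), downmix to mono at 16 kHz, and peak-normalize samples to [-1, 1].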
|
def load_audio(audio_path): |
|
audio = AudioSegment.from_file(audio_path) |
|
audio = audio.set_channels(1).set_frame_rate(16000) |
|
    audio_samples = np.array(audio.get_array_of_samples(), dtype=np.float32)
    peak = np.max(np.abs(audio_samples))
    if peak > 0:  # guard against division by zero on silent audio
        audio_samples /= peak
    return audio_samples, audio.frame_rate
|
|
|
|
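# Run the model on a single chunk; transcribe() returns a list with one result per input.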
|
def transcribe_chunk(audio_chunk, model): |
|
transcription = model.transcribe([audio_chunk], batch_size=1, verbose=False) |
|
return transcription[0].text |
|
|
|
|
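# Split long audio into 30-second chunks (30 s * 16000 samples/s) to bound memory use,
# transcribe each chunk, then join the pieces, collapse repeated spaces, and normalize the text.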
|
def transcribe_audio(file_path, model, chunk_size=30*16000): |
|
waveform, _ = load_audio(file_path) |
|
transcriptions = [] |
|
for start in range(0, len(waveform), chunk_size): |
|
end = min(len(waveform), start + chunk_size) |
|
transcription = transcribe_chunk(waveform[start:end], model) |
|
transcriptions.append(transcription) |
|
|
|
transcriptions = ' '.join(transcriptions) |
|
transcriptions = re.sub(' +', ' ', transcriptions) |
|
transcriptions = normalizer.normalize(transcriptions) |
|
|
|
return transcriptions |
|
|
|
|
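# Gradio callback: transcribe the uploaded file, or prompt the user if nothing was uploaded.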
|
def transcribe(audio): |
|
    if audio is None:
        return "Please upload an audio file."
    return transcribe_audio(audio, asr_model)
|
|
|
|
|
languages = [ |
|
"English", "Persian", "French", "Spanish", "German", "Italian", "Portuguese", "Dutch", "Swedish", "Danish", |
|
"Finnish", "Norwegian", "Russian", "Polish", "Turkish", "Arabic", "Hindi", "Chinese", "Japanese", "Korean", |
|
"Thai", "Vietnamese", "Indonesian", "Hebrew", "Greek", "Czech", "Hungarian", "Romanian", "Bulgarian", "Serbian", |
|
"Croatian", "Slovak", "Slovenian", "Ukrainian", "Lithuanian", "Latvian", "Estonian", "Macedonian", "Albanian", |
|
"Basque", "Catalan", "Maltese", "Icelandic", "Georgian", "Armenian", "Belarusian", "Yiddish", "Pashto", "Urdu", |
|
"Bengali", "Punjabi", "Tamil", "Telugu", "Malayalam", "Sinhala", "Burmese", "Lao", "Khmer", "Mongolian", |
|
"Nepali", "Marathi", "Gujarati", "Kannada", "Odia", "Assamese", "Maithili", "Kurdish", "Azerbaijani", "Kazakh", |
|
"Uzbek", "Turkmen", "Tajik", "Kyrgyz", "Uighur", "Tatar", "Haitian Creole", "Swahili", "Hausa", "Yoruba", |
|
"Zulu", "Xhosa", "Amharic", "Somali", "Tigrinya", "Shona", "Igbo", "Malagasy", "Quechua", "Aymara", "Guarani", |
|
"Sundanese", "Javanese", "Filipino", "Hmong", "Fijian", "Tongan", "Samoan", "Chamorro", "Hawaiian" |
|
] |
|
languages = sorted(languages) |
|
|
|
|
|
model_selections = [ |
|
'gemini-2.0-flash', |
|
'gemini-2.0-pro-exp-02-05', |
|
'gemini-2.0-flash-lite' |
|
] |
|
|
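# Translate the transcript with Gemini; the prompt instructs the model to reply with the translation only.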
|
def translate(text, target_language, model_sel): |
|
client = genai.Client(api_key=GEMINI_API_KEY) |
|
prompt = f"Translate the following text into {target_language}. Only reply with the translation.\n'{text}'." |
|
|
|
response = client.models.generate_content( |
|
model=model_sel, |
|
contents=[prompt] |
|
) |
|
|
|
return response.text |
|
|
|
|
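# Summarize the transcript with Gemini at the requested word count and output language.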
|
def summarize(transcript_text, word_count, model_sel, lang_sel): |
|
client = genai.Client(api_key=GEMINI_API_KEY) |
|
prompt = f"Summarize the following text in {word_count} words in {lang_sel}: '{transcript_text}'." |
|
|
|
response = client.models.generate_content( |
|
model=model_sel, |
|
contents=[prompt] |
|
) |
|
|
|
return response.text |
|
|
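# Restore Persian punctuation with Gemini; the prompt forbids any edit other than adding punctuation marks.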
|
def punctuate(transcript_text, model_sel): |
|
client = genai.Client(api_key=GEMINI_API_KEY) |
|
prompt = f""" |
|
Task: Punctuate the given Persian transcript text from an ASR model accurately according to Persian punctuation rules. Do not change any characters, correct mistakes, or otherwise modify the text in any way; you are ONLY allowed to insert appropriate punctuation marks.
|
|
|
Guidelines: |
|
1. Period (نقطه .) |
|
Ends a sentence. |
|
|
|
Used after abbreviations (e.g., آی.بی.ام.).
|
|
|
2. Comma (ویرگول ،)
|
Separates words/phrases in a list. |
|
|
|
Used in compound or conditional sentences. |
|
|
|
Prevents ambiguity (e.g., شلنگ مخصوص، آتش‌نشانی).
|
|
|
Surrounds parenthetical phrases (e.g., استیو جابز، بنیان‌گذار اپل،…).
|
|
|
Distinguishes adverbs (e.g., بعد از چندین ماه، ورزش کردم). |
|
|
|
Separates repeated words (e.g., آن کشور، کشور خوبی است). |
|
|
|
3. Semicolon (نقطه‌ویرگول ؛)
|
Connects related sentences when a full stop is too strong. |
|
|
|
Separates clauses in complex lists (e.g., آلمان، ایتالیا و ژاپن؛ انگلیس، شوروی و آمریکا). |
|
|
|
Used before explanatory phrases (e.g., فتوسنتز مهم است؛ یعنی…). |
|
|
|
4. Colon (دونقطه :) |
|
Introduces explanations, lists, or direct quotes (e.g., او گفت: «من آماده‌ام.»).
|
|
|
5. Ellipsis (سه‌نقطه …)
|
Indicates omitted words (e.g., فرهنگی، سیاسی، اجتماعی و …). |
|
|
|
6. Parentheses (پرانتز ()) |
|
Encloses extra information, dates, or clarifications (e.g., جنگ جهانی دوم (۱۹۴۵)). |
|
|
|
7. Quotation Marks (گیومه «») |
|
Encloses direct speech (e.g., او گفت: «سلام!»). |
|
|
|
Highlights specific words (e.g., او را «نابغه» خواندند). |
|
|
|
8. Question Mark (علامت سؤال ؟) |
|
Ends direct questions (e.g., آیا آمدی؟). |
|
|
|
9. Exclamation Mark (علامت تعجب !) |
|
Expresses surprise, emphasis, or commands (e.g., چه پرندهٔ زیبایی!).
|
|
|
|
|
Instructions: |
|
Apply these punctuation rules without modifying the original text. |
|
|
|
Do not correct typos, spelling, or grammar mistakes. |
|
|
|
Only return the punctuated text as output, with no explanations or additional comments.
|
Input: |
|
{transcript_text} |
|
""" |
|
response = client.models.generate_content( |
|
model=model_sel, |
|
contents=[prompt] |
|
) |
|
|
|
return response.text |
|
|
|
|
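# Build the Gradio interface: input/output widgets, action buttons, and settings.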
|
with gr.Blocks(theme="huggingface") as demo: |
|
gr.Markdown( |
|
""" |
|
# 📝 Persian ASR, Translation & Summarization |
|
|
|
Welcome to the **Persian Speech-to-Text & NLP** platform! This app allows you to upload an audio file, |
|
get an accurate **transcription**, and enhance the output with **translation**, **summarization**, |
|
and **punctuation restoration**. |
|
|
|
## 🎯 How It Works |
|
    1️⃣ Upload an **audio file** containing Persian speech. To transcribe YouTube videos, open this [Colab Notebook](https://colab.research.google.com/github/saeedzou/NeMo-Gradio/blob/main/yt_transcribe_gradio.ipynb) (if your request is flagged as bot traffic, disconnect the runtime and run all cells again).
|
2️⃣ Click **"Transcribe"** to generate the text output. |
|
3️⃣ Use additional features: **Translate**, **Summarize**, or **Restore Punctuation**. |
|
4️⃣ Customize settings: Select a **language**, **AI model**, and **summary length**. |
|
5️⃣ View and copy the processed text for your use! |
|
|
|
""" |
|
) |
|
|
|
with gr.Row(): |
|
audio_input = gr.Audio(type="filepath", label="🎵 Upload Audio File") |
|
transcript_output = gr.Textbox(label="📝 Transcription", interactive=True) |
|
translation_output = gr.Textbox(label="🌍 Translation", interactive=False) |
|
summarized_output = gr.Textbox(label="📖 Summarized Text", interactive=False) |
|
|
|
transcribe_button = gr.Button("🎙️ Transcribe") |
|
translate_button = gr.Button("🌐 Translate", interactive=True) |
|
summarize_button = gr.Button("✂️ Summarize", interactive=True) |
|
punctuate_button = gr.Button("🔤 Restore Punctuation", interactive=True) |
|
|
|
with gr.Row(): |
|
word_count_input = gr.Number(value=50, label="📏 Word Count for Summary") |
|
lang_selection = gr.Dropdown(choices=languages, value="English", label="🌎 Select Language") |
|
model_selection = gr.Dropdown(choices=model_selections, value="gemini-2.0-flash", label="🤖 Select AI Model") |
|
|
|
|
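    # Wire the buttons: each click routes the current textbox/dropdown values through its handler.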
|
transcribe_button.click( |
|
transcribe, |
|
inputs=audio_input, |
|
outputs=transcript_output |
|
) |
|
|
|
translate_button.click( |
|
translate, |
|
inputs=[transcript_output, lang_selection, model_selection], |
|
outputs=translation_output |
|
) |
|
|
|
summarize_button.click( |
|
summarize, |
|
inputs=[transcript_output, word_count_input, model_selection, lang_selection], |
|
outputs=summarized_output |
|
) |
|
|
|
punctuate_button.click( |
|
punctuate, |
|
inputs=[transcript_output, model_selection], |
|
outputs=transcript_output |
|
) |
|
|
|
gr.Markdown( |
|
""" |
|
|
--- |
|
|
|
Powered by NVIDIA’s **NeMo Fast Conformer**, this tool is optimized for high-quality **Persian ASR (Automatic Speech Recognition)**. |
|
|
|
**📚 Trained on 800+ Hours of Speech Data:** |
|
- Common Voice 17 (~300 hours) |
|
- YouTube (~400 hours) |
|
- NasleMana (~90 hours) |
|
- In-house dataset (~70 hours) |
|
|
|
--- |
|
|
|
## 📜 License & Business Inquiries |
|
|
|
This application is licensed under **Creative Commons Attribution-NonCommercial 4.0 (CC BY-NC 4.0)**. |
|
- **🛑 Non-Commercial Use Only** – Commercial use is not permitted without prior approval. |
|
- **🔗 Attribution Required** – Credit must be given to FAIM Group, Sharif University of Technology. |
|
- **❌ No Derivatives** – Modifications or adaptations of this work are not allowed. |
|
|
|
📜 Full License Details: [CC BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/) |
|
|
|
📩 **Business Inquiries:** |
|
If you're interested in commercial applications, please contact us at: |
|
✉️ **Email:** [[email protected]](mailto:[email protected]) |
|
|
|
--- |
|
""" |
|
) |
|
|
|
demo.launch() |