varshamishra committed on
Commit
cea3436
·
verified ·
1 Parent(s): 255c5df

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +43 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import pipeline
3
+
4
+ # Load Whisper model
5
+ model_name = "AventIQ-AI/whisper-speech-text"
6
+ stt_pipeline = pipeline("automatic-speech-recognition", model=model_name)
7
+
8
+ def transcribe(audio_path):
9
+ """Transcribe speech to text using Whisper."""
10
+ if audio_path is None:
11
+ return "⚠️ Please upload an audio file."
12
+
13
+ try:
14
+ # Pass the file path directly to the Whisper pipeline
15
+ result = stt_pipeline(audio_path)
16
+
17
+ return f"📝 **Transcription:**\n{result['text']}"
18
+
19
+ except Exception as e:
20
+ return f"❌ Error processing audio: {str(e)}"
21
+
22
+ # Create Enhanced Gradio Interface
23
+ with gr.Blocks(theme="default") as demo:
24
+ gr.Markdown(
25
+ """
26
+ # 🎤 **Whisper Speech-to-Text**
27
+ **Upload an audio file** and this tool will convert your speech into text using **AventIQ-AI Whisper Model**.
28
+ Supports **MP3, WAV, FLAC** formats.
29
+ """
30
+ )
31
+
32
+ with gr.Row():
33
+ audio_input = gr.File(label="🎙️ Upload an Audio File", type="filepath") # Corrected `type`
34
+
35
+ transcribed_text = gr.Textbox(label="📝 Transcription", interactive=False)
36
+
37
+ submit_btn = gr.Button("🎧 Transcribe", variant="primary")
38
+
39
+ submit_btn.click(transcribe, inputs=audio_input, outputs=transcribed_text)
40
+
41
+ # Launch the app
42
+ if __name__ == "__main__":
43
+ demo.launch()
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ transformers
2
+ torch
3
+ gradio
4
+ sentencepiece