ayushsinha committed
Commit e0abf46 · verified · 1 Parent(s): 54e44bd

Upload 2 files

Files changed (2)
  1. app.py +27 -0
  2. requirements.txt +8 -0
app.py ADDED
@@ -0,0 +1,27 @@
+ import gradio as gr
+ from transformers import pipeline
+
+ # Load the Whisper model
+ model_name = "AventIQ-AI/whisper_small_Automatic_speech_recognition"
+ asr_pipeline = pipeline("automatic-speech-recognition", model=model_name)
+
+ def transcribe_audio(audio):
+     if audio is None:
+         return "⚠️ Please upload or record an audio file."
+
+     transcript = asr_pipeline(audio)["text"]
+     return transcript if transcript else "⚠️ No speech detected."
+
+ # Create Gradio UI
+ with gr.Blocks() as demo:
+     gr.Markdown("## 🎤 Whisper Small - Speech to Text")
+     gr.Markdown("Upload an audio file or record your voice to get a transcript.")
+
+     audio_input = gr.Audio(type="filepath", interactive=True, label="🎙️ Upload or Record Audio")
+     transcribe_button = gr.Button("🔍 Transcribe")
+     output_text = gr.Textbox(label="📝 Transcription Output")
+
+     transcribe_button.click(transcribe_audio, inputs=audio_input, outputs=output_text)
+
+ # Launch the app
+ demo.launch()
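For a quick check of the same checkpoint outside the Gradio UI, a minimal sketch along these lines should work (the path sample.wav is only a placeholder for a local audio file; the model id is the one loaded in app.py above):

from transformers import pipeline

# Load the same ASR checkpoint that app.py uses
asr = pipeline("automatic-speech-recognition", model="AventIQ-AI/whisper_small_Automatic_speech_recognition")

# "sample.wav" is a placeholder for any local audio file
print(asr("sample.wav")["text"])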
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ torch
+ transformers
+ gradio
+ sentencepiece
+ torchvision
+ huggingface_hub
+ pillow
+ numpy
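With the packages above installed (for example via pip install -r requirements.txt), running python app.py should launch the Gradio interface on a local URL through demo.launch(). Note that torchvision and pillow are not imported by app.py, so they appear to be optional for this speech-to-text demo.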