Update app.py
app.py CHANGED
@@ -4,24 +4,22 @@ import gradio as gr
 import requests
 from gtts import gTTS
 
-# Load Whisper model
+# Load Whisper model (base is a good balance of speed + quality)
 model = whisper.load_model("base")
 
-#
-GROQ_API_KEY =
-
-# Main function: audio → text → LLM → speech
+# Put your actual Groq API key here
+GROQ_API_KEY = "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"
+
 def transcribe_and_respond(audio_file):
-    # 1. Transcribe audio
+    # 1. Transcribe audio to text
     result = model.transcribe(audio_file)
     user_text = result["text"]
 
-    # 2.
+    # 2. Send to Groq LLM (LLaMA 3.3 70B)
     headers = {
         "Content-Type": "application/json",
         "Authorization": f"Bearer {GROQ_API_KEY}"
     }
-
     data = {
         "model": "llama-3.3-70b-versatile",
         "messages": [{"role": "user", "content": user_text}]
@@ -32,22 +30,22 @@ def transcribe_and_respond(audio_file):
     if response.status_code == 200:
         output_text = response.json()['choices'][0]['message']['content']
     else:
-        output_text = f"Error
+        output_text = f"Error: {response.status_code} - {response.text}"
 
-    # 3. Convert to speech
+    # 3. Convert reply to speech
     tts = gTTS(text=output_text, lang='en')
     tts_path = "response.mp3"
     tts.save(tts_path)
 
     return output_text, tts_path
 
-# Gradio
+# Gradio interface
 iface = gr.Interface(
     fn=transcribe_and_respond,
     inputs=gr.Audio(type="filepath", label="🎙️ Speak"),
     outputs=[gr.Textbox(label="🧠 LLM Reply"), gr.Audio(label="🔊 Spoken Response")],
-    title="Voice Chatbot
-    description="
+    title="Voice-to-Voice Chatbot (Whisper + Groq + gTTS)",
+    description="Record your voice, get a reply from LLaMA 3.3 70B, and hear it spoken back!"
 )
 
 if __name__ == "__main__":
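The unchanged context between the two hunks (the actual HTTP request) is collapsed in the diff. Given the headers and data built above, that step is presumably a POST to Groq's OpenAI-compatible chat completions endpoint; the following is a minimal sketch of the assumed call, not the file's verbatim code:

import requests

# Assumed shape of the elided request: Groq serves an OpenAI-compatible
# chat completions API; headers and data are as built in transcribe_and_respond.
response = requests.post(
    "https://api.groq.com/openai/v1/chat/completions",
    headers=headers,
    json=data,
)

Likewise, the tail truncated after if __name__ == "__main__": presumably just calls iface.launch().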
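Note that the new revision hardcodes the Groq API key in app.py, so it is visible in the repo history. The usual Spaces pattern is to store the key as a repository secret and read it from the environment; a minimal sketch, assuming a secret named GROQ_API_KEY is configured in the Space's settings:

import os

# Spaces exposes repository secrets as environment variables;
# this raises a KeyError if the secret is missing.
GROQ_API_KEY = os.environ["GROQ_API_KEY"]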