# AIVoice5 / app.py
# Source: Hugging Face Space upload by dschandra (commit 0af1f1a, "Update app.py", 11.7 kB)
from flask import Flask, render_template_string, request, jsonify
import speech_recognition as sr
from tempfile import NamedTemporaryFile
import os
import ffmpeg
import logging
from werkzeug.exceptions import BadRequest
# Initialize Flask App
app = Flask(__name__)

# Set up logging (INFO level so saved/converted file paths and recognized commands are traceable)
logging.basicConfig(level=logging.INFO)

# Initialize conversation state.
# NOTE(review): these are module-level globals shared by every client of the
# server — concurrent users would clobber each other's orders; confirm this
# app is intended for a single user at a time.
user_order = []  # Stores the current order (dish names added so far)
user_preferences = None  # Stores the customer's preferences (Veg, Non-Veg/Halal, or Both)

# Define food items, keyed by dietary preference; 'Both' is the union of the
# Veg and Non-Veg lists, while Drinks and Desserts are shared sections.
menu_items = {
    'Veg': ["Vegetable Biryani", "Paneer Butter Masala", "Aloo Gobi", "Veg Sambar", "Veg Korma"],
    'Non-Veg': ["Butter Chicken", "Chicken Biryani", "Tandoori Chicken", "Mutton Rogan Josh", "Chicken Korma"],
    'Both': ["Vegetable Biryani", "Paneer Butter Masala", "Butter Chicken", "Chicken Biryani", "Mutton Rogan Josh", "Tandoori Chicken", "Aloo Gobi", "Veg Sambar", "Veg Korma", "Chicken Korma"],
    'Drinks': ["Lassi", "Soft Drink", "Lemon Juice", "Iced Tea", "Milkshake"],
    'Desserts': ["Gulab Jamun", "Kheer", "Ice Cream", "Ras Malai"]
}
# HTML Template for Frontend: a single-page UI with one mic button. The page
# records 10-second clips via MediaRecorder (WebM/Opus), POSTs each clip to
# /process-audio as multipart form data, displays the JSON "response" field,
# and reads it aloud with the browser's SpeechSynthesis API. The loop keeps
# listening until a reply containing "Goodbye" arrives or an error occurs.
html_code = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>AI Dining Assistant</title>
<style>
body {
font-family: Arial, sans-serif;
background-color: #f4f4f9;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
height: 100vh;
margin: 0;
}
h1 {
color: #333;
}
.mic-button {
width: 80px;
height: 80px;
border-radius: 50%;
background-color: #007bff;
color: white;
font-size: 24px;
border: none;
display: flex;
align-items: center;
justify-content: center;
cursor: pointer;
box-shadow: 0px 4px 6px rgba(0, 0, 0, 0.1);
transition: background-color 0.3s;
}
.mic-button:hover {
background-color: #0056b3;
}
.status {
margin-top: 20px;
font-size: 18px;
color: #666;
}
.response {
margin-top: 20px;
padding: 10px;
background-color: #fff;
border: 1px solid #ddd;
border-radius: 5px;
box-shadow: 0px 4px 6px rgba(0, 0, 0, 0.1);
width: 300px;
text-align: center;
}
</style>
</head>
<body>
<h1>AI Dining Assistant</h1>
<button class="mic-button" id="mic-button">🎤</button>
<div class="status" id="status">Press the mic button to start the conversation...</div>
<div class="response" id="response" style="display: none;">Response will appear here...</div>
<script>
const micButton = document.getElementById('mic-button');
const status = document.getElementById('status');
const response = document.getElementById('response');
let mediaRecorder;
let audioChunks = [];
let isConversationActive = false;
micButton.addEventListener('click', () => {
if (!isConversationActive) {
isConversationActive = true;
startConversation();
}
});
function startConversation() {
// Show "Listening..." before recording
status.textContent = 'Listening...';
startListening();
}
function startListening() {
navigator.mediaDevices.getUserMedia({ audio: true }).then(stream => {
mediaRecorder = new MediaRecorder(stream, { mimeType: 'audio/webm;codecs=opus' });
mediaRecorder.start();
audioChunks = [];
mediaRecorder.ondataavailable = event => audioChunks.push(event.data);
mediaRecorder.onstop = async () => {
// Once the recording stops, update status to "Processing..."
status.textContent = 'Processing...';
const audioBlob = new Blob(audioChunks, { type: 'audio/webm' });
const formData = new FormData();
formData.append('audio', audioBlob);
try {
const result = await fetch('/process-audio', { method: 'POST', body: formData });
const data = await result.json();
response.textContent = data.response;
response.style.display = 'block';
// Handle Speech synthesis
try {
const utterance = new SpeechSynthesisUtterance(data.response);
speechSynthesis.speak(utterance);
utterance.onend = () => {
console.log("Speech synthesis completed.");
};
utterance.onerror = (e) => {
console.error("Speech synthesis error:", e.error);
status.textContent = 'Error with speech output.';
};
} catch (speechError) {
console.error("Speech synthesis not supported or failed:", speechError);
response.textContent = "Speech output unavailable. Please check your browser.";
}
// Continue the conversation after a short delay
if (data.response.includes("Goodbye")) {
status.textContent = 'Conversation ended. Press the mic button to start again.';
isConversationActive = false;
} else {
status.textContent = 'Listening...';
setTimeout(startListening, 1000); // Continue listening
}
} catch (error) {
response.textContent = 'Error occurred. Please try again.';
response.style.display = 'block';
status.textContent = 'Press the mic button to restart the conversation.';
isConversationActive = false;
}
};
setTimeout(() => mediaRecorder.stop(), 10000); // Stop recording after 10 seconds
}).catch(() => {
status.textContent = 'Microphone access denied.';
isConversationActive = false;
});
}
</script>
</body>
</html>
"""
@app.route('/')
def index():
    """Serve the single-page voice-assistant UI rendered from html_code."""
    return render_template_string(html_code)
@app.route('/process-audio', methods=['POST'])
def process_audio():
    """Transcribe an uploaded audio clip and return the assistant's reply.

    Expects a multipart form upload under the 'audio' key (the browser sends
    a WebM/Opus MediaRecorder blob). The clip is converted to 16 kHz mono PCM
    WAV with ffmpeg, transcribed via Google Speech Recognition, and the
    recognized text is routed through process_command().

    Returns:
        JSON of the form {"response": <str>} — also on every error path, so
        the frontend can always display/speak something.
    """
    global user_order, user_preferences
    # BUG FIX: initialize both handles up front. The original referenced
    # temp_file/converted_file unconditionally in `finally`, which raised
    # NameError whenever BadRequest fired before the files were created
    # (missing or empty upload).
    temp_file = None        # uploaded .webm clip
    converted_file = None   # 16 kHz mono .wav produced by ffmpeg
    try:
        # Validate the upload before touching the filesystem.
        audio_file = request.files.get('audio')
        if not audio_file:
            raise BadRequest("No audio file provided.")
        temp_file = NamedTemporaryFile(delete=False, suffix=".webm")
        audio_file.save(temp_file.name)
        logging.info(f"Saved input audio to {temp_file.name}")
        if os.path.getsize(temp_file.name) == 0:
            raise BadRequest("Uploaded audio file is empty.")
        # Convert to PCM WAV (16 kHz, mono) — the format sr.AudioFile expects.
        converted_file = NamedTemporaryFile(delete=False, suffix=".wav")
        try:
            ffmpeg.input(temp_file.name).output(
                converted_file.name, acodec='pcm_s16le', ac=1, ar='16000'
            ).run(overwrite_output=True)
        except Exception as ffmpeg_error:
            logging.error(f"FFmpeg conversion error: {str(ffmpeg_error)}")
            return jsonify({"response": "Audio conversion failed. Please try again."})
        logging.info(f"Converted audio saved to {converted_file.name}")
        # Recognize speech with Google's free recognizer (network call).
        recognizer = sr.Recognizer()
        with sr.AudioFile(converted_file.name) as source:
            audio_data = recognizer.record(source)
        try:
            command = recognizer.recognize_google(audio_data)
            logging.info(f"Recognized command: {command}")
            response = process_command(command)
        except sr.UnknownValueError:
            logging.error("Google Speech Recognition could not understand the audio")
            response = "Sorry, I couldn't understand your request. Please try again."
        except sr.RequestError as e:
            logging.error(f"Error with Google Speech Recognition service: {e}")
            response = "Sorry, there was an issue with the speech recognition service."
        return jsonify({"response": response})
    except BadRequest as br:
        logging.error(f"Bad request error: {br}")
        return jsonify({"response": f"Bad Request: {str(br)}"})
    except Exception as e:
        logging.error(f"Error processing audio: {e}")
        return jsonify({"response": f"An error occurred: {str(e)}"})
    finally:
        # Best-effort cleanup of whichever temp files were actually created.
        for handle in (temp_file, converted_file):
            try:
                if handle is not None and os.path.exists(handle.name):
                    os.unlink(handle.name)
            except Exception as cleanup_error:
                logging.error(f"Error cleaning up files: {cleanup_error}")
def process_command(command):
    """Map a transcribed utterance to a reply, updating global order state.

    Matching is intentionally loose (substring based) because speech
    recognition output varies; checks are ordered so that more specific
    phrases win over shorter ones.

    Args:
        command: raw recognized text from the speech recognizer.

    Returns:
        The assistant's reply as a plain string.
    """
    global user_order, user_preferences
    command = command.lower()
    # Greeting: match whole words — the original substring test fired on any
    # word containing "hi" (e.g. "which").
    if any(word in ("hello", "hi", "hey") for word in command.split()):
        return (
            "Welcome! How can I assist you with your meal today? "
            "Please let me know your preferences (Veg, Non-Veg/Halal, or Both)."
        )
    if "reset preferences" in command:
        user_order = []  # Reset the order
        user_preferences = None  # Reset preferences
        return "Your preferences have been reset. What would you like to order?"
    if "show my order" in command or "what's my order" in command:
        if user_order:
            return "Your current order includes: " + ", ".join(user_order)
        return "You haven't added anything to your order yet."
    if "place order" in command or "confirm order" in command:
        if user_order:
            return (
                "You have the following items in your order: " + ", ".join(user_order) +
                ". Would you like to confirm your order?"
            )
        return "You haven't added anything to your order yet. Please add some items."
    if "yes" in command:
        return "Your order has been confirmed and sent to the kitchen. Thank you for ordering!"
    # BUG FIX: the original never appended to user_order, so the order was
    # permanently empty. Match dish names (restricted by the chosen
    # preference, defaulting to the full menu) before the preference keywords
    # so "vegetable biryani" adds a dish rather than re-selecting Veg.
    candidates = (
        menu_items.get(user_preferences or 'Both', []) +
        menu_items['Drinks'] + menu_items['Desserts']
    )
    matched = [item for item in candidates if item.lower() in command]
    if matched:
        user_order.extend(matched)
        return "Added " + ", ".join(matched) + " to your order. Anything else?"
    # BUG FIX: test "non-veg"/"halal" BEFORE "veg" — "non-veg" contains "veg",
    # so the original always routed Non-Veg/Halal requests to the Veg branch.
    if "non-veg" in command or "non veg" in command or "halal" in command:
        user_preferences = "Non-Veg"
        return "You selected Non-Veg/Halal. Here are the options: " + ", ".join(menu_items["Non-Veg"]) + ". What would you like to add?"
    if "both" in command:
        user_preferences = "Both"
        return "You selected Both. Here are the options: " + ", ".join(menu_items["Both"]) + ". What would you like to add?"
    if "veg" in command:
        user_preferences = "Veg"
        return "You selected Veg. Here are the options: " + ", ".join(menu_items["Veg"]) + ". What would you like to add?"
    return "Sorry, I didn't understand your request. You can ask to view your order, reset preferences, or place an order."
if __name__ == "__main__":
app.run(host="0.0.0.0", port=7860)