import os

import gradio as gr
import openai
import speech_recognition as sr
from gtts import gTTS
from deep_translator import GoogleTranslator

# Assumes the API key is supplied via the OPENAI_API_KEY environment variable.
openai.api_key = os.getenv("OPENAI_API_KEY")


def translate_text(text, target_lang):
    """Translate text into the target language, auto-detecting the source language."""
    translator = GoogleTranslator(source='auto', target=target_lang)
    return translator.translate(text)


def get_llm_response(prompt):
    """Send the English query to the chat model and return its reply."""
    # Uses the legacy ChatCompletion interface, which requires openai<1.0.
    response = openai.ChatCompletion.create(
        model="gpt-4o-mini-2024-07-18",
        messages=[
            {"role": "system", "content": "You are a helpful assistant that provides informative and concise responses."},
            {"role": "user", "content": prompt}
        ],
        max_tokens=150
    )
    return response.choices[0].message.content.strip()


def process_voice_or_text(input_audio, input_text, output_lang):
    """Transcribe audio if provided, translate to English, query the LLM, and translate the reply."""
    if output_lang not in ["ta", "en"]:
        return "Invalid output language selected. Please choose either Tamil ('ta') or English ('en').", "", "", ""

    # Prefer the audio input when it is present; otherwise fall back to the text box value.
    if input_audio is not None:
        recognizer = sr.Recognizer()
        with sr.AudioFile(input_audio) as source:
            audio = recognizer.record(source)
        try:
            # Note: recognize_google expects a single locale tag; the comma-separated
            # value below may not be honored and could be narrowed to e.g. "ta-IN".
            input_text = recognizer.recognize_google(audio, language="ta-IN,en-IN")
        except sr.UnknownValueError:
            return "Could not understand the audio input.", "", "", ""
        except sr.RequestError as e:
            return f"Speech recognition error: {e}", "", "", ""

    if not input_text:
        return "Please provide a valid input.", "", "", ""

    # Normalize the query to English before sending it to the LLM.
    english_query = translate_text(input_text, "en")

    # Get the assistant's answer in English.
    llm_response = get_llm_response(english_query)

    # Translate the answer into the requested output language.
    final_response = translate_text(llm_response, output_lang)

    return input_text, english_query, llm_response, final_response


def text_to_speech(response, lang):
    """Convert the response text to speech and return the saved MP3 path."""
    # Helper for spoken output; not currently wired into the Gradio interface below.
    tts = gTTS(text=response, lang=lang)
    tts.save("response.mp3")
    return "response.mp3"


iface = gr.Interface(
    fn=process_voice_or_text,
    inputs=[
        gr.Audio(type="filepath", label="Voice Input (Tamil or English)"),
        gr.Textbox(label="Text Input (Tamil or English)", placeholder="Type your input here..."),
        gr.Radio(["ta", "en"], label="Output Language")
    ],
    outputs=[
        gr.Textbox(label="Original Input"),
        gr.Textbox(label="Translated English Query"),
        gr.Textbox(label="LLM Response (English)"),
        gr.Textbox(label="Final Response (Tamil/English)")
    ],
    live=True,
    title="Nisha - Tamil-English Voice Assistant",
    description="Speak or type in Tamil or English, and get responses in your preferred language!"
)

iface.launch()