import io

import requests
import streamlit as st
from audiorecorder import audiorecorder
from dotenv import load_dotenv
from gtts import gTTS
from transformers import pipeline

# Load environment variables from a local .env file.
load_dotenv()

# Static user ID sent to the infer API to identify this session.
user_id = "1"

# Speech-recognition pipeline using a wav2vec2 model fine-tuned for Urdu.
pipe = pipeline("automatic-speech-recognition", model="kingabzpro/wav2vec2-large-xls-r-300m-Urdu")

def get_response(user_input):
    '''
    Sends the transcribed user query to the infer API and returns the LLM's response.

    Parameters:
        user_input (string): The user's query.

    Returns:
        res (string): Response text from the LLM.
    '''
    url = f"http://127.0.0.1/infer/{user_id}"
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    data = {"user_input": user_input}
    response = requests.post(url, headers=headers, data=data)
    response.raise_for_status()  # fail fast on HTTP errors instead of on .json()
    res = response.json()
    return res["data"]  # the backend wraps its reply in a "data" field
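
# For reference, the same request can be made from the command line (assuming
# the backend listens on the default HTTP port, as the URL above implies):
#
#   curl -X POST "http://127.0.0.1/infer/1" \
#        -H "Content-Type: application/x-www-form-urlencoded" \
#        -d "user_input=<query>"
#
# The endpoint is expected to reply with JSON of the form {"data": "<text>"}.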


def text_to_speech(text, lang='ur'):
    '''
    Converts text to speech using gTTS.

    Parameters:
        text (string): Text to be converted to speech.
        lang (string): Language for the speech synthesis. Default is 'ur' (Urdu).

    Returns:
        response_audio_io (BytesIO): BytesIO object containing the audio data.
    '''
    tts = gTTS(text, lang=lang)
    response_audio_io = io.BytesIO()
    tts.write_to_fp(response_audio_io)  # gTTS writes MP3 data into the buffer
    response_audio_io.seek(0)  # rewind so callers can read from the start
    return response_audio_io
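
# Note: gTTS synthesizes speech via Google's online TTS service, so an
# internet connection is required, and the returned audio is always MP3.
# A minimal usage sketch:
#
#   audio_io = text_to_speech("...", lang='ur')
#   st.audio(audio_io.read(), format='audio/mp3')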


st.set_page_config(page_title="Urdu Virtual Assistant", page_icon="🤖")

# Header: logo alongside the app title and description.
col1, col2 = st.columns([1, 5])

with col1:
    st.image("bolo_logo-removebg-preview.png", width=100)

with col2:
    st.title("Urdu Virtual Assistant")
    st.write(
        "This application is a speech-to-speech assistant designed to understand "
        "and respond in Urdu. It handles natural conversation and can also provide "
        "real-time information through its integration with the Tavily search engine. "
        "Whether you're asking about the weather or engaging in everyday dialogue, "
        "the assistant delivers accurate, context-aware responses, all in Urdu."
    )

# Microphone recorder widget shown in the main page body.
audio = audiorecorder()
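# Note: audiorecorder() returns a pydub AudioSegment and accepts optional
# custom button labels, e.g. audiorecorder("Click to record", "Click to stop
# recording"); the defaults are used here.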

if len(audio) > 0:
    # Persist the recording so the ASR pipeline can read it from disk.
    audio.export("audio.wav", format="wav")

    # Keep the raw bytes for playback in the chat transcript below.
    with open("audio.wav", "rb") as f:
        audio_bytes = f.read()

    # Transcribe the recorded Urdu speech to text.
    result = pipe("audio.wav")
    user_query = result["text"]
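    # Note: the transformers ASR pipeline also accepts raw bytes or a NumPy
    # waveform, so the round trip through "audio.wav" could be skipped; a file
    # path is simply the most straightforward input here.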

    # Show the user's turn: the recording plus its transcription.
    with st.chat_message("human"):
        st.audio(audio_bytes, format="audio/wav")
        st.markdown(user_query)

    # Query the backend, then synthesize the reply as Urdu speech.
    response_text = get_response(user_input=user_query)
    response_audio = text_to_speech(response_text, lang='ur')

    # Show the assistant's turn: the spoken reply plus its text.
    with st.chat_message("ai"):
        st.audio(response_audio.read(), format='audio/mp3')
        st.markdown(response_text)
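
# To launch the app locally (assuming this file is saved as app.py):
#   streamlit run app.py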