File size: 3,379 Bytes
887206d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4228d26
887206d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b486b3a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
import streamlit as st
from dotenv import load_dotenv
from audiorecorder import audiorecorder
from langchain_core.messages import HumanMessage, AIMessage
import requests
from transformers import pipeline
from gtts import gTTS
import io

# Load environment variables (if any)
load_dotenv()

# Hard-coded demo user id; the backend keys conversation state on this
# (see the /infer/{user_id} URL in get_response).
user_id = "1"  # example user id

# Initialize the wav2vec2 model for Urdu speech-to-text.
# NOTE: this downloads/loads the model at import time, so app startup is slow
# on the first run; the pipeline is reused for every transcription below.
pipe = pipeline("automatic-speech-recognition", model="kingabzpro/wav2vec2-large-xls-r-300m-Urdu")

def get_response(user_input):
    '''
        Takes user_input and invokes the local infer API for a response.

        Parameters:

            user_input (string): User query text (transcribed Urdu speech).

        Returns:

            res (string): The "data" field of the JSON response from the LLM backend.

        Raises:

            requests.HTTPError: if the backend returns a non-2xx status.
            requests.Timeout: if the backend does not answer within 60 seconds.
    '''
    url = f"http://127.0.0.1:8000/infer/{user_id}"
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    data = {"user_input": user_input}
    # timeout prevents the Streamlit app from hanging forever if the
    # backend is down; raise_for_status surfaces HTTP errors explicitly
    # instead of failing later with an opaque KeyError on "data".
    response = requests.post(url, headers=headers, data=data, timeout=60)
    response.raise_for_status()
    res = response.json()
    return res["data"]


def text_to_speech(text, lang='ur'):
    '''
        Synthesizes speech for the given text using gTTS.

        Parameters:

            text (string): Text to be converted to speech.

            lang (string): Language code for synthesis. Default is 'ur' (Urdu).

        Returns:

            audio_buffer (BytesIO): In-memory MP3 audio, rewound to the start
            so callers can read it immediately.
    '''
    audio_buffer = io.BytesIO()
    # gTTS streams the synthesized MP3 straight into the buffer;
    # rewind before returning so the caller reads from the beginning.
    gTTS(text, lang=lang).write_to_fp(audio_buffer)
    audio_buffer.seek(0)
    return audio_buffer


# Page chrome: title + icon shown in the browser tab. Must be the first
# Streamlit call after import for the config to take effect.
st.set_page_config(page_title="Urdu Virtual Assistant", page_icon="🤖")  # set the page title and icon

# Two-column header: narrow column for the logo, wide one for the title.
col1, col2 = st.columns([1, 5])  # Adjust the ratio to control the logo and title sizes

# Display the logo in the first column
with col1:
    st.image("bolo_logo-removebg-preview.png", width=100)  # Adjust the width as needed

# Display the title in the second column
with col2:
    st.title("Urdu Virtual Assistant") # set the main title of the application
# Intro blurb rendered full-width below the header columns.
st.write("This application is a comprehensive speech-to-speech model designed to understand and respond in Urdu. It not only handles natural conversations but also has the capability to access and provide real-time information by integrating with the Tavily search engine. Whether you're asking for the weather or engaging in everyday dialogue, this assistant delivers accurate and context-aware responses, all in Urdu.")

# Microphone recorder widget; returns an empty segment until the user records.
audio = audiorecorder()

if len(audio) > 0:
    # Persist the recording so the ASR pipeline can read it from disk.
    audio.export("audio.wav", format="wav")

    # Transcribe the Urdu audio with the wav2vec2 pipeline.
    # (The previous manual open/read of the file was dead code — the
    # pipeline takes the path directly — so it has been removed.)
    result = pipe("audio.wav")
    user_query = result["text"]

    with st.chat_message("Human"):  # create the message box for human input
        st.audio(audio.export().read())  # display the audio player
        st.markdown(user_query)

    # Get the LLM's text reply, then synthesize it back to Urdu speech.
    response_text = get_response(user_input=user_query)
    response_audio = text_to_speech(response_text, lang='ur')

    # Play the generated speech in the app alongside the text reply.
    with st.chat_message("AI"):
        st.audio(response_audio.read(), format='audio/mp3')
        st.markdown(response_text)