zohaibterminator committed
Commit 12c19a5 · verified · 1 Parent(s): bcdcc86

Update app.py

Files changed (1)
  1. app.py +90 -90
app.py CHANGED
@@ -1,91 +1,91 @@
- import streamlit as st
- from dotenv import load_dotenv
- from audiorecorder import audiorecorder
- from langchain_core.messages import HumanMessage, AIMessage
- import requests
- from transformers import pipeline
- from gtts import gTTS
- import io
-
- # Load environment variables (if any)
- load_dotenv()
-
- user_id = "1" # example user id
-
- # Initialize the wav2vec2 model for Urdu speech-to-text
- pipe = pipeline("automatic-speech-recognition", model="kingabzpro/wav2vec2-large-xls-r-300m-Urdu")
-
- def get_response(user_input):
-     '''
-     Takes user_input in English and invokes the infer API for response.
-
-     Parameters:
-         user_input (string): User query in English.
-     Returns:
-         res (string): Response from the LLM.
-     '''
-     url = f"https://whole-icons-hammer.loca.lt/infer/{user_id}"
-     headers = {"Content-Type": "application/x-www-form-urlencoded"}
-     data = {"user_input": user_input}
-     response = requests.post(url, headers=headers, data=data)
-     res = response.json()
-     return res["data"]
-
-
- def text_to_speech(text, lang='ur'):
-     '''
-     Converts text to speech using gTTS.
-
-     Parameters:
-         text (string): Text to be converted to speech.
-         lang (string): Language for the speech synthesis. Default is 'ur' (Urdu).
-     Returns:
-         response_audio_io (BytesIO): BytesIO object containing the audio data.
-     '''
-     tts = gTTS(text, lang=lang)
-     response_audio_io = io.BytesIO()
-     tts.write_to_fp(response_audio_io)
-     response_audio_io.seek(0)
-     return response_audio_io
-
-
- st.set_page_config(page_title="Urdu Virtual Assistant", page_icon="🤖") # set the page title and icon
-
- col1, col2 = st.columns([1, 5]) # Adjust the ratio to control the logo and title sizes
-
- # Display the logo in the first column
- with col1:
-     st.image("bolo_logo-removebg-preview.png", width=100) # Adjust the width as needed
-
- # Display the title in the second column
- with col2:
-     st.title("Urdu Virtual Assistant") # set the main title of the application
-     st.write("This application is a comprehensive speech-to-speech model designed to understand and respond in Urdu. It not only handles natural conversations but also has the capability to access and provide real-time information by integrating with the Tavily search engine. Whether you're asking for the weather or engaging in everyday dialogue, this assistant delivers accurate and context-aware responses, all in Urdu.")
-
- # Add an audio recorder
- audio = audiorecorder()
-
- if len(audio) > 0:
-     # Save the audio to a file
-     audio.export("audio.wav", format="wav")
-
-     # Convert audio to text using the wav2vec2 model
-     with open("audio.wav", "rb") as f:
-         audio_bytes = f.read()
-
-     # Process the audio file
-     result = pipe("audio.wav")
-     user_query = result["text"]
-
-     with st.chat_message("Human"): # create the message box for human input
-         st.audio(audio.export().read()) # display the audio player
-         st.markdown(user_query)
-
-     # Get response from the LLM
-     response_text = get_response(user_input=user_query)
-     response_audio = text_to_speech(response_text, lang='ur')
-
-     # Play the generated speech in the app
-     with st.chat_message("AI"):
-         st.audio(response_audio.read(), format='audio/mp3')
+ import streamlit as st
+ from dotenv import load_dotenv
+ from audiorecorder import audiorecorder
+ from langchain_core.messages import HumanMessage, AIMessage
+ import requests
+ from transformers import pipeline
+ from gtts import gTTS
+ import io
+
+ # Load environment variables (if any)
+ load_dotenv()
+
+ user_id = "1" # example user id
+
+ # Initialize the wav2vec2 model for Urdu speech-to-text
+ pipe = pipeline("automatic-speech-recognition", model="kingabzpro/wav2vec2-large-xls-r-300m-Urdu")
+
+ def get_response(user_input):
+     '''
+     Takes user_input in English and invokes the infer API for response.
+
+     Parameters:
+         user_input (string): User query in English.
+     Returns:
+         res (string): Response from the LLM.
+     '''
+     url = f"http://127.0.0.1:8000/infer/{user_id}"
+     headers = {"Content-Type": "application/x-www-form-urlencoded"}
+     data = {"user_input": user_input}
+     response = requests.post(url, headers=headers, data=data)
+     res = response.json()
+     return res["data"]
+
+
+ def text_to_speech(text, lang='ur'):
+     '''
+     Converts text to speech using gTTS.
+
+     Parameters:
+         text (string): Text to be converted to speech.
+         lang (string): Language for the speech synthesis. Default is 'ur' (Urdu).
+     Returns:
+         response_audio_io (BytesIO): BytesIO object containing the audio data.
+     '''
+     tts = gTTS(text, lang=lang)
+     response_audio_io = io.BytesIO()
+     tts.write_to_fp(response_audio_io)
+     response_audio_io.seek(0)
+     return response_audio_io
+
+
+ st.set_page_config(page_title="Urdu Virtual Assistant", page_icon="🤖") # set the page title and icon
+
+ col1, col2 = st.columns([1, 5]) # Adjust the ratio to control the logo and title sizes
+
+ # Display the logo in the first column
+ with col1:
+     st.image("bolo_logo-removebg-preview.png", width=100) # Adjust the width as needed
+
+ # Display the title in the second column
+ with col2:
+     st.title("Urdu Virtual Assistant") # set the main title of the application
+     st.write("This application is a comprehensive speech-to-speech model designed to understand and respond in Urdu. It not only handles natural conversations but also has the capability to access and provide real-time information by integrating with the Tavily search engine. Whether you're asking for the weather or engaging in everyday dialogue, this assistant delivers accurate and context-aware responses, all in Urdu.")
+
+ # Add an audio recorder
+ audio = audiorecorder()
+
+ if len(audio) > 0:
+     # Save the audio to a file
+     audio.export("audio.wav", format="wav")
+
+     # Convert audio to text using the wav2vec2 model
+     with open("audio.wav", "rb") as f:
+         audio_bytes = f.read()
+
+     # Process the audio file
+     result = pipe("audio.wav")
+     user_query = result["text"]
+
+     with st.chat_message("Human"): # create the message box for human input
+         st.audio(audio.export().read()) # display the audio player
+         st.markdown(user_query)
+
+     # Get response from the LLM
+     response_text = get_response(user_input=user_query)
+     response_audio = text_to_speech(response_text, lang='ur')
+
+     # Play the generated speech in the app
+     with st.chat_message("AI"):
+         st.audio(response_audio.read(), format='audio/mp3')
          st.markdown(response_text)
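
The only functional change in this commit is the inference URL: the client now posts to a backend on http://127.0.0.1:8000 instead of the earlier localtunnel address. That backend is not part of this commit, so the following is a minimal sketch of a server that would satisfy this client. The endpoint path /infer/{user_id}, the form-encoded user_input field, and the {"data": ...} JSON response shape are taken from get_response above; the FastAPI/uvicorn framework choice, the server.py file name, and the echo reply are illustrative assumptions.

# server.py -- hypothetical backend sketch; the real inference service is not
# part of this commit. Only the endpoint shape is inferred from app.py.
# (FastAPI needs the python-multipart package installed to parse form data.)
from fastapi import FastAPI, Form

app = FastAPI()

@app.post("/infer/{user_id}")
def infer(user_id: str, user_input: str = Form(...)):
    # Stand-in for the actual LLM / Tavily-backed chain that produces the reply.
    return {"data": f"(echo for user {user_id}) {user_input}"}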
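
With such a server running (for example, uvicorn server:app --port 8000), the endpoint can be smoke-tested without the Streamlit UI: curl -X POST -d "user_input=hello" http://127.0.0.1:8000/infer/1 should return a JSON body whose "data" field holds the reply text that app.py then speaks via gTTS.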