Prarthana committed on
Commit
2cd7bf6
·
verified ·
1 Parent(s): 54f92c5

Upload 4 files

Browse files
Files changed (4) hide show
  1. app.py +69 -0
  2. chat_history.json +1 -0
  3. main.py +854 -0
  4. requirements.txt +2 -0
app.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import streamlit as st
3
+ import google.generativeai as gen_ai
4
+ import pyttsx3
5
+ import threading
6
+ from dotenv import load_dotenv
7
+
8
# Load environment variables from a local .env file (expects Google_API_Key)
load_dotenv()

# Configure Streamlit page settings
st.set_page_config(
    page_title="Gemini-Pro ChatBot",
    page_icon="🤖",  # Favicon emoji
    layout="centered",  # Page layout option
)

# Retrieve Google API Key
Google_API_Key = os.getenv("Google_API_Key")

# Fail fast with a clear message if the key is missing: otherwise the
# google.generativeai client raises a cryptic authentication error only
# when the first request is sent.
if not Google_API_Key:
    st.error("Google_API_Key is not set. Add it to your .env file and restart the app.")
    st.stop()

# Set up Google Gemini-Pro AI Model
gen_ai.configure(api_key=Google_API_Key)
model = gen_ai.GenerativeModel('gemini-pro')
24
+
25
# Function to translate roles between Gemini-Pro and Streamlit terminology
def translate_role_for_streamlit(user_role):
    """Map the Gemini API's 'model' role onto Streamlit's 'assistant' role.

    Any other role string (e.g. 'user') is passed through unchanged.
    """
    if user_role == "model":
        return "assistant"
    return user_role
28
+
29
# Function to handle text-to-speech (TTS) in a separate thread
def speak_text(text):
    """Speak *text* aloud, blocking until playback finishes.

    A fresh pyttsx3 engine is created on every call, so the function can
    safely be run from a short-lived worker thread.
    """
    tts = pyttsx3.init()
    tts.say(text)
    tts.runAndWait()
34
+
35
# Initialize chat session in Streamlit if not already present
if "chat_session" not in st.session_state:
    st.session_state.chat_session = model.start_chat(history=[])

# Local alias — Streamlit reruns this script on every interaction, so the
# persistent conversation lives in session_state.
chat = st.session_state.chat_session

# Display chatbot title and description
st.markdown("<h1 style='text-align: center; color: #4A90E2;'>🤖 Gemini-Pro ChatBot</h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center; font-size: 16px;'>Ask me anything! I'm powered by Gemini-Pro AI.</p>", unsafe_allow_html=True)

# Replay the conversation so far
for message in chat.history:
    with st.chat_message(translate_role_for_streamlit(message.role)):
        st.markdown(message.parts[0].text)

# User input field; the walrus binds the prompt and skips the body when empty
if user_prompt := st.chat_input("Ask Gemini Pro..."):
    # Echo the user's message
    st.chat_message("user").markdown(user_prompt)

    # Show a loading indicator while waiting for the model's reply
    with st.spinner("Thinking..."):
        gemini_response = chat.send_message(user_prompt)

    # Display Gemini-Pro's response
    with st.chat_message("assistant"):
        st.markdown(gemini_response.text)

    # Speak the reply on a daemon thread so the UI is never blocked by TTS
    threading.Thread(target=speak_text, args=(gemini_response.text,), daemon=True).start()
chat_history.json ADDED
@@ -0,0 +1 @@
 
 
1
+ [{"role": "user", "content": "hii"}, {"role": "assistant", "content": "Hello! How can I assist you today?"}, {"role": "user", "content": "what is machine learning"}, {"role": "assistant", "content": "**Machine Learning**\n\nMachine learning (ML) is a subset of artificial intelligence (AI) that gives computers the ability to learn without being explicitly programmed. It enables systems to identify patterns, make predictions, and solve problems by using data, rather than following predefined rules.\n\n**Key Concepts:**\n\n* **Data:** ML algorithms are trained on large datasets that contain relevant information to the task at hand.\n* **Features:** Data is transformed into numerical features that represent important characteristics.\n* **Model:** An ML model is a mathematical function that maps inputs (features) to outputs (predictions).\n* **Training:** The model is iteratively adjusted by minimizing a loss function that measures the difference between predicted and actual outputs.\n\n**Types of Machine Learning:**\n\n* **Supervised Learning:** Learns from labeled data, where each data point has a known output.\n* **Unsupervised Learning:** Learns from unlabeled data, discovering patterns and structures without guidance.\n* **Reinforcement Learning:** Learns through trial and error, receiving rewards or penalties based on its actions.\n\n**Applications:**\n\nMachine learning is widely used in various industries and applications, including:\n\n* **Natural Language Processing:** Text analysis, machine translation, chatbots\n* **Image Recognition:** Object detection, facial recognition, medical imaging\n* **Predictive Analytics:** Forecasting demand, stock market predictions, fraud detection\n* **Healthcare:** Disease diagnosis, drug discovery, personalized medicine\n* **Finance:** Risk assessment, trading algorithms, portfolio optimization\n\n**Advantages:**\n\n* **Automation:** Automates tasks that are time-consuming or complex for humans.\n* **Accuracy:** Models 
can achieve high accuracy and precision in predictions.\n* **Adaptability:** Models can be updated and improved over time as new data becomes available.\n* **Customization:** Models can be tailored to specific domains and applications.\n\n**Challenges:**\n\n* **Data Quality:** Poor quality or biased data can lead to inaccurate models.\n* **Overfitting:** Models that perform well on training data but poorly on new data.\n* **Interpretability:** Understanding the decision-making process of ML models can be difficult."}, {"role": "user", "content": "hii"}, {"role": "assistant", "content": "Hello there! How can I assist you today?"}, {"role": "user", "content": "Hii"}, {"role": "assistant", "content": "Hello! How can I help you today?"}, {"role": "user", "content": "Hii"}, {"role": "assistant", "content": "Hello there! How can I assist you today?"}]
main.py ADDED
@@ -0,0 +1,854 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import os
2
+ # import streamlit as st
3
+ # from dotenv import load_dotenv
4
+ # import google.generativeai as gen_ai
5
+ #
6
+ #
7
+ # # Load environvent variables
8
+ # load_dotenv()
9
+ #
10
+ # # Configure Streamlit page settings
11
+ # st.set_page_config(
12
+ # page_title="Chat with Gemini-Pro!",
13
+ # page_icon=":brain:", # Favicon emoji
14
+ # layout="centered", # Page layout option
15
+ # )
16
+ #
17
+ # Google_API_Key = os.getenv("Google_API_Key")
18
+ #
19
+ # # Set up Google Gemni-Pro AI Model
20
+ # gen_ai.configure(api_key=Google_API_Key)
21
+ # model = gen_ai.GenerativeModel('gemini-pro')
22
+ #
23
+ # # Function to translate roles between Gemini-Pro and streamlit terminology
24
+ #
25
+ # def translate_role_for_streamlit(user_role):
26
+ # if user_role == 'model':
27
+ # return 'assistant'
28
+ # else: return user_role
29
+ #
30
+ # # Initialize chat session in streamlit if not already present
31
+ # if "chat_session" not in st.session_state:
32
+ # st.session_state.chat_session = model.start_chat(history=[])
33
+ #
34
+ # # Display the chatbot's title on the page
35
+ # st.title("πŸ€– Gemini Pro - ChatBot")
36
+ #
37
+ # # Display the chat history
38
+ # for message in st.session_state.chat_session.history:
39
+ # with st.chat_message(translate_role_for_streamlit(message.role)):
40
+ # st.markdown(message.parts[0].text)
41
+ #
42
+ #
43
+ # # Input field for user's message
44
+ # user_prompt = st.chat_input("Ask Gemini Pro...")
45
+ #
46
+ # if user_prompt:
47
+ # # Add user's message to chat and display it
48
+ # st.chat_message("user").markdown(user_prompt)
49
+ #
50
+ # # Send user's message to chat and display it
51
+ # gemini_response = st.session_state.chat_session.send_message(user_prompt)
52
+ #
53
+ # # Display Gemini-Pro's response
54
+ # with st.chat_message('assistant'):
55
+ # st.markdown(gemini_response.text)
56
+
57
+
58
+
59
+
60
+
61
+
62
+ #
63
+ # import os
64
+ # import json
65
+ # import streamlit as st
66
+ # import google.generativeai as gen_ai
67
+ # from dotenv import load_dotenv
68
+ # import speech_recognition as sr
69
+ # import pyttsx3
70
+ #
71
+ # # Load environment variables
72
+ # load_dotenv()
73
+ # Google_API_Key = os.getenv("Google_API_Key")
74
+ #
75
+ # # Configure Streamlit page settings
76
+ # st.set_page_config(
77
+ # page_title="Chat with Gemini-Pro!",
78
+ # page_icon="πŸ€–",
79
+ # layout="wide",
80
+ # )
81
+ #
82
+ # # Set up Google Gemini-Pro AI Model
83
+ # gen_ai.configure(api_key=Google_API_Key)
84
+ # model = gen_ai.GenerativeModel('gemini-pro')
85
+ #
86
+ # # Initialize chatbot memory
87
+ # if "chat_history" not in st.session_state:
88
+ # try:
89
+ # with open("chat_history.json", "r") as f:
90
+ # st.session_state.chat_history = json.load(f)
91
+ # except FileNotFoundError:
92
+ # st.session_state.chat_history = []
93
+ #
94
+ #
95
+ # # Save chat history
96
+ # def save_chat_history():
97
+ # with open("chat_history.json", "w") as f:
98
+ # json.dump(st.session_state.chat_history, f)
99
+ #
100
+ #
101
+ # # Sidebar settings
102
+ # with st.sidebar:
103
+ # st.subheader("βš™οΈ Settings")
104
+ #
105
+ # # Theme selection
106
+ # theme = st.radio("Select Theme", ["🌞 Light", "πŸŒ™ Dark"])
107
+ #
108
+ # # Clear chat history button
109
+ # if st.button("πŸ—‘οΈ Clear Chat History"):
110
+ # st.session_state.chat_history = []
111
+ # save_chat_history()
112
+ # st.experimental_rerun()
113
+ #
114
+ # # Apply dark mode styling
115
+ # if theme == "πŸŒ™ Dark":
116
+ # st.markdown(
117
+ # """
118
+ # <style>
119
+ # body {background-color: #333; color: white;}
120
+ # .stChatMessage {background-color: #444; color: white;}
121
+ # </style>
122
+ # """,
123
+ # unsafe_allow_html=True
124
+ # )
125
+ #
126
+ # # Display chatbot title
127
+ # st.title("πŸ€– Gemini Pro - ChatBot")
128
+ #
129
+ # # Display chat history
130
+ # for message in st.session_state.chat_history:
131
+ # with st.chat_message(message["role"]):
132
+ # st.markdown(message["content"])
133
+ #
134
+ # # Speech recognition & text-to-speech setup
135
+ # recognizer = sr.Recognizer()
136
+ # engine = pyttsx3.init()
137
+ #
138
+ # # Voice input button
139
+ # if st.button("🎀 Speak"):
140
+ # with sr.Microphone() as source:
141
+ # st.write("Listening...")
142
+ # audio = recognizer.listen(source)
143
+ # try:
144
+ # user_prompt = recognizer.recognize_google(audio)
145
+ # st.chat_message("user").markdown(user_prompt)
146
+ # except:
147
+ # st.error("Could not understand. Try again.")
148
+ #
149
+ # # Text input
150
+ # user_prompt = st.chat_input("Ask Gemini Pro...")
151
+ #
152
+ # if user_prompt:
153
+ # # Add user message to chat history
154
+ # st.session_state.chat_history.append({"role": "user", "content": user_prompt})
155
+ #
156
+ # # Display user message
157
+ # st.chat_message("user").markdown(user_prompt)
158
+ #
159
+ # # Get response from Gemini-Pro
160
+ # gemini_response = model.generate_content(user_prompt)
161
+ #
162
+ # # Add chatbot response to history
163
+ # st.session_state.chat_history.append({"role": "assistant", "content": gemini_response.text})
164
+ #
165
+ # # Display chatbot response
166
+ # with st.chat_message("assistant"):
167
+ # st.markdown(gemini_response.text)
168
+ #
169
+ # # Save chat history
170
+ # save_chat_history()
171
+ #
172
+ # # Text-to-Speech response
173
+ # engine.say(gemini_response.text)
174
+ # engine.runAndWait()
175
+
176
+
177
+
178
+
179
+
180
+ # import os
181
+ # import json
182
+ # import streamlit as st
183
+ # import google.generativeai as gen_ai
184
+ # from dotenv import load_dotenv
185
+ # import speech_recognition as sr
186
+ # import pyttsx3
187
+ #
188
+ # # Load environment variables
189
+ # load_dotenv()
190
+ # Google_API_Key = os.getenv("Google_API_Key")
191
+ #
192
+ # # Configure Streamlit page settings
193
+ # st.set_page_config(
194
+ # page_title="Chat with Gemini-Pro!",
195
+ # page_icon="πŸ€–",
196
+ # layout="wide",
197
+ # )
198
+ #
199
+ # # Apply custom CSS for UI/UX improvements
200
+ # st.markdown(
201
+ # """
202
+ # <style>
203
+ # .stChatMessage { padding: 10px; border-radius: 10px; margin: 5px 0; }
204
+ # .user { background-color: #DCF8C6; }
205
+ # .assistant { background-color: #E3E3E3; }
206
+ # .sidebar .sidebar-content { background-color: #f0f2f6; }
207
+ # .chat-container { max-width: 700px; margin: auto; }
208
+ # </style>
209
+ # """,
210
+ # unsafe_allow_html=True
211
+ # )
212
+ #
213
+ # # Set up Google Gemini-Pro AI Model
214
+ # gen_ai.configure(api_key=Google_API_Key)
215
+ # model = gen_ai.GenerativeModel('gemini-pro')
216
+ #
217
+ # # Initialize chatbot memory
218
+ # if "chat_history" not in st.session_state:
219
+ # try:
220
+ # with open("chat_history.json", "r") as f:
221
+ # st.session_state.chat_history = json.load(f)
222
+ # except (FileNotFoundError, json.JSONDecodeError):
223
+ # st.session_state.chat_history = []
224
+ #
225
+ #
226
+ # # Save chat history
227
+ # def save_chat_history():
228
+ # with open("chat_history.json", "w") as f:
229
+ # json.dump(st.session_state.chat_history, f)
230
+ #
231
+ #
232
+ # # Sidebar settings
233
+ # with st.sidebar:
234
+ # st.subheader("βš™οΈ Settings")
235
+ #
236
+ # # Theme selection
237
+ # theme = st.radio("Select Theme", ["🌞 Light", "πŸŒ™ Dark"])
238
+ #
239
+ # # Clear chat history button
240
+ # if st.button("πŸ—‘οΈ Clear Chat History"):
241
+ # st.session_state.chat_history = []
242
+ # save_chat_history()
243
+ # st.experimental_rerun()
244
+ #
245
+ # # Apply dark mode styling
246
+ # if theme == "πŸŒ™ Dark":
247
+ # st.markdown(
248
+ # """
249
+ # <style>
250
+ # body {background-color: #1e1e1e; color: white;}
251
+ # .stChatMessage {background-color: #444; color: white;}
252
+ # .sidebar .sidebar-content {background-color: #333;}
253
+ # .stTextInput, .stButton, .stRadio {background-color: #333; color: white;}
254
+ # </style>
255
+ # """,
256
+ # unsafe_allow_html=True
257
+ # )
258
+ #
259
+ # # Display chatbot title
260
+ # st.title("πŸ€– Gemini Pro - AI ChatBot")
261
+ #
262
+ # # Chat container
263
+ # st.markdown('<div class="chat-container">', unsafe_allow_html=True)
264
+ #
265
+ # # Display chat history
266
+ # for message in st.session_state.chat_history:
267
+ # role = message["role"]
268
+ # avatar = "πŸ‘€" if role == "user" else "πŸ€–"
269
+ # bg_color = "user" if role == "user" else "assistant"
270
+ #
271
+ # with st.chat_message(role):
272
+ # st.markdown(f'<div class="{bg_color} stChatMessage">{avatar} {message["content"]}</div>',
273
+ # unsafe_allow_html=True)
274
+ #
275
+ # # Speech recognition & text-to-speech setup
276
+ # recognizer = sr.Recognizer()
277
+ # # engine = pyttsx3.init()
278
+ # engine = pyttsx3.init()
279
+ # engine.setProperty('rate', 150) # Adjust speech rate
280
+ # engine.setProperty('voice', engine.getProperty('voices')[0].id) # Set a specific voice
281
+ #
282
+ # # Voice input button
283
+ # if st.button("🎀 Speak"):
284
+ # with sr.Microphone() as source:
285
+ # st.write("Listening...")
286
+ # audio = recognizer.listen(source)
287
+ # try:
288
+ # user_prompt = recognizer.recognize_google(audio)
289
+ # st.session_state.chat_history.append({"role": "user", "content": user_prompt})
290
+ # st.chat_message("user").markdown(user_prompt)
291
+ # # Trigger chatbot response
292
+ # gemini_response = model.generate_content(user_prompt)
293
+ # st.session_state.chat_history.append({"role": "assistant", "content": gemini_response.text})
294
+ # st.chat_message("assistant").markdown(gemini_response.text)
295
+ # save_chat_history()
296
+ # engine.say(gemini_response.text)
297
+ # engine.runAndWait()
298
+ # except sr.UnknownValueError:
299
+ # st.error("Sorry, I could not understand the audio.")
300
+ # except sr.RequestError:
301
+ # st.error("Could not request results from the speech recognition service.")
302
+ # # Text input
303
+ # user_prompt = st.chat_input("Ask Gemini Pro...")
304
+ #
305
+ # if user_prompt:
306
+ # # Add user message to chat history
307
+ # st.session_state.chat_history.append({"role": "user", "content": user_prompt})
308
+ #
309
+ # # Display user message
310
+ # st.chat_message("user").markdown(user_prompt)
311
+ #
312
+ # # Get response from Gemini-Pro
313
+ # # gemini_response = model.generate_content(user_prompt)
314
+ # try:
315
+ # gemini_response = model.generate_content(user_prompt)
316
+ # except Exception as e:
317
+ # st.error(f"An error occurred: {e}")
318
+ # gemini_response = type('Object', (), {'text': 'Sorry, I could not generate a response.'})
319
+ #
320
+ # # Add chatbot response to history
321
+ # st.session_state.chat_history.append({"role": "assistant", "content": gemini_response.text})
322
+ #
323
+ # # Display chatbot response
324
+ # with st.chat_message("assistant"):
325
+ # st.markdown(gemini_response.text)
326
+ #
327
+ # # Save chat history
328
+ # save_chat_history()
329
+ #
330
+ # # Text-to-Speech response
331
+ # engine.say(gemini_response.text)
332
+ # engine.runAndWait()
333
+ #
334
+ # st.markdown('</div>', unsafe_allow_html=True)
335
+ #
336
+
337
+
338
+
339
+
340
+
341
+
342
+
343
+
344
+
345
+
346
+ # import os
347
+ # import streamlit as st
348
+ # import google.generativeai as gen_ai
349
+ # import pyttsx3
350
+ # import threading
351
+ # from dotenv import load_dotenv
352
+ #
353
+ # # Load environment variables
354
+ # load_dotenv()
355
+ #
356
+ # # Configure Streamlit page settings
357
+ # st.set_page_config(
358
+ # page_title="Gemini-Pro ChatBot",
359
+ # page_icon="πŸ€–", # Favicon emoji
360
+ # layout="centered", # Page layout option
361
+ # )
362
+ #
363
+ # # Retrieve Google API Key
364
+ # Google_API_Key = os.getenv("Google_API_Key")
365
+ #
366
+ # # Set up Google Gemini-Pro AI Model
367
+ # gen_ai.configure(api_key=Google_API_Key)
368
+ # model = gen_ai.GenerativeModel('gemini-pro')
369
+ #
370
+ # # Function to translate roles between Gemini-Pro and Streamlit terminology
371
+ # def translate_role_for_streamlit(user_role):
372
+ # return "assistant" if user_role == "model" else user_role
373
+ #
374
+ # # Function to handle text-to-speech (TTS) in a separate thread
375
+ # def speak_text(text):
376
+ # engine = pyttsx3.init()
377
+ # engine.say(text)
378
+ # engine.runAndWait()
379
+ #
380
+ # # Initialize chat session in Streamlit if not already present
381
+ # if "chat_session" not in st.session_state:
382
+ # st.session_state.chat_session = model.start_chat(history=[])
383
+ #
384
+ # # Display chatbot title and description
385
+ # st.markdown("<h1 style='text-align: center; color: #4A90E2;'>πŸ€– Gemini-Pro ChatBot</h1>", unsafe_allow_html=True)
386
+ # st.markdown("<p style='text-align: center; font-size: 16px;'>Ask me anything! I'm powered by Gemini-Pro AI.</p>", unsafe_allow_html=True)
387
+ #
388
+ # # Display chat history
389
+ # for message in st.session_state.chat_session.history:
390
+ # with st.chat_message(translate_role_for_streamlit(message.role)):
391
+ # st.markdown(message.parts[0].text)
392
+ #
393
+ # # User input field
394
+ # user_prompt = st.chat_input("Ask Gemini Pro...")
395
+ #
396
+ # # If user enters a prompt
397
+ # if user_prompt:
398
+ # # Display user's message
399
+ # st.chat_message("user").markdown(user_prompt)
400
+ #
401
+ # # Show a loading indicator while waiting for a response
402
+ # with st.spinner("Thinking..."):
403
+ # gemini_response = st.session_state.chat_session.send_message(user_prompt)
404
+ #
405
+ # # Display Gemini-Pro's response
406
+ # with st.chat_message("assistant"):
407
+ # st.markdown(gemini_response.text)
408
+ #
409
+ # # Run text-to-speech in the background
410
+ # threading.Thread(target=speak_text, args=(gemini_response.text,), daemon=True).start()
411
+
412
+
413
+
414
+
415
+
416
+
417
+ #
418
+ #
419
+ #
420
+ #
421
+ #
422
+ #
423
+ #
424
+ #
425
+ #
426
+ #
427
+ # import os
428
+ # import streamlit as st
429
+ # import google.generativeai as gen_ai
430
+ # import pyttsx3
431
+ # import threading
432
+ # from dotenv import load_dotenv
433
+ #
434
+ # # Load environment variables
435
+ # load_dotenv()
436
+ #
437
+ # # Configure Streamlit page settings
438
+ # st.set_page_config(
439
+ # page_title="Gemini-Pro ChatBot",
440
+ # page_icon="πŸ€–",
441
+ # layout="centered",
442
+ # )
443
+ #
444
+ # # Retrieve Google API Key
445
+ # Google_API_Key = os.getenv("Google_API_Key")
446
+ #
447
+ # # Set up Google Gemini-Pro AI Model
448
+ # gen_ai.configure(api_key=Google_API_Key)
449
+ # model = gen_ai.GenerativeModel('gemini-pro')
450
+ #
451
+ # # Function to translate roles between Gemini-Pro and Streamlit terminology
452
+ # def translate_role_for_streamlit(user_role):
453
+ # return "assistant" if user_role == "model" else user_role
454
+ #
455
+ # # Initialize text-to-speech engine
456
+ # if "tts_engine" not in st.session_state:
457
+ # st.session_state.tts_engine = pyttsx3.init()
458
+ #
459
+ # def stop_speech():
460
+ # """Stop the current speech if running."""
461
+ # st.session_state.tts_engine.stop()
462
+ #
463
+ # def speak_text(text):
464
+ # """Stop previous speech and start speaking new text."""
465
+ # stop_speech() # Stop any ongoing speech
466
+ # st.session_state.tts_engine.say(text)
467
+ # st.session_state.tts_engine.runAndWait()
468
+ #
469
+ # # Initialize chat session in Streamlit if not already present
470
+ # if "chat_session" not in st.session_state:
471
+ # st.session_state.chat_session = model.start_chat(history=[])
472
+ #
473
+ # # Display chatbot title and description
474
+ # st.markdown("<h1 style='text-align: center; color: #4A90E2;'>πŸ€– Gemini-Pro ChatBot</h1>", unsafe_allow_html=True)
475
+ # st.markdown("<p style='text-align: center; font-size: 16px;'>Ask me anything! I'm powered by Gemini-Pro AI.</p>", unsafe_allow_html=True)
476
+ #
477
+ # # Display chat history
478
+ # for message in st.session_state.chat_session.history:
479
+ # with st.chat_message(translate_role_for_streamlit(message.role)):
480
+ # st.markdown(message.parts[0].text)
481
+ #
482
+ # # User input field
483
+ # user_prompt = st.chat_input("Ask Gemini Pro...")
484
+ #
485
+ # # If user enters a prompt
486
+ # if user_prompt:
487
+ # # Display user's message
488
+ # st.chat_message("user").markdown(user_prompt)
489
+ #
490
+ # # Show a loading indicator while waiting for a response
491
+ # with st.spinner("Thinking..."):
492
+ # gemini_response = st.session_state.chat_session.send_message(user_prompt)
493
+ #
494
+ # # Display Gemini-Pro's response
495
+ # with st.chat_message("assistant"):
496
+ # st.markdown(gemini_response.text)
497
+ #
498
+ # # Run text-to-speech in the background (stopping previous speech first)
499
+ # threading.Thread(target=speak_text, args=(gemini_response.text,), daemon=True).start()
500
+
501
+
502
+ # import os
503
+ # import streamlit as st
504
+ # import google.generativeai as gen_ai
505
+ # import pyttsx3
506
+ # import threading
507
+ # from dotenv import load_dotenv
508
+ #
509
+ # # Load environment variables
510
+ # load_dotenv()
511
+ #
512
+ # # Configure Streamlit page settings
513
+ # st.set_page_config(
514
+ # page_title="Gemini-Pro ChatBot",
515
+ # page_icon="πŸ€–",
516
+ # layout="centered",
517
+ # )
518
+ #
519
+ # # Retrieve Google API Key
520
+ # Google_API_Key = os.getenv("Google_API_Key")
521
+ #
522
+ # # Set up Google Gemini-Pro AI Model
523
+ # gen_ai.configure(api_key=Google_API_Key)
524
+ # model = gen_ai.GenerativeModel('gemini-pro')
525
+ #
526
+ # # Function to translate roles between Gemini-Pro and Streamlit terminology
527
+ # def translate_role_for_streamlit(user_role):
528
+ # return "assistant" if user_role == "model" else user_role
529
+ #
530
+ # # Initialize text-to-speech engine
531
+ # if "tts_engine" not in st.session_state:
532
+ # st.session_state.tts_engine = pyttsx3.init()
533
+ #
534
+ # # Initialize threading event for speech control
535
+ # if "speech_event" not in st.session_state:
536
+ # st.session_state.speech_event = threading.Event()
537
+ #
538
+ # def stop_speech():
539
+ # """Stop the current speech if running."""
540
+ # st.session_state.speech_event.set() # Set the event to stop speech
541
+ # st.session_state.tts_engine.stop()
542
+ #
543
+ # def speak_text(text):
544
+ # """Stop previous speech and start speaking new text."""
545
+ # stop_speech() # Stop any ongoing speech
546
+ # st.session_state.speech_event.clear() # Clear the event for new speech
547
+ # st.session_state.tts_engine.say(text)
548
+ # st.session_state.tts_engine.runAndWait()
549
+ #
550
+ # # Initialize chat session in Streamlit if not already present
551
+ # if "chat_session" not in st.session_state:
552
+ # st.session_state.chat_session = model.start_chat(history=[])
553
+ #
554
+ # # Display chatbot title and description
555
+ # st.markdown("<h1 style='text-align: center; color: #4A90E2;'>πŸ€– Gemini-Pro ChatBot</h1>", unsafe_allow_html=True)
556
+ # st.markdown("<p style='text-align: center; font-size: 16px;'>Ask me anything! I'm powered by Gemini-Pro AI.</p>", unsafe_allow_html=True)
557
+ #
558
+ # # Display chat history
559
+ # for message in st.session_state.chat_session.history:
560
+ # with st.chat_message(translate_role_for_streamlit(message.role)):
561
+ # st.markdown(message.parts[0].text)
562
+ #
563
+ # # User input field
564
+ # user_prompt = st.chat_input("Ask Gemini Pro...")
565
+ #
566
+ # # If user enters a prompt
567
+ # if user_prompt:
568
+ # # Display user's message
569
+ # st.chat_message("user").markdown(user_prompt)
570
+ #
571
+ # # Show a loading indicator while waiting for a response
572
+ # with st.spinner("Thinking..."):
573
+ # gemini_response = st.session_state.chat_session.send_message(user_prompt)
574
+ #
575
+ # # Display Gemini-Pro's response
576
+ # with st.chat_message("assistant"):
577
+ # st.markdown(gemini_response.text)
578
+ #
579
+ # # Run text-to-speech in the background (stopping previous speech first)
580
+ # threading.Thread(target=speak_text, args=(gemini_response.text,), daemon=True).start()
581
+
582
+
583
+
584
+
585
+
586
+
587
+ #
588
+ # import os
589
+ # import streamlit as st
590
+ # import google.generativeai as gen_ai
591
+ # import pyttsx3
592
+ # import threading
593
+ # from dotenv import load_dotenv
594
+ #
595
+ # # Load environment variables
596
+ # load_dotenv()
597
+ #
598
+ # # Configure Streamlit page settings
599
+ # st.set_page_config(
600
+ # page_title="Gemini-Pro ChatBot",
601
+ # page_icon="πŸ€–",
602
+ # layout="centered",
603
+ # )
604
+ #
605
+ # # Retrieve Google API Key
606
+ # Google_API_Key = os.getenv("Google_API_Key")
607
+ #
608
+ # # Set up Google Gemini-Pro AI Model
609
+ # gen_ai.configure(api_key=Google_API_Key)
610
+ # model = gen_ai.GenerativeModel('gemini-pro')
611
+ #
612
+ # # Function to translate roles between Gemini-Pro and Streamlit terminology
613
+ # def translate_role_for_streamlit(user_role):
614
+ # return "assistant" if user_role == "model" else user_role
615
+ #
616
+ # # Initialize text-to-speech engine
617
+ # if "tts_engine" not in st.session_state:
618
+ # st.session_state.tts_engine = pyttsx3.init()
619
+ #
620
+ # def stop_speech():
621
+ # """Stop the current speech if running."""
622
+ # st.session_state.tts_engine.stop()
623
+ #
624
+ # def speak_text(text):
625
+ # """Stop previous speech and start speaking new text."""
626
+ # stop_speech() # Stop any ongoing speech
627
+ # st.session_state.tts_engine.say(text)
628
+ # st.session_state.tts_engine.runAndWait()
629
+ #
630
+ # # Initialize chat session in Streamlit if not already present
631
+ # if "chat_session" not in st.session_state:
632
+ # st.session_state.chat_session = model.start_chat(history=[])
633
+ #
634
+ # # Display chatbot title and description
635
+ # st.markdown("<h1 style='text-align: center; color: #4A90E2;'>πŸ€– Gemini-Pro ChatBot</h1>", unsafe_allow_html=True)
636
+ # st.markdown("<p style='text-align: center; font-size: 16px;'>Ask me anything! I'm powered by Gemini-Pro AI.</p>", unsafe_allow_html=True)
637
+ #
638
+ # # Display chat history
639
+ # for message in st.session_state.chat_session.history:
640
+ # with st.chat_message(translate_role_for_streamlit(message.role)):
641
+ # st.markdown(message.parts[0].text)
642
+ #
643
+ # # User input field
644
+ # user_prompt = st.chat_input("Ask Gemini Pro...")
645
+ #
646
+ # # If user enters a prompt
647
+ # if user_prompt:
648
+ # # Display user's message
649
+ # st.chat_message("user").markdown(user_prompt)
650
+ #
651
+ # # Show a loading indicator while waiting for a response
652
+ # with st.spinner("Thinking..."):
653
+ # gemini_response = st.session_state.chat_session.send_message(user_prompt)
654
+ #
655
+ # # Display Gemini-Pro's response
656
+ # with st.chat_message("assistant"):
657
+ # st.markdown(gemini_response.text)
658
+ #
659
+ # # Run text-to-speech in the background (stopping previous speech first)
660
+ # threading.Thread(target=speak_text, args=(gemini_response.text,), daemon=True).start()
661
+ #
662
+ #
663
+ #
664
+ #
665
+
666
+
667
+
668
+
669
+
670
+
671
+
672
+
673
+
674
+
675
+
676
+
677
+
678
+ #
679
+ #
680
+ # import os
681
+ # import streamlit as st
682
+ # import google.generativeai as gen_ai
683
+ # import pyttsx3
684
+ # import threading
685
+ # from dotenv import load_dotenv
686
+ # import speech_recognition as sr
687
+ #
688
+ # # Load environment variables
689
+ # load_dotenv()
690
+ #
691
+ # # Configure Streamlit page settings
692
+ # st.set_page_config(
693
+ # page_title="Gemini-Pro ChatBot",
694
+ # page_icon="πŸ€–", # Favicon emoji
695
+ # layout="centered", # Page layout option
696
+ # )
697
+ #
698
+ # # Retrieve Google API Key
699
+ # Google_API_Key = os.getenv("Google_API_Key")
700
+ #
701
+ # # Set up Google Gemini-Pro AI Model
702
+ # gen_ai.configure(api_key=Google_API_Key)
703
+ # model = gen_ai.GenerativeModel('gemini-pro')
704
+ #
705
+ # # Initialize text-to-speech engine
706
+ # if "tts_engine" not in st.session_state:
707
+ # st.session_state.tts_engine = pyttsx3.init()
708
+ #
709
+ # # Speech-to-text function
710
+ # def listen_for_input():
711
+ # recognizer = sr.Recognizer()
712
+ # with sr.Microphone() as source:
713
+ # print("Listening...")
714
+ # audio = recognizer.listen(source)
715
+ # try:
716
+ # return recognizer.recognize_google(audio)
717
+ # except sr.UnknownValueError:
718
+ # return "Sorry, I did not catch that."
719
+ # except sr.RequestError:
720
+ # return "Sorry, there was an error with the speech recognition service."
721
+ #
722
+ # # Stop previous speech and speak the new text
723
+ # def speak_text(text):
724
+ # stop_speech() # Stop any ongoing speech
725
+ # st.session_state.tts_engine.say(text)
726
+ # st.session_state.tts_engine.runAndWait()
727
+ #
728
+ # # Stop ongoing speech
729
+ # def stop_speech():
730
+ # st.session_state.tts_engine.stop()
731
+ #
732
+ # # Function to translate roles between Gemini-Pro and Streamlit terminology
733
+ # def translate_role_for_streamlit(user_role):
734
+ # return "assistant" if user_role == "model" else user_role
735
+ #
736
+ # # Initialize chat session in Streamlit if not already present
737
+ # if "chat_session" not in st.session_state:
738
+ # st.session_state.chat_session = model.start_chat(history=[])
739
+ #
740
+ # # Display chatbot title and description
741
+ # st.markdown("<h1 style='text-align: center; color: #4A90E2;'>πŸ€– Gemini-Pro ChatBot</h1>", unsafe_allow_html=True)
742
+ # st.markdown("<p style='text-align: center; font-size: 16px;'>Ask me anything! I'm powered by Gemini-Pro AI.</p>", unsafe_allow_html=True)
743
+ #
744
+ # # Display chat history
745
+ # for message in st.session_state.chat_session.history:
746
+ # with st.chat_message(translate_role_for_streamlit(message.role)):
747
+ # st.markdown(message.parts[0].text)
748
+ #
749
+ # # User input field (with optional speech-to-text input)
750
+ # user_prompt = st.chat_input("Ask Gemini Pro...")
751
+ #
752
+ # if st.button("Use Voice Input"):
753
+ # user_prompt = listen_for_input()
754
+ #
755
+ # # If user enters a prompt
756
+ # if user_prompt:
757
+ # # Display user's message
758
+ # st.chat_message("user").markdown(user_prompt)
759
+ #
760
+ # # Show a loading indicator while waiting for a response
761
+ # with st.spinner("Thinking..."):
762
+ # gemini_response = st.session_state.chat_session.send_message(user_prompt)
763
+ #
764
+ # # Display Gemini-Pro's response
765
+ # with st.chat_message("assistant"):
766
+ # st.markdown(gemini_response.text)
767
+ #
768
+ # # Run text-to-speech in the background
769
+ # threading.Thread(target=speak_text, args=(gemini_response.text,), daemon=True).start()
770
+
771
+
772
+
773
+
774
+
775
+
776
+
777
+
778
import os
import threading

import pyttsx3
import streamlit as st
import google.generativeai as gen_ai
from dotenv import load_dotenv

# Load environment variables from a local .env file (expects Google_API_Key).
load_dotenv()

# Configure Streamlit page settings (must be the first Streamlit call).
st.set_page_config(
    page_title="Gemini-Pro ChatBot",
    page_icon="🤖",  # Favicon emoji
    layout="centered",  # Page layout option
)

# Retrieve the Google API key from the environment.
Google_API_Key = os.getenv("Google_API_Key")
if not Google_API_Key:
    # Fail fast with a clear message instead of a cryptic SDK error on the
    # first request.
    st.error("Google_API_Key is not set. Add it to your environment or .env file.")
    st.stop()

# Set up the Google Gemini-Pro AI model.
gen_ai.configure(api_key=Google_API_Key)
model = gen_ai.GenerativeModel('gemini-pro')
801
+
802
# Map Gemini-Pro history roles onto the labels st.chat_message understands.
def translate_role_for_streamlit(user_role):
    """Return the Streamlit chat role for a Gemini history role.

    Gemini labels its own turns "model"; Streamlit renders those under
    the "assistant" role. Every other role passes through unchanged.
    """
    if user_role == "model":
        return "assistant"
    return user_role
805
+
806
# Create one pyttsx3 engine per browser session and keep it in session state
# so reruns of the script reuse the same engine.
if "tts_engine" not in st.session_state:
    st.session_state["tts_engine"] = pyttsx3.init()
809
+
810
# Best-effort cancellation of whatever the session engine is currently saying.
def stop_speech():
    """Stop the session TTS engine, if one has been created yet."""
    engine = getattr(st.session_state, "tts_engine", None)
    if engine is not None:
        engine.stop()
814
+
815
# Speak *text* out loud; designed to run inside a daemon worker thread.
def speak_text(text):
    """Synthesize *text* with pyttsx3 from a background thread.

    Uses a locally obtained engine instead of st.session_state: session
    state and widget calls such as st.error() require a ScriptRunContext,
    which a worker thread does not have, so errors are reported to stdout.
    """
    try:
        engine = pyttsx3.init()
        engine.say(text)
        engine.runAndWait()
    except RuntimeError:
        # pyttsx3 raises "run loop already started" when a previous
        # runAndWait() is still active on the cached engine; drop this
        # utterance rather than crash the thread.
        pass
    except Exception as e:
        # NOTE(review): st.error() does not render from a background
        # thread, so log to the server console instead.
        print(f"Error in TTS: {e}")
823
+
824
# Create the Gemini chat session once and persist it across reruns.
if "chat_session" not in st.session_state:
    st.session_state.chat_session = model.start_chat(history=[])

# Page header: title plus a short tagline, centered via inline HTML.
# (Fixes the mis-encoded robot emoji in the heading.)
st.markdown(
    "<h1 style='text-align: center; color: #4A90E2;'>🤖 Gemini-Pro ChatBot</h1>",
    unsafe_allow_html=True,
)
st.markdown(
    "<p style='text-align: center; font-size: 16px;'>Ask me anything! I'm powered by Gemini-Pro AI.</p>",
    unsafe_allow_html=True,
)
831
+
832
# Replay the stored conversation so the transcript survives reruns.
for msg in st.session_state.chat_session.history:
    role = translate_role_for_streamlit(msg.role)
    with st.chat_message(role):
        st.markdown(msg.parts[0].text)
836
+
837
# Chat input box pinned to the bottom of the page.
user_prompt = st.chat_input("Ask Gemini Pro...")

# Handle a newly submitted prompt (chat_input returns None otherwise).
if user_prompt:
    # Echo the user's message into the transcript.
    st.chat_message("user").markdown(user_prompt)

    # Query Gemini-Pro, surfacing API/network failures instead of letting
    # an unhandled exception abort the whole script run.
    with st.spinner("Thinking..."):
        try:
            gemini_response = st.session_state.chat_session.send_message(user_prompt)
        except Exception as e:
            st.error(f"Gemini request failed: {e}")
            gemini_response = None

    if gemini_response is not None:
        # Render the assistant's reply.
        with st.chat_message("assistant"):
            st.markdown(gemini_response.text)

        # Speak the reply in the background so the UI stays responsive.
        threading.Thread(
            target=speak_text,
            args=(gemini_response.text,),
            daemon=True,
        ).start()
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ streamlit~=1.36.0
2
+ python-dotenv~=1.0.1
3
+ google-generativeai
4
+ pyttsx3