import os
import streamlit as st
import google.generativeai as gen_ai
import pyttsx3
import threading
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# Configure Streamlit page settings
st.set_page_config(
    page_title="Gemini-Pro ChatBot",
    page_icon="πŸ€–",       # Favicon emoji
    layout="centered",    # Page layout option
)
# Retrieve Google API Key
Google_API_Key = os.getenv("Google_API_Key")
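# Optional safeguard (not part of the original script): fail fast with a clear
# message if the key is missing, instead of letting the Gemini client error later.
if not Google_API_Key:
    st.error("Google_API_Key is not set. Add it to your .env file or environment.")
    st.stop()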
# Set up the Google Gemini model
gen_ai.configure(api_key=Google_API_Key)
model = gen_ai.GenerativeModel('gemini-2.0-flash')
# Function to translate roles between Gemini-Pro and Streamlit terminology
def translate_role_for_streamlit(user_role):
    return "assistant" if user_role == "model" else user_role
# Function to handle text-to-speech (TTS) in a separate thread
def speak_text(text):
    engine = pyttsx3.init()
    engine.say(text)
    engine.runAndWait()
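# Note: pyttsx3 drives a local speech engine (SAPI5 on Windows, NSSpeechSynthesizer
# on macOS, eSpeak on Linux), so audio plays on the machine running the app and may
# be unavailable on headless or cloud deployments.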
# Initialize chat session in Streamlit if not already present
if "chat_session" not in st.session_state:
st.session_state.chat_session = model.start_chat(history=[])
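# st.session_state persists across Streamlit reruns, so the same chat session
# (and its accumulated history) is reused for every message in the conversation.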
# Display chatbot title and description
st.markdown("<h1 style='text-align: center; color: #4A90E2;'>πŸ€– Gemini-Pro ChatBot</h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center; font-size: 16px;'>Ask me anything! I'm powered by Gemini-Pro AI.</p>", unsafe_allow_html=True)
# Display chat history
for message in st.session_state.chat_session.history:
    with st.chat_message(translate_role_for_streamlit(message.role)):
        st.markdown(message.parts[0].text)
# User input field
user_prompt = st.chat_input("Ask Gemini Pro...")
# If user enters a prompt
if user_prompt:
    # Display user's message
    st.chat_message("user").markdown(user_prompt)

    # Show a loading indicator while waiting for a response
    with st.spinner("Thinking..."):
        gemini_response = st.session_state.chat_session.send_message(user_prompt)

    # Display Gemini-Pro's response
    with st.chat_message("assistant"):
        st.markdown(gemini_response.text)

    # Run text-to-speech in the background
    threading.Thread(target=speak_text, args=(gemini_response.text,), daemon=True).start()
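    # Because the TTS thread is a daemon, it will not keep the process alive on
    # shutdown; any speech still playing is simply cut off when the app exits.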