"""Streamlit chat UI for Google's Gemini model with background text-to-speech."""

import os
import threading

import google.generativeai as gen_ai
import pyttsx3
import streamlit as st
from dotenv import load_dotenv

# Load environment variables from a local .env file.
load_dotenv()

# Configure Streamlit page settings (must be the first Streamlit call).
st.set_page_config(
    page_title="Gemini-Pro ChatBot",
    page_icon="🤖",  # Favicon emoji
    layout="centered",  # Page layout option
)

# Retrieve the Google API key. The original env-var name is kept for
# backward compatibility; the conventional upper-case name is accepted too.
Google_API_Key = os.getenv("Google_API_Key") or os.getenv("GOOGLE_API_KEY")
if not Google_API_Key:
    # Fail fast with a visible message instead of a confusing downstream error.
    st.error("Google API key not found. Set 'Google_API_Key' in your .env file.")
    st.stop()

# Set up the Google Gemini model.
gen_ai.configure(api_key=Google_API_Key)
model = gen_ai.GenerativeModel('gemini-2.0-flash')


def translate_role_for_streamlit(user_role: str) -> str:
    """Map Gemini's 'model' role to Streamlit's 'assistant' role; pass others through."""
    return "assistant" if user_role == "model" else user_role


def speak_text(text: str) -> None:
    """Speak *text* aloud via pyttsx3.

    A fresh engine is created per call because this runs in a short-lived
    worker thread; pyttsx3 engines are not thread-safe to share.
    """
    engine = pyttsx3.init()
    engine.say(text)
    engine.runAndWait()


# Initialize the chat session once per browser session.
if "chat_session" not in st.session_state:
    st.session_state.chat_session = model.start_chat(history=[])

# Display chatbot title and description.
# NOTE(review): the original HTML markup was lost in transit; reconstructed
# minimal header markup around the surviving visible text.
st.markdown(
    "<h1>🤖 Gemini-Pro ChatBot</h1>"
    "<p>Ask me anything! I'm powered by Gemini-Pro AI.</p>",
    unsafe_allow_html=True,
)

# Replay the conversation so far from the session's chat history.
for message in st.session_state.chat_session.history:
    with st.chat_message(translate_role_for_streamlit(message.role)):
        st.markdown(message.parts[0].text)

# User input field.
user_prompt = st.chat_input("Ask Gemini Pro...")

# If user enters a prompt
if user_prompt:
    # Echo the user's message.
    st.chat_message("user").markdown(user_prompt)

    # Show a loading indicator while waiting for a response.
    with st.spinner("Thinking..."):
        gemini_response = st.session_state.chat_session.send_message(user_prompt)

    # Display Gemini-Pro's response.
    with st.chat_message("assistant"):
        st.markdown(gemini_response.text)

    # Run text-to-speech in the background; daemon thread so it never
    # blocks interpreter shutdown if speech is still playing.
    threading.Thread(target=speak_text, args=(gemini_response.text,), daemon=True).start()