import os
import threading

import streamlit as st
import google.generativeai as gen_ai
import pyttsx3
from dotenv import load_dotenv
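
# Required packages (pip names, assuming the standard distributions):
# streamlit, google-generativeai, pyttsx3, python-dotenv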

# Load environment variables (e.g. the Gemini API key) from a .env file.
load_dotenv()

st.set_page_config(
    page_title="Gemini-Pro ChatBot",
    page_icon="🤖",
    layout="centered",
)

# Read the API key from the environment (set in your .env file).
Google_API_Key = os.getenv("Google_API_Key")
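
# Optional guard: fail fast with a clear message if the key is missing,
# instead of failing later inside the Gemini client.
if not Google_API_Key:
    st.error("Google_API_Key is not set. Add it to your .env file and restart the app.")
    st.stop()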

# Configure the Gemini client and create the model used by this app.
gen_ai.configure(api_key=Google_API_Key)
model = gen_ai.GenerativeModel('gemini-2.0-flash')


def translate_role_for_streamlit(user_role):
    # Gemini labels its messages "model"; Streamlit's chat UI expects "assistant".
    return "assistant" if user_role == "model" else user_role


def speak_text(text):
    # Read the text aloud with pyttsx3 (requires a working local audio backend).
    engine = pyttsx3.init()
    engine.say(text)
    engine.runAndWait()


# Keep one chat session in Streamlit's session state so the conversation
# survives script reruns.
if "chat_session" not in st.session_state:
    st.session_state.chat_session = model.start_chat(history=[])

st.markdown("<h1 style='text-align: center; color: #4A90E2;'>🤖 Gemini-Pro ChatBot</h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center; font-size: 16px;'>Ask me anything! I'm powered by Gemini-Pro AI.</p>", unsafe_allow_html=True)

# Replay the conversation history on every rerun.
for message in st.session_state.chat_session.history:
    with st.chat_message(translate_role_for_streamlit(message.role)):
        st.markdown(message.parts[0].text)

user_prompt = st.chat_input("Ask Gemini Pro...")

if user_prompt:
    # Echo the user's message, then send it to Gemini.
    st.chat_message("user").markdown(user_prompt)

    with st.spinner("Thinking..."):
        gemini_response = st.session_state.chat_session.send_message(user_prompt)

    # Show the model's reply.
    with st.chat_message("assistant"):
        st.markdown(gemini_response.text)

    # Speak the reply in a background thread so the UI is not blocked.
    threading.Thread(target=speak_text, args=(gemini_response.text,), daemon=True).start()