import os
import keyfile
import warnings
import streamlit as st
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.schema import HumanMessage, SystemMessage, AIMessage

# Ignore warnings
warnings.filterwarnings("ignore")

# Streamlit settings
st.set_page_config(page_title="🌿 ArchanaCare 🧙‍♀️", page_icon="🧙‍♀️", layout="centered")
st.markdown("<h1 style='text-align: center; color: #4B0082;'>Welcome to ArchanaCare 🌿✨</h1>", unsafe_allow_html=True)
st.markdown("<h3 style='color: #003366;'>How can I assist with your ailments or worries today? 🧪💫</h3>", unsafe_allow_html=True)

# Add vertical space without relying on streamlit_extras
st.markdown("<br><br>", unsafe_allow_html=True)

# Initialize session state with a system prompt that defines the healer persona
if "sessionMessages" not in st.session_state:
    st.session_state["sessionMessages"] = [
        SystemMessage(content="You are a medieval magical healer known for your peculiar sarcasm.")
    ]

# Set Google API key
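# Note: `keyfile` is assumed to be a small local module (keyfile.py) next to this
# script that simply defines GOOGLEKEY = "<your-google-api-key>"; swap in your own
# secrets handling (e.g. st.secrets or an environment variable) if you prefer.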
os.environ["GOOGLE_API_KEY"] = keyfile.GOOGLEKEY

# Initialize the Gemini chat model; convert_system_message_to_human passes the
# SystemMessage persona as a human turn for models without native system-message support
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro",
    temperature=0.7,
    convert_system_message_to_human=True
)

# Define a function to create chat bubbles
def chat_bubble(message, is_user=True):
    align = 'right' if is_user else 'left'
    color = '#ADD8E6' if is_user else '#E6E6FA'
    border_radius = '25px'  # same rounding for user and assistant bubbles
    st.markdown(f"""
    <div style="text-align: {align}; padding: 10px;">
        <span style="display: inline-block; padding: 10px; background-color: {color}; color: black;
                     border-radius: {border_radius}; max-width: 70%;">
            {message}
        </span>
    </div>
    """, unsafe_allow_html=True)

# Send the conversation (including the new question) to the model and return its reply
def load_answer(question):
    # Add user question to the message history
    st.session_state.sessionMessages.append(HumanMessage(content=question))
    
    # Get AI's response
    assistant_answer = llm.invoke(st.session_state.sessionMessages)
    
    # Append AI's answer to the session messages
    if isinstance(assistant_answer, AIMessage):
        st.session_state.sessionMessages.append(assistant_answer)
        return assistant_answer.content
    else:
        st.session_state.sessionMessages.append(AIMessage(content=assistant_answer))
        return assistant_answer

# Capture user input
def get_text():
    input_text = st.text_input("You: ", key="input", placeholder="Type your question here...")
    return str(input_text)

# Main implementation
user_input = get_text()
submit = st.button("🌟 Get a Magical Answer 🌟")
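
# Note: Streamlit reruns this script on every interaction, so only the latest
# question/answer pair is drawn as bubbles here; the full conversation still
# accumulates in st.session_state.sessionMessages and is sent to the model each turn.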

if submit and user_input:
    # Display the user's question
    chat_bubble(user_input, is_user=True)
    
    # Load the response and display it as a chat bubble
    response = load_answer(user_input)
    chat_bubble(response, is_user=False)

# Background styling and layout enhancements
st.markdown("""
<style>
    .stApp {
        background: linear-gradient(to right, #FFEFBA, #FFFFFF);
        color: #4B0082;
        font-family: Arial, sans-serif;
    }
    input[type="text"] {
        padding: 8px;
        border: 2px solid #4B0082;
        border-radius: 15px;
        outline: none;
    }
    button {
        background-color: #4B0082;
        color: white;
        border-radius: 15px;
    }
</style>
""", unsafe_allow_html=True)