import streamlit as st
import uuid
import os
from dotenv import load_dotenv
import config
from g4f.client import Client
import time

load_dotenv()

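# Page configuration from config.py (app name, icon, wide layout).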
st.set_page_config(
    page_title=config.APP_NAME,
    page_icon=":robot_face:",
    layout="wide"
)

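# Custom CSS for a dark chat theme: chat bubbles, sidebar, spacing, and input box.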
st.markdown("""
<style>
.stChatMessage {
    background-color: #262730;
    border-radius: 10px;
    padding: 15px;
    margin-bottom: 10px;
    color: white;
    box-shadow: 2px 2px 5px rgba(0, 0, 0, 0.2);
}

.stSidebar {
    background-color: #1E1E1E;
    color: white;
    padding: 20px;
}

.stSidebar h2, .stSidebar h3 {
    color: #007BFF;
}

.stSidebar .stButton>button {
    width: 100%;
    border-radius: 5px;
    margin-bottom: 10px;
}

.main .block-container {
    padding-top: 2rem;
    padding-bottom: 2rem;
}

.stChatInputContainer {
    margin-top: 20px;
}

body {
    color: white;
}
</style>
""", unsafe_allow_html=True)

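# Initialise per-session state: chat histories, the active chat, the greeting,
# model selection, and the timestamps used for rate limiting.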
if "chat_sessions" not in st.session_state:
    st.session_state.chat_sessions = {}
if "current_chat_id" not in st.session_state:
    st.session_state.current_chat_id = None
if "system_message" not in st.session_state:
    st.session_state.system_message = "Hello! I am ready to help you."
if "selected_model" not in st.session_state:
    st.session_state.selected_model = config.DEFAULT_MODEL
if "model_selected" not in st.session_state:
    st.session_state.model_selected = False
if "request_timestamps" not in st.session_state:
    st.session_state.request_timestamps = []


def start_new_chat():
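    """Create a new chat seeded with the greeting and make it the active session."""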
    new_chat_id = str(uuid.uuid4())
    st.session_state.chat_sessions[new_chat_id] = [{"role": "assistant", "content": st.session_state.system_message}]
    st.session_state.current_chat_id = new_chat_id
    st.session_state.model_selected = False
    st.session_state.request_timestamps = []


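# Sidebar: start a new chat or switch between existing chat sessions.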
with st.sidebar:
    st.header("Chat History")

    if st.button("New Chat"):
        start_new_chat()
        st.rerun()

    if st.session_state.current_chat_id is None:
        start_new_chat()
        st.rerun()

    st.subheader("Existing Chats")
    if st.session_state.chat_sessions:
        for chat_id in reversed(list(st.session_state.chat_sessions.keys())):
            history = st.session_state.chat_sessions[chat_id]
            # Title each chat with its first user message (or the greeting), truncated to 30 characters.
            if len(history) > 1 and history[1]['role'] == 'user':
                chat_title_content = str(history[1]['content'])
            else:
                chat_title_content = str(history[0]['content'])
            chat_title = chat_title_content[:30] + "..." if len(chat_title_content) > 30 else chat_title_content

            # NOTE: button_style is computed but never applied; st.button does not
            # accept inline CSS, so the active chat is not visually highlighted.
            button_style = "background-color: #007BFF;" if chat_id == st.session_state.current_chat_id else ""

            if st.button(chat_title, key=f"select_chat_{chat_id}"):
                st.session_state.current_chat_id = chat_id
                st.session_state.model_selected = True
                st.rerun()
    else:
        st.info("No chats yet.")


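# Main area: title, one-time model selector, and the transcript of the active chat.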
st.title(config.APP_NAME)
st.markdown(f"*{config.APP_DESCRIPTION}*")

# The model selector is only shown before the first message of a chat.
if not st.session_state.model_selected:
    available_models_display = list(config.MODELS.keys())
    available_models_api = list(config.MODELS.values())
    # config.MODELS preserves insertion order, so the index found in the list of
    # API names also points at the matching display name.
    default_model_index = available_models_api.index(config.DEFAULT_MODEL) if config.DEFAULT_MODEL in available_models_api else 0
    st.session_state.selected_model = st.selectbox("Select AI Model", available_models_display, index=default_model_index)


current_chat_history = st.session_state.chat_sessions.get(st.session_state.current_chat_id, [])

chat_container = st.container(border=True)
with chat_container:
    for message in current_chat_history:
        role = message["role"]
        content = message["content"]
        avatar_icon = "👤" if role == "user" else "🤖"

        with st.chat_message(role, avatar=avatar_icon):
            st.markdown(content)


user_input = st.chat_input("Type your message here...")

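# Handle a new user message. A simple sliding-window rate limiter (RPM/RPH/RPD from
# config) runs first; blocked attempts also count towards the limits, because the
# timestamp is recorded before the check.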
if user_input:
    current_time = time.time()
    st.session_state.request_timestamps.append(current_time)

    one_minute_ago = current_time - 60
    one_hour_ago = current_time - 3600
    one_day_ago = current_time - 86400

    recent_requests_minute = [ts for ts in st.session_state.request_timestamps if ts > one_minute_ago]
    recent_requests_hour = [ts for ts in st.session_state.request_timestamps if ts > one_hour_ago]
    recent_requests_day = [ts for ts in st.session_state.request_timestamps if ts > one_day_ago]

    st.session_state.request_timestamps = recent_requests_day

    if len(recent_requests_minute) > config.RPM or \
       len(recent_requests_hour) > config.RPH or \
       len(recent_requests_day) > config.RPD:
        st.warning("Rate limit exceeded. Please wait before sending another message.")
    else:
        st.session_state.chat_sessions[st.session_state.current_chat_id].append({"role": "user", "content": user_input})
        st.session_state.model_selected = True

        with chat_container:
            with st.chat_message("user", avatar="👤"):
                st.markdown(user_input)

        with chat_container:
            with st.chat_message("assistant", avatar="🤖"):
                message_placeholder = st.empty()
                full_response = ""

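        # Stream the completion from the g4f client and update the placeholder
        # as chunks arrive.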
        with st.spinner(f"Generating response with {st.session_state.selected_model}..."):
            try:
                client_params = {}
                if config.ALGORITHM == 'openai':
                    openai_base_url = os.getenv("OPENAI_BASE_URL")
                    openai_api_key = os.getenv("OPENAI_API_KEY")
                    if openai_base_url:
                        client_params['base_url'] = openai_base_url
                    if openai_api_key:
                        client_params['api_key'] = openai_api_key

                client = Client(**client_params)

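                # Send the full history of the current chat and map the selected
                # display name back to its API model id (falling back to the default).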
                messages_for_api = [{"role": msg["role"], "content": msg["content"]} for msg in st.session_state.chat_sessions[st.session_state.current_chat_id]]

                api_model_name = config.MODELS.get(st.session_state.selected_model, config.DEFAULT_MODEL)
                if api_model_name not in config.MODELS.values():
                     api_model_name = config.DEFAULT_MODEL

                stream = client.chat.completions.create(
                    model=api_model_name,
                    messages=messages_for_api,
                    stream=True,
                    web_search=False
                )

                for chunk in stream:
                    if chunk.choices[0].delta.content is not None:
                        full_response += chunk.choices[0].delta.content
                        message_placeholder.markdown(full_response + "▌")

                message_placeholder.markdown(full_response)

            except Exception as e:
                full_response = f"Error generating response: {e}"
                message_placeholder.markdown(full_response)

            st.session_state.chat_sessions[st.session_state.current_chat_id].append({"role": "assistant", "content": full_response})

        st.rerun()