# Streamlit chat application (hosting-page "Spaces: Running" header residue removed).
# Imports grouped stdlib / third-party / local per PEP 8.
import os
import time
import uuid

import streamlit as st
from dotenv import load_dotenv
from g4f.client import Client

import config

# Pull OPENAI_BASE_URL / OPENAI_API_KEY (read later) from a local .env file.
load_dotenv()

# Page chrome; st.set_page_config must be the first Streamlit call in the script.
st.set_page_config(
    page_title=config.APP_NAME,
    page_icon=":robot_face:",
    layout="wide",
)
# Global dark-theme CSS overrides: chat bubbles, sidebar, layout spacing.
# unsafe_allow_html is required for raw <style> injection.
st.markdown("""
<style>
.stChatMessage {
    background-color: #262730;
    border-radius: 10px;
    padding: 15px;
    margin-bottom: 10px;
    color: white;
    box-shadow: 2px 2px 5px rgba(0, 0, 0, 0.2);
}
.stSidebar {
    background-color: #1E1E1E;
    color: white;
    padding: 20px;
}
.stSidebar h2, .stSidebar h3 {
    color: #007BFF;
}
.stSidebar .stButton>button {
    width: 100%;
    border-radius: 5px;
    margin-bottom: 10px;
}
.main .block-container {
    padding-top: 2rem;
    padding-bottom: 2rem;
}
.stChatInputContainer {
    margin-top: 20px;
}
body {
    color: white;
}
</style>
""", unsafe_allow_html=True)
# --- Session-state defaults (set once per browser session, kept across reruns) ---
_SESSION_DEFAULTS = {
    "chat_sessions": {},                          # chat_id -> list of {"role", "content"} messages
    "current_chat_id": None,                      # id of the chat shown in the main pane
    "system_message": "Hello! I am ready to help you.",  # greeting that seeds every new chat
    "selected_model": config.DEFAULT_MODEL,       # model chosen in the selectbox
    "model_selected": False,                      # True hides the model picker for the active chat
    "request_timestamps": [],                     # unix times of recent requests (rate limiting)
}
for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
def start_new_chat():
    """Create a fresh chat session seeded with the assistant greeting.

    Side effects: registers a new entry in ``st.session_state.chat_sessions``,
    makes it the current chat, re-enables the model selector and clears the
    rate-limit timestamp window.
    """
    new_chat_id = str(uuid.uuid4())
    st.session_state.chat_sessions[new_chat_id] = [
        {"role": "assistant", "content": st.session_state.system_message}
    ]
    st.session_state.current_chat_id = new_chat_id
    st.session_state.model_selected = False
    st.session_state.request_timestamps = []
# --- Sidebar: chat management and history navigation ---
with st.sidebar:
    st.header("Chat History")
    if st.button("New Chat"):
        start_new_chat()
        st.rerun()
    # First visit: create an initial chat so the main pane has content.
    if st.session_state.current_chat_id is None:
        start_new_chat()
        st.rerun()
    st.subheader("Existing Chats")
    if st.session_state.chat_sessions:
        # Newest chats first.
        for chat_id in reversed(list(st.session_state.chat_sessions.keys())):
            history = st.session_state.chat_sessions[chat_id]
            # Title the chat with its first user message when one exists,
            # otherwise fall back to the assistant greeting.
            if len(history) > 1 and history[1]["role"] == "user":
                title_source = str(history[1]["content"])
            else:
                title_source = str(history[0]["content"])
            # Truncate long titles to keep the sidebar tidy.
            chat_title = title_source[:30] + "..." if len(title_source) > 30 else title_source
            # NOTE: a `button_style` string was previously computed here but never
            # applied anywhere — removed as dead code.
            if st.button(chat_title, key=f"select_chat_{chat_id}"):
                st.session_state.current_chat_id = chat_id
                st.session_state.model_selected = True  # existing chat keeps its model
                st.rerun()
    else:
        st.info("No chats yet.")
st.title(config.APP_NAME)
st.markdown(f"*{config.APP_DESCRIPTION}*")

# The model picker is only shown until a chat becomes active
# (model_selected flips to True on first message or chat switch).
if not st.session_state.model_selected:
    display_names = list(config.MODELS.keys())
    api_names = list(config.MODELS.values())
    # Pre-select DEFAULT_MODEL; the display/API lists are parallel because
    # both come from the same insertion-ordered dict.
    default_index = api_names.index(config.DEFAULT_MODEL) if config.DEFAULT_MODEL in api_names else 0
    st.session_state.selected_model = st.selectbox(
        "Select AI Model", display_names, index=default_index
    )
# --- Main pane: render the transcript of the current chat ---
current_chat_history = st.session_state.chat_sessions.get(st.session_state.current_chat_id, [])
chat_container = st.container(border=True)
with chat_container:
    for message in current_chat_history:
        # Person avatar for the user, robot for the assistant.
        avatar_icon = "👤" if message["role"] == "user" else "🤖"
        with st.chat_message(message["role"], avatar=avatar_icon):
            st.markdown(message["content"])
# --- Input handling: rate limiting, user echo, streamed model response ---
user_input = st.chat_input("Type your message here...")
if user_input:
    now = time.time()
    # Keep only timestamps inside the largest (daily) window.
    window = [ts for ts in st.session_state.request_timestamps if ts > now - 86400]
    in_minute = sum(1 for ts in window if ts > now - 60)
    in_hour = sum(1 for ts in window if ts > now - 3600)
    # Would accepting this request push any window past its limit?
    limited = (
        in_minute + 1 > config.RPM
        or in_hour + 1 > config.RPH
        or len(window) + 1 > config.RPD
    )
    if limited:
        # FIX: previously the blocked attempt's timestamp was still stored,
        # so rejected requests consumed quota. Only accepted requests count now.
        st.session_state.request_timestamps = window
        st.warning("Rate limit exceeded. Please wait before sending another message.")
    else:
        window.append(now)
        st.session_state.request_timestamps = window
        chat_id = st.session_state.current_chat_id
        st.session_state.chat_sessions[chat_id].append({"role": "user", "content": user_input})
        st.session_state.model_selected = True  # lock the model for this chat
        with chat_container:
            # Echo the user's message immediately.
            with st.chat_message("user", avatar="👤"):
                st.markdown(user_input)
            # Stream the assistant's reply into a placeholder.
            with st.chat_message("assistant", avatar="🤖"):
                message_placeholder = st.empty()
                full_response = ""
                with st.spinner(f"Generating response with {st.session_state.selected_model}..."):
                    try:
                        client_params = {}
                        # Optional OpenAI-compatible endpoint override via environment.
                        if config.ALGORITHM == 'openai':
                            base_url = os.getenv("OPENAI_BASE_URL")
                            api_key = os.getenv("OPENAI_API_KEY")
                            if base_url:
                                client_params['base_url'] = base_url
                            if api_key:
                                client_params['api_key'] = api_key
                        client = Client(**client_params)
                        messages_for_api = [
                            {"role": m["role"], "content": m["content"]}
                            for m in st.session_state.chat_sessions[chat_id]
                        ]
                        # Map the display name to its API model id; fall back to the
                        # default for anything unrecognised.
                        api_model_name = config.MODELS.get(st.session_state.selected_model, config.DEFAULT_MODEL)
                        if api_model_name not in config.MODELS.values():
                            api_model_name = config.DEFAULT_MODEL
                        stream = client.chat.completions.create(
                            model=api_model_name,
                            messages=messages_for_api,
                            stream=True,
                            web_search=False,
                        )
                        # Accumulate streamed tokens, showing a cursor marker while live.
                        for chunk in stream:
                            delta = chunk.choices[0].delta.content
                            if delta is not None:
                                full_response += delta
                                message_placeholder.markdown(full_response + "▌")
                        message_placeholder.markdown(full_response)
                    except Exception as e:
                        # Surface the failure in the chat instead of crashing the app;
                        # the error text is stored as the assistant turn below.
                        full_response = f"Error generating response: {e}"
                        message_placeholder.markdown(full_response)
        st.session_state.chat_sessions[chat_id].append({"role": "assistant", "content": full_response})
        st.rerun()