# Hugging Face Spaces status banner ("Spaces / Sleeping") captured during
# page export - kept as a comment so the file remains valid Python.
# Standard library
import contextlib
import json
import os
import time
import uuid
from datetime import datetime, timedelta

# Third-party
import requests
import streamlit as st
# Page configuration - st.set_page_config must be the first Streamlit call.
_PAGE_SETTINGS = {
    "page_title": "Chat Flow π·",
    "page_icon": "π¬",
    "initial_sidebar_state": "collapsed",
}
st.set_page_config(**_PAGE_SETTINGS)
# Inject global CSS. The base rules keep the original desktop design; the
# @media blocks add mobile-only overrides (<=768px) and extra-small tweaks
# (<=480px) without affecting the desktop layout.
# NOTE(review): selectors like .css-1d391kg are Streamlit-generated class
# names and may break across Streamlit versions - verify after upgrades.
st.markdown("""
<style>
.stApp {
    background: white;
}
.main .block-container {
    max-width: 800px;
}
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}
.stDeployButton {display: none;}
.model-id {
    color: #28a745;
    font-family: monospace;
}
.model-attribution {
    color: #28a745;
    font-size: 0.8em;
    font-style: italic;
}
/* MOBILE ONLY - doesn't affect web */
@media (max-width: 768px) {
    .stApp {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    }
    .main .block-container {
        max-width: 100%;
        padding: 0.5rem;
    }
    /* Make sidebar work better on mobile */
    .css-1d391kg {
        width: 100% !important;
        background: rgba(255, 255, 255, 0.95);
        backdrop-filter: blur(10px);
    }
    /* Chat messages styling for mobile */
    .stChatMessage {
        background: rgba(255, 255, 255, 0.95);
        border-radius: 15px;
        margin: 0.5rem 0;
        padding: 1rem;
        box-shadow: 0 2px 10px rgba(0,0,0,0.1);
    }
    /* User messages - right aligned */
    .stChatMessage[data-testid="user-message"] {
        background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
        color: white;
        margin-left: 15%;
    }
    /* Assistant messages - left aligned */
    .stChatMessage[data-testid="assistant-message"] {
        background: rgba(255, 255, 255, 0.98);
        color: #333;
        margin-right: 15%;
    }
    /* Mobile text sizing */
    .stMarkdown {
        font-size: 14px;
    }
    h1 {
        font-size: 1.5rem !important;
        color: white;
        text-shadow: 2px 2px 4px rgba(0,0,0,0.3);
        text-align: center;
    }
    .stCaption {
        color: rgba(255, 255, 255, 0.8);
        text-align: center;
    }
    /* Mobile buttons */
    .stButton button {
        border-radius: 8px;
        font-size: 14px;
    }
    /* Chat input for mobile */
    .stChatInput {
        background: rgba(255, 255, 255, 0.9);
        border-radius: 20px;
        backdrop-filter: blur(10px);
    }
    .stChatInput input {
        font-size: 16px; /* Prevents zoom on iOS */
    }
}
/* Extra small screens (iPhone 5, old phones) */
@media (max-width: 480px) {
    .main .block-container {
        padding: 0.25rem;
    }
    .stChatMessage {
        padding: 0.75rem;
        margin: 0.25rem 0;
    }
    .stChatMessage[data-testid="user-message"] {
        margin-left: 8%;
    }
    .stChatMessage[data-testid="assistant-message"] {
        margin-right: 8%;
    }
    h1 {
        font-size: 1.3rem !important;
    }
    .stButton button {
        font-size: 12px;
        padding: 0.4rem;
    }
    .stMarkdown {
        font-size: 13px;
    }
}
</style>
""", unsafe_allow_html=True)
# JSON file that persists the conversation across Streamlit reruns/restarts.
HISTORY_FILE = "chat_history.json"
# JSON file tracking which sessions were seen online recently.
USERS_FILE = "online_users.json"
def load_chat_history():
    """Return the saved chat messages, or an empty list when none exist.

    Any read/parse failure is reported via st.error and treated as "no
    history" rather than raised.
    """
    if not os.path.exists(HISTORY_FILE):
        return []
    try:
        with open(HISTORY_FILE, 'r', encoding='utf-8') as fh:
            return json.load(fh)
    except Exception as exc:
        st.error(f"Error loading chat history: {exc}")
        return []
def save_chat_history(messages):
    """Persist the message list to HISTORY_FILE as pretty-printed JSON.

    Failures are surfaced via st.error instead of raising.
    """
    try:
        payload = json.dumps(messages, ensure_ascii=False, indent=2)
        with open(HISTORY_FILE, 'w', encoding='utf-8') as fh:
            fh.write(payload)
    except Exception as exc:
        st.error(f"Error saving chat history: {exc}")
def clear_chat_history():
    """Delete the persisted history file and reset the in-memory messages.

    Uses contextlib.suppress instead of an exists()/remove() pair, which was
    racy: another session could delete the file between the check and the
    removal. The in-memory list is cleared even when the file is absent.
    """
    try:
        # Tolerate the file already being gone (e.g. cleared elsewhere).
        with contextlib.suppress(FileNotFoundError):
            os.remove(HISTORY_FILE)
        st.session_state.messages = []
    except Exception as e:
        st.error(f"Error clearing chat history: {e}")
# User tracking functions
def get_user_id():
    """Return a short identifier unique to this browser session.

    The id is generated once per session and cached in st.session_state.
    """
    cached = st.session_state.get('user_id')
    if cached is None:
        # 8 hex chars are plenty for a small family-scale deployment.
        cached = uuid.uuid4().hex[:8]
        st.session_state.user_id = cached
    return cached
def update_online_users():
    """Mark this session as online and prune entries older than 5 minutes.

    Returns the number of currently-active users. On any failure the
    function falls back to 1 (this session is online regardless).
    """
    try:
        # Read the current roster, if one exists.
        roster = {}
        if os.path.exists(USERS_FILE):
            with open(USERS_FILE, 'r') as fh:
                roster = json.load(fh)

        # Record (or refresh) this session's heartbeat.
        uid = get_user_id()
        roster[uid] = {
            'last_seen': datetime.now().isoformat(),
            'name': f'User-{uid}',
        }

        # Keep only sessions seen within the last five minutes.
        now = datetime.now()
        window = timedelta(minutes=5)
        active = {
            key: info
            for key, info in roster.items()
            if now - datetime.fromisoformat(info['last_seen']) < window
        }

        with open(USERS_FILE, 'w') as fh:
            json.dump(active, fh, indent=2)
        return len(active)
    except Exception:
        # Best-effort feature: never break the app over presence tracking.
        return 1
def get_online_count():
    """Count users whose last_seen timestamp is within the past 5 minutes.

    Returns 0 when the roster file is missing or unreadable.
    """
    try:
        if not os.path.exists(USERS_FILE):
            return 0
        with open(USERS_FILE, 'r') as fh:
            roster = json.load(fh)
        now = datetime.now()
        window = timedelta(minutes=5)
        return sum(
            1
            for info in roster.values()
            if now - datetime.fromisoformat(info['last_seen']) < window
        )
    except Exception:
        return 0
# Initialize session state from the persisted history so a page refresh
# (or app restart) does not lose the conversation.
if "messages" not in st.session_state:
    st.session_state.messages = load_chat_history()
# OpenRouter API key comes from the environment; may be None if unset.
OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY")
def check_api_status():
    """Probe the OpenRouter models endpoint to verify API connectivity.

    Returns one of three status strings: "No API Key" when the environment
    variable is unset, "Connected" on an HTTP 200, otherwise "Error".
    """
    if not OPENROUTER_API_KEY:
        return "No API Key"
    try:
        url = "https://openrouter.ai/api/v1/models"
        headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"}
        response = requests.get(url, headers=headers, timeout=10)
        return "Connected" if response.status_code == 200 else "Error"
    except requests.exceptions.RequestException:
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit. Any network-level failure
        # (DNS, timeout, connection refused) maps to "Error".
        return "Error"
def get_ai_response(messages, model="openai/gpt-3.5-turbo"):
    """Stream a chat completion from OpenRouter.

    Yields the *accumulated* response text after each streamed delta so the
    caller can re-render the whole message progressively. Error conditions
    are yielded as plain text (never raised) so the UI always shows
    something.

    Args:
        messages: list of {"role", "content"} dicts (conversation history).
        model: OpenRouter model identifier.
    """
    if not OPENROUTER_API_KEY:
        # BUG FIX: this was `return "<message>"`. In a generator, a return
        # value becomes StopIteration.value and is invisible to a for-loop,
        # so the caller rendered nothing. The message must be yielded.
        yield "No API key found. Please add OPENROUTER_API_KEY to environment variables."
        return
    url = "https://openrouter.ai/api/v1/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
        "HTTP-Referer": "http://localhost:8501",  # Optional: your site URL
        "X-Title": "Streamlit AI Assistant",      # Optional: your app name
    }
    # Prepend a system prompt to the conversation history.
    api_messages = [{"role": "system", "content": "You are a helpful AI assistant. Provide clear and helpful responses."}]
    api_messages.extend(messages)
    # Renamed from `data` - the original reused that name for each parsed
    # stream chunk, shadowing the request payload.
    payload = {
        "model": model,
        "messages": api_messages,
        "stream": True,
        "max_tokens": 2000,
        "temperature": 0.7,
        "top_p": 1,
        "frequency_penalty": 0,
        "presence_penalty": 0,
    }
    try:
        response = requests.post(url, headers=headers, json=payload, stream=True, timeout=60)
        if response.status_code != 200:
            try:
                error_data = response.json()
                error_detail = error_data.get('error', {}).get('message', f"HTTP {response.status_code}")
            except ValueError:
                # Body was not JSON (requests raises ValueError/JSONDecodeError);
                # narrowed from a bare `except:`.
                error_detail = f"HTTP {response.status_code}: {response.reason}"
            yield f"API Error: {error_detail}. Please try a different model or check your API key."
            return
        full_response = ""
        # Server-sent events: each chunk arrives as a line "data: {...}",
        # terminated by a literal "[DONE]" sentinel.
        for line in response.iter_lines():
            if not line or not line.startswith(b"data: "):
                continue
            data_str = line[len(b"data: "):].decode("utf-8")
            if data_str.strip() == "[DONE]":
                break
            try:
                chunk = json.loads(data_str)
                delta = chunk["choices"][0]["delta"].get("content", "")
            except (json.JSONDecodeError, KeyError, IndexError):
                # Malformed or non-content chunk - skip it.
                continue
            if delta:
                full_response += delta
                yield full_response
    except requests.exceptions.Timeout:
        yield "Request timed out. Please try again with a shorter message or different model."
    except requests.exceptions.ConnectionError:
        yield "Connection error. Please check your internet connection and try again."
    except requests.exceptions.RequestException as e:
        yield f"Request error: {str(e)}. Please try again."
    except Exception as e:
        yield f"Unexpected error: {str(e)}. Please try again or contact support."
# Page header.
st.title("Chat Flow π·")
st.caption("10 powerful Models, one simple chat.")
# Sidebar: API status, live presence, model picker, and history controls.
# Widget declaration order matters in Streamlit, so the flow here is linear.
with st.sidebar:
    st.header("Settings")
    # API status indicator (one probe per rerun).
    status = check_api_status()
    if status == "Connected":
        st.success("π’ API Connected")
    elif status == "No API Key":
        st.error("No API Key")
    else:
        st.warning("Connection Issue")
    st.divider()
    # Live Users Section
    st.header("π₯ Who's Online")
    # Record this session's heartbeat (also prunes stale sessions).
    online_count = update_online_users()
    # Show live count
    if online_count == 1:
        st.info("π’ Just you online")
    else:
        st.success(f"π’ {online_count} people online")
    # Show this session's id.
    your_id = get_user_id()
    st.caption(f"You: User-{your_id}")
    # Quick refresh button - forces a rerun to re-read presence.
    if st.button("Refresh", use_container_width=True):
        st.rerun()
    # Debug section: raw contents of the presence file.
    with st.expander("π Debug Info"):
        if os.path.exists(USERS_FILE):
            with open(USERS_FILE, 'r') as f:
                users = json.load(f)
            st.write(f"Users in file: {len(users)}")
            for uid, data in users.items():
                last_seen_time = datetime.fromisoformat(data['last_seen'])
                time_ago = datetime.now() - last_seen_time
                minutes_ago = int(time_ago.total_seconds() / 60)
                st.write(f"- {uid}: {minutes_ago} min ago")
        else:
            st.write("No users file yet")
    st.divider()
    # (display name, OpenRouter model id) pairs; ":free" ids are free-tier.
    models = [
        ("GPT-3.5 Turbo", "openai/gpt-3.5-turbo"),
        ("LLaMA 3.1 8B", "meta-llama/llama-3.1-8b-instruct"),
        ("LLaMA 3.1 70B", "meta-llama/llama-3.1-70b-instruct"),
        ("DeepSeek Chat v3", "deepseek/deepseek-chat-v3-0324:free"),
        ("DeepSeek R1", "deepseek/deepseek-r1-0528:free"),
        ("Qwen3 Coder", "qwen/qwen3-coder:free"),
        ("Microsoft MAI DS R1", "microsoft/mai-ds-r1:free"),
        ("Gemma 3 27B", "google/gemma-3-27b-it:free"),
        ("Gemma 3 4B", "google/gemma-3-4b-it:free"),
        ("Auto (Best Available)", "openrouter/auto")
    ]
    model_names = [name for name, _ in models]
    model_ids = [model_id for _, model_id in models]
    # Selectbox over indices so the label and the id stay paired.
    selected_index = st.selectbox("Model", range(len(model_names)),
                                  format_func=lambda x: model_names[x],
                                  index=0)
    # `selected_model` is read by the chat-input handler in the main body.
    selected_model = model_ids[selected_index]
    # Show selected model id in green (styled by the .model-id CSS class).
    st.markdown(f"**Model ID:** <span class='model-id'>{selected_model}</span>", unsafe_allow_html=True)
    st.divider()
    # Chat History Controls
    st.header("Chat History")
    # Show number of stored messages.
    if st.session_state.messages:
        st.info(f"Messages stored: {len(st.session_state.messages)}")
    # Auto-save toggle - read later by the chat-input handler.
    auto_save = st.checkbox("Auto-save messages", value=True)
    # Manual save/load buttons.
    col1, col2 = st.columns(2)
    with col1:
        if st.button("Save History", use_container_width=True):
            save_chat_history(st.session_state.messages)
            st.success("History saved!")
    with col2:
        if st.button("Load History", use_container_width=True):
            st.session_state.messages = load_chat_history()
            st.success("History loaded!")
            st.rerun()
    st.divider()
    # View the raw history file contents.
    if st.button("View History File", use_container_width=True):
        if os.path.exists(HISTORY_FILE):
            with open(HISTORY_FILE, 'r', encoding='utf-8') as f:
                history_content = f.read()
            st.text_area("Chat History (JSON)", history_content, height=200)
        else:
            st.warning("No history file found")
    # Download the history file (timestamped filename).
    if os.path.exists(HISTORY_FILE):
        with open(HISTORY_FILE, 'rb') as f:
            st.download_button(
                label="Download History",
                data=f.read(),
                file_name=f"chat_history_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
                mime="application/json",
                use_container_width=True
            )
    st.divider()
    # Clear both the file and the in-memory conversation.
    if st.button("Clear Chat", use_container_width=True, type="secondary"):
        clear_chat_history()
        st.success("Chat cleared!")
        st.rerun()
# Render the stored conversation. Assistant messages may carry a trailing
# model-attribution footer appended by the chat-input handler; it is split
# off and rendered with the styled .model-attribution class instead.
for message in st.session_state.messages:
    role = message["role"]
    content = message["content"]
    with st.chat_message(role):
        if role == "assistant" and "Response created by:" in content:
            segments = content.split("\n\n---\n*Response created by:")
            body = segments[0]
            if len(segments) > 1:
                # Strip the markdown emphasis markers around the model name.
                label = segments[1].replace("***", "").replace("**", "")
                st.markdown(body)
                st.markdown(f"<div class='model-attribution'>Response created by: <strong>{label}</strong></div>", unsafe_allow_html=True)
            else:
                st.markdown(content)
        else:
            st.markdown(content)
# Chat input: this branch runs once per submitted prompt (Streamlit reruns
# the whole script on every interaction).
if prompt := st.chat_input("Chat Smarter. Chat many Brains"):
    # Refresh this session's presence heartbeat.
    update_online_users()
    # Append the user's message to the conversation.
    user_message = {"role": "user", "content": prompt}
    st.session_state.messages.append(user_message)
    # Auto-save if enabled (checkbox lives in the sidebar).
    if auto_save:
        save_chat_history(st.session_state.messages)
    # Echo the user message immediately.
    with st.chat_message("user"):
        st.markdown(prompt)
    # Stream the AI response into a placeholder.
    with st.chat_message("assistant"):
        placeholder = st.empty()
        full_response = ""
        try:
            # get_ai_response yields the *accumulated* text, so each
            # iteration re-renders the whole message with a cursor glyph.
            for response in get_ai_response(st.session_state.messages, selected_model):
                full_response = response
                placeholder.markdown(full_response + "β")
            # Remove the cursor and show the final response.
            placeholder.markdown(full_response)
        except Exception as e:
            error_msg = f"An error occurred: {str(e)}"
            placeholder.markdown(error_msg)
            full_response = error_msg
    # Store the response with an attribution footer; the render loop above
    # parses this marker back out when re-displaying history.
    full_response_with_attribution = full_response + f"\n\n---\n*Response created by: **{model_names[selected_index]}***"
    assistant_message = {"role": "assistant", "content": full_response_with_attribution}
    st.session_state.messages.append(assistant_message)
    # Auto-save if enabled.
    if auto_save:
        save_chat_history(st.session_state.messages)
# Footer: show which model is currently selected in the sidebar.
st.caption(f"Currently using: **{model_names[selected_index]}**")