import streamlit as st
import asyncio
import websockets
import uuid
import argparse
from datetime import datetime
import os
import random
import time
import hashlib
from PIL import Image
import glob
import base64
import io
import streamlit.components.v1 as components
import edge_tts
import nest_asyncio
import re
from streamlit_paste_button import paste_image_button
import pytz
import shutil
from urllib.parse import urlencode
from PyPDF2 import PdfReader
import json
# Patch for nested async - sneaky fix! 🐍✨
nest_asyncio.apply()
# Static config - constants rule! πŸ“πŸ‘‘
icons = 'πŸ€–πŸ§ πŸ”¬πŸ“'
START_ROOM = "Sector 🌌"
# Page setup - dressing up the window! πŸ–ΌοΈπŸŽ€
st.set_page_config(
page_title="πŸ€–πŸ§ MMO Chat BrainπŸ“πŸ”¬",
page_icon=icons,
layout="wide",
initial_sidebar_state="auto"
)
# Funky usernames with corresponding Edge TTS voices
FUN_USERNAMES = {
"CosmicJester 🌌": "en-US-AriaNeural",
"PixelPanda 🐼": "en-US-JennyNeural",
"QuantumQuack πŸ¦†": "en-GB-SoniaNeural",
"StellarSquirrel 🐿️": "en-AU-NatashaNeural",
"GizmoGuru βš™οΈ": "en-CA-ClaraNeural",
"NebulaNinja 🌠": "en-US-GuyNeural",
"ByteBuster πŸ’Ύ": "en-GB-RyanNeural",
"GalacticGopher 🌍": "en-AU-WilliamNeural",
"RocketRaccoon πŸš€": "en-CA-LiamNeural",
"EchoElf 🧝": "en-US-AnaNeural",
"PhantomFox 🦊": "en-US-BrandonNeural",
"WittyWizard πŸ§™": "en-GB-ThomasNeural",
"LunarLlama πŸŒ™": "en-AU-FreyaNeural",
"SolarSloth β˜€οΈ": "en-CA-LindaNeural",
"AstroAlpaca πŸ¦™": "en-US-ChristopherNeural",
"CyberCoyote 🐺": "en-GB-ElliotNeural",
"MysticMoose 🦌": "en-AU-JamesNeural",
"GlitchGnome 🧚": "en-CA-EthanNeural",
"VortexViper 🐍": "en-US-AmberNeural",
"ChronoChimp πŸ’": "en-GB-LibbyNeural"
}
# Top-level data files, plus the base64 media directory
CHAT_FILE = "global_chat.md"
QUOTE_VOTES_FILE = "quote_votes.md"
MEDIA_VOTES_FILE = "media_votes.md"
HISTORY_FILE = "chat_history.md"
STATE_FILE = "user_state.txt"
MEDIA_DIR = "media_base64"
# Fancy digits - numbers got style! πŸ”’πŸ’ƒ
UNICODE_DIGITS = {i: f"{i}\uFE0F⃣" for i in range(10)}
# Massive font collection - typography bonanza! πŸ–‹οΈπŸŽ¨
UNICODE_FONTS = [
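    # Each style maps A-Z/a-z into a Unicode Mathematical Alphanumeric (or enclosed) block via a fixed code-point offset.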
("Normal", lambda x: x),
("Bold", lambda x: "".join(chr(ord(c) + 0x1D400 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D41A - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Italic", lambda x: "".join(chr(ord(c) + 0x1D434 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D44E - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Bold Italic", lambda x: "".join(chr(ord(c) + 0x1D468 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D482 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Script", lambda x: "".join(chr(ord(c) + 0x1D49C - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D4B6 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Bold Script", lambda x: "".join(chr(ord(c) + 0x1D4D0 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D4EA - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Fraktur", lambda x: "".join(chr(ord(c) + 0x1D504 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D51E - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Bold Fraktur", lambda x: "".join(chr(ord(c) + 0x1D56C - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D586 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Double Struck", lambda x: "".join(chr(ord(c) + 0x1D538 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D552 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Sans Serif", lambda x: "".join(chr(ord(c) + 0x1D5A0 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D5BA - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Sans Serif Bold", lambda x: "".join(chr(ord(c) + 0x1D5D4 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D5EE - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Sans Serif Italic", lambda x: "".join(chr(ord(c) + 0x1D608 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D622 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Sans Serif Bold Italic", lambda x: "".join(chr(ord(c) + 0x1D63C - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D656 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Monospace", lambda x: "".join(chr(ord(c) + 0x1D670 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D68A - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Circled", lambda x: "".join(chr(ord(c) - 0x41 + 0x24B6) if 'A' <= c <= 'Z' else chr(ord(c) - 0x61 + 0x24D0) if 'a' <= c <= 'z' else c for c in x)),
("Squared", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F130) if 'A' <= c <= 'Z' else c for c in x)),
("Negative Circled", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F150) if 'A' <= c <= 'Z' else c for c in x)),
("Negative Squared", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F170) if 'A' <= c <= 'Z' else c for c in x)),
("Regional Indicator", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F1E6) if 'A' <= c <= 'Z' else c for c in x)),
]
# Global state - keeping tabs! πŸŒπŸ“‹
if 'server_running' not in st.session_state:
st.session_state.server_running = False
if 'server_task' not in st.session_state:
st.session_state.server_task = None
if 'active_connections' not in st.session_state:
st.session_state.active_connections = {}
if 'media_notifications' not in st.session_state:
st.session_state.media_notifications = []
if 'last_chat_update' not in st.session_state:
st.session_state.last_chat_update = 0
if 'displayed_chat_lines' not in st.session_state:
st.session_state.displayed_chat_lines = []
if 'message_text' not in st.session_state:
st.session_state.message_text = ""
if 'audio_cache' not in st.session_state:
st.session_state.audio_cache = {}
if 'pasted_image_data' not in st.session_state:
st.session_state.pasted_image_data = None
if 'quote_line' not in st.session_state:
st.session_state.quote_line = None
if 'refresh_rate' not in st.session_state:
st.session_state.refresh_rate = 5
if 'base64_cache' not in st.session_state:
st.session_state.base64_cache = {}
if 'image_hashes' not in st.session_state:
st.session_state.image_hashes = set()
if 'gallery_columns' not in st.session_state:
st.session_state.gallery_columns = 1
if 'user_id' not in st.session_state:
st.session_state.user_id = None
if 'user_hash' not in st.session_state:
st.session_state.user_hash = None
# Timestamp wizardry - clock ticks with flair! ⏰🎩
def format_timestamp_prefix(username):
central = pytz.timezone('US/Central')
now = datetime.now(central)
return f"{now.strftime('%I-%M-%p-ct-%m-%d-%Y')}-by-{username}-{st.session_state.user_id}"
# Compute image hash from binary data
def compute_image_hash(image_data):
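    # Accepts a PIL Image or raw bytes; hashes the PNG-encoded bytes and keeps 8 hex chars for short filenames.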
if isinstance(image_data, Image.Image):
img_byte_arr = io.BytesIO()
image_data.save(img_byte_arr, format='PNG')
img_bytes = img_byte_arr.getvalue()
else:
img_bytes = image_data
return hashlib.md5(img_bytes).hexdigest()[:8]
# Node naming - christening the beast! 🌐🍼
def get_node_name():
parser = argparse.ArgumentParser(description='Start a chat node with a specific name')
parser.add_argument('--node-name', type=str, default=None)
parser.add_argument('--port', type=int, default=8501)
args = parser.parse_args()
username = st.session_state.get('username', 'System 🌟')
log_action(username, "🌐🍼 - Node naming - christening the beast!")
return args.node_name or f"node-{uuid.uuid4().hex[:8]}", args.port
# Action logger - spying on deeds! πŸ•΅οΈπŸ“œ
def log_action(username, action):
if 'action_log' not in st.session_state:
st.session_state.action_log = {}
user_log = st.session_state.action_log.setdefault(username, {})
current_time = time.time()
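    # Keep only actions from the last 10 seconds so repeated actions are logged at most once per window.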
user_log = {k: v for k, v in user_log.items() if current_time - v < 10}
st.session_state.action_log[username] = user_log
if action not in user_log:
central = pytz.timezone('US/Central')
with open(HISTORY_FILE, 'a') as f:
f.write(f"[{datetime.now(central).strftime('%Y-%m-%d %H:%M:%S')}] {username}: {action}\n")
user_log[action] = current_time
# Clean text - strip the fancy stuff! πŸ§ΉπŸ“
def clean_text_for_tts(text):
cleaned = re.sub(r'[#*!\[\]]+', '', text)
cleaned = ' '.join(cleaned.split())
return cleaned if cleaned else "No text to speak" # Default if empty
# Audio Processor Class from your code, adapted
class AudioProcessor:
def __init__(self):
self.cache_dir = "audio_cache"
os.makedirs(self.cache_dir, exist_ok=True)
self.metadata = self._load_metadata()
def _load_metadata(self):
metadata_file = os.path.join(self.cache_dir, "metadata.json")
return json.load(open(metadata_file)) if os.path.exists(metadata_file) else {}
def _save_metadata(self):
metadata_file = os.path.join(self.cache_dir, "metadata.json")
with open(metadata_file, 'w') as f:
json.dump(self.metadata, f)
async def create_audio(self, text, voice='en-US-AriaNeural', filename=None):
cache_key = hashlib.md5(f"{text}:{voice}".encode()).hexdigest()
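        # Key the cache on (text, voice) so identical requests reuse the same mp3.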
cache_path = filename or os.path.join(self.cache_dir, f"{cache_key}.mp3")
if cache_key in self.metadata and os.path.exists(cache_path):
return cache_path
# Clean text for speech
text = text.replace("\n", " ").replace("</s>", " ").strip()
if not text:
return None
# Generate audio with edge_tts
try:
communicate = edge_tts.Communicate(text, voice)
await communicate.save(cache_path)
if not os.path.exists(cache_path):
raise edge_tts.exceptions.NoAudioReceived("No audio file created")
except edge_tts.exceptions.NoAudioReceived as e:
log_action("System 🌟", f"TTS failed for text '{text}' with voice '{voice}': {str(e)}")
return None
# Update metadata
self.metadata[cache_key] = {
'timestamp': datetime.now().isoformat(),
'text_length': len(text),
'voice': voice
}
self._save_metadata()
return cache_path
# Chat saver - words locked tight! πŸ’¬πŸ”’
async def save_chat_entry(username, message, is_markdown=False, quote_line=None, media_file=None):
await asyncio.to_thread(log_action, username, "πŸ’¬πŸ”’ - Chat saver - words locked tight!")
central = pytz.timezone('US/Central')
timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
user_history_file = f"{username}_history.md"
voice = st.session_state.voice if username == st.session_state.username else FUN_USERNAMES.get(username, "en-US-AriaNeural")
indent = " " if quote_line else "" # Nesting for replies
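    # Entries are written as "[timestamp] username: message"; quoted text gets a "> " line above the reply.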
# Prepare entry
if is_markdown:
entry = f"{indent}[{timestamp}] {username}:\n{indent}```markdown\n{indent}{message}\n{indent}```"
else:
entry = f"{indent}[{timestamp}] {username}: {message}"
if quote_line:
entry = f"{indent}> {quote_line}\n{entry}"
# Save to global chat file
with open(CHAT_FILE, 'a') as f:
f.write(f"{entry}\n")
# Save to user-specific history file
if not os.path.exists(user_history_file):
with open(user_history_file, 'w') as f:
f.write(f"# Chat History for {username} (Voice: {voice})\n\n")
with open(user_history_file, 'a') as f:
f.write(f"{entry}\n")
# Generate audio
cleaned_message = clean_text_for_tts(message)
audio_processor = AudioProcessor()
audio_filename = f"{format_timestamp_prefix(username)}-{hashlib.md5(cleaned_message.encode()).hexdigest()[:8]}.mp3"
log_action(username, f"Attempting TTS with text: '{cleaned_message}' and voice: '{voice}'")
audio_file = await audio_processor.create_audio(cleaned_message, voice, audio_filename)
# Log audio and media
if audio_file:
with open(HISTORY_FILE, 'a') as f:
f.write(f"[{timestamp}] {username} ({voice}): Audio generated - {audio_filename}\n")
with open(user_history_file, 'a') as f:
f.write(f"{indent}[{timestamp}] Audio: {audio_filename}\n")
with open(CHAT_FILE, 'a') as f:
f.write(f"{indent}[{timestamp}] Audio: {audio_filename}\n")
if media_file:
if isinstance(media_file, Image.Image):
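            # Serialize the PIL image to a base64 .b64 text file under MEDIA_DIR so it can be referenced from the chat markdown.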
img_hash = compute_image_hash(media_file)
timestamp_prefix = format_timestamp_prefix(username)
media_filename = f"{timestamp_prefix}-{img_hash}.b64"
media_path = os.path.join(MEDIA_DIR, media_filename)
os.makedirs(MEDIA_DIR, exist_ok=True)
img_byte_arr = io.BytesIO()
media_file.save(img_byte_arr, format='PNG')
img_bytes = img_byte_arr.getvalue()
b64_data = base64.b64encode(img_bytes).decode()
with open(media_path, 'w') as f:
f.write(b64_data)
media_file = media_filename
with open(CHAT_FILE, 'a') as f:
f.write(f"{indent}[{timestamp}] Media: ![Media]({media_file})\n")
with open(user_history_file, 'a') as f:
f.write(f"{indent}[{timestamp}] Media: ![Media]({media_file})\n")
await broadcast_message(f"{username}|{message}", "chat")
st.session_state.last_chat_update = time.time()
return audio_filename
# Save chat history with image or PDF
async def save_chat_history_with_image(username, image_path):
central = pytz.timezone('US/Central')
timestamp = datetime.now(central).strftime("%Y-%m-%d_%H-%M-%S")
user_history_file = f"{username}_history.md"
chat_content = await load_chat()
voice = st.session_state.voice if username == st.session_state.username else FUN_USERNAMES.get(username, "en-US-AriaNeural")
if not os.path.exists(user_history_file):
with open(user_history_file, 'w') as f:
f.write(f"# Chat History for {username} (Voice: {voice})\n\n")
with open(user_history_file, 'a') as f:
f.write(f"[{timestamp}] {username} (Voice: {voice}) Shared Media: {os.path.basename(image_path)}\n")
f.write(f"```markdown\n{chat_content}\n```\n")
# Chat loader - history unleashed! πŸ“œπŸš€
async def load_chat():
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸ“œπŸš€ - Chat loader - history unleashed!")
if not os.path.exists(CHAT_FILE):
await asyncio.to_thread(lambda: open(CHAT_FILE, 'a').write(f"# {START_ROOM} Chat\n\nWelcome to the cosmic hub - start chatting! 🎀\n"))
with open(CHAT_FILE, 'r') as f:
content = await asyncio.to_thread(f.read)
return content
# User lister - who’s in the gang! πŸ‘₯πŸŽ‰
async def get_user_list(chat_content):
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸ‘₯πŸŽ‰ - User lister - who’s in the gang!")
users = set()
    for line in chat_content.split('\n'):
        if line.strip() and '] ' in line and ': ' in line:
            # Chat lines look like "[timestamp] username: message" - the name sits between "] " and the first ":"
            user = line.split('] ', 1)[1].split(':', 1)[0].strip()
            if user and user not in ('Audio', 'Media'):
                users.add(user)
    return sorted(users)
# Join checker - been here before? πŸšͺπŸ”
async def has_joined_before(client_id, chat_content):
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸšͺπŸ” - Join checker - been here before?")
return any(f"Client-{client_id}" in line for line in chat_content.split('\n'))
# Suggestion maker - old quips resurface! πŸ’‘πŸ“
async def get_message_suggestions(chat_content, prefix):
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸ’‘πŸ“ - Suggestion maker - old quips resurface!")
lines = chat_content.split('\n')
messages = [line.split(': ', 1)[1] for line in lines if ': ' in line and line.strip()]
return [msg for msg in messages if msg.lower().startswith(prefix.lower())][:5]
# Vote saver - cheers recorded! πŸ‘πŸ“Š
async def save_vote(file, item, user_hash, username, comment=""):
await asyncio.to_thread(log_action, username, "πŸ‘πŸ“Š - Vote saver - cheers recorded!")
central = pytz.timezone('US/Central')
timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
entry = f"[{timestamp}] {user_hash} voted for {item}"
await asyncio.to_thread(lambda: open(file, 'a').write(f"{entry}\n"))
await asyncio.to_thread(lambda: open(HISTORY_FILE, "a").write(f"- {timestamp} - User {user_hash} voted for {item}\n"))
chat_message = f"{username} upvoted: \"{item}\""
if comment:
chat_message += f" - {comment}"
await save_chat_entry(username, chat_message)
# Vote counter - tallying the love! πŸ†πŸ“ˆ
async def load_votes(file):
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸ†πŸ“ˆ - Vote counter - tallying the love!")
if not os.path.exists(file):
await asyncio.to_thread(lambda: open(file, 'w').write("# Vote Tally\n\nNo votes yet - get clicking! πŸ–±οΈ\n"))
with open(file, 'r') as f:
content = await asyncio.to_thread(f.read)
lines = content.strip().split('\n')[2:]
votes = {}
user_votes = set()
for line in lines:
if line.strip() and 'voted for' in line:
user_hash = line.split('] ')[1].split(' voted for ')[0]
item = line.split('voted for ')[1]
vote_key = f"{user_hash}-{item}"
if vote_key not in user_votes:
votes[item] = votes.get(item, 0) + 1
user_votes.add(vote_key)
return votes
# Hash generator - secret codes ahoy! πŸ”‘πŸ•΅οΈ
async def generate_user_hash():
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸ”‘πŸ•΅οΈ - Hash generator - secret codes ahoy!")
    if not st.session_state.get('user_hash'):  # user_hash is pre-seeded to None, so test the value, not the key
        st.session_state.user_hash = hashlib.md5(str(random.getrandbits(128)).encode()).hexdigest()[:8]
    return st.session_state.user_hash
# Audio maker - voices come alive! 🎢🌟
async def async_edge_tts_generate(text, voice, rate=0, pitch=0, file_format="mp3"):
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "🎢🌟 - Audio maker - voices come alive!")
timestamp = format_timestamp_prefix(username)
filename = f"{timestamp}-{hashlib.md5(text.encode()).hexdigest()[:8]}.{file_format}"
filepath = filename # Top-level file
communicate = edge_tts.Communicate(text, voice, rate=f"{rate:+d}%", pitch=f"{pitch:+d}Hz")
try:
await communicate.save(filepath)
return filepath if os.path.exists(filepath) else None
except edge_tts.exceptions.NoAudioReceived:
with open(HISTORY_FILE, 'a') as f:
central = pytz.timezone('US/Central')
f.write(f"[{datetime.now(central).strftime('%Y-%m-%d %H:%M:%S')}] {username}: Audio failed - No audio received for '{text}'\n")
return None
# Audio player - tunes blast off! πŸ”ŠπŸš€
def play_and_download_audio(file_path):
if file_path and os.path.exists(file_path):
with open(file_path, "rb") as f:
audio_data = f.read()
b64 = base64.b64encode(audio_data).decode()
audio_html = f'''
<audio controls style="display:inline; vertical-align:middle; width:100px;">
<source src="data:audio/mpeg;base64,{b64}" type="audio/mpeg">
</audio>
<a href="data:audio/mpeg;base64,{b64}" download="{os.path.basename(file_path)}" style="vertical-align:middle;">🎡</a>
'''
return audio_html
return ""
# Image saver - pics preserved with naming as base64! πŸ“ΈπŸ’Ύ
async def save_pasted_image(image, username):
await asyncio.to_thread(log_action, username, "πŸ“ΈπŸ’Ύ - Image saver - pics preserved!")
img_hash = compute_image_hash(image)
if img_hash in st.session_state.image_hashes:
return None
timestamp = format_timestamp_prefix(username)
voice = st.session_state.voice if username == st.session_state.username else FUN_USERNAMES.get(username, "en-US-AriaNeural")
filename = f"{timestamp}-{img_hash}-voice-{voice}.b64"
filepath = os.path.join(MEDIA_DIR, filename)
os.makedirs(MEDIA_DIR, exist_ok=True)
img_byte_arr = io.BytesIO()
image.save(img_byte_arr, format='PNG')
b64_data = base64.b64encode(img_byte_arr.getvalue()).decode()
with open(filepath, 'w') as f:
f.write(b64_data)
st.session_state.image_hashes.add(img_hash)
await save_chat_history_with_image(username, filepath)
return filename
# Display base64 image and audio
def display_base64_media(media_file, width="100px"):
    media_path = os.path.join(MEDIA_DIR, media_file)
    if os.path.exists(media_path) and media_file.endswith('.b64'):
        with open(media_path, 'r') as f:
            b64_data = f.read()
        img_data = base64.b64decode(b64_data)
        img = Image.open(io.BytesIO(img_data))
        st.image(img, use_container_width=True)
        # Look for a matching narration mp3; only filenames that embed "-voice-" carry a voice tag
        if '-by-' in media_file and '-voice-' in media_file:
            timestamp = media_file.split('-by-')[0] + '-by-' + media_file.split('-by-')[1].split('-')[0]
            voice = media_file.split('-voice-')[1].split('.b64')[0]
            audio_files = glob.glob(f"{timestamp}*-{voice}.mp3")
            if audio_files:
                st.audio(audio_files[0])
# PDF saver and audio generator
async def save_pdf_and_generate_audio(pdf_file, username, max_pages=10):
await asyncio.to_thread(log_action, username, "πŸ“œπŸŽΆ - PDF saver and audio generator!")
timestamp = format_timestamp_prefix(username)
file_hash = hashlib.md5(pdf_file.getbuffer()).hexdigest()[:8]
pdf_filename = f"{timestamp}-{file_hash}.pdf"
with open(pdf_filename, 'wb') as f:
f.write(pdf_file.getbuffer())
reader = PdfReader(pdf_filename)
total_pages = min(len(reader.pages), max_pages)
texts = []
audio_files = []
audio_processor = AudioProcessor()
voice = st.session_state.voice if username == st.session_state.username else FUN_USERNAMES.get(username, "en-US-AriaNeural")
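    # Extract text page by page (capped at max_pages) and synthesize one narration mp3 per page.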
for i in range(total_pages):
text = reader.pages[i].extract_text()
texts.append(text)
audio_filename = f"{timestamp}-page{i+1}-{file_hash}-voice-{voice}.mp3"
audio_data = await audio_processor.create_audio(text, voice, audio_filename)
if audio_data:
audio_files.append(audio_filename)
return pdf_filename, texts, audio_files
# Video renderer - movies roll with autoplay! πŸŽ₯🎬
def get_video_html(video_path, width="100px"):
video_url = f"data:video/mp4;base64,{base64.b64encode(open(video_path, 'rb').read()).decode()}"
return f'''
<video width="{width}" controls autoplay muted loop>
<source src="{video_url}" type="video/mp4">
Your browser does not support the video tag.
</video>
'''
# Audio renderer - sounds soar! 🎢✈️
async def get_audio_html(audio_path, width="100px"):
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "🎢✈️ - Audio renderer - sounds soar!")
audio_url = f"data:audio/mpeg;base64,{base64.b64encode(await asyncio.to_thread(open, audio_path, 'rb').read()).decode()}"
return f'''
<audio controls style="width: {width};">
<source src="{audio_url}" type="audio/mpeg">
Your browser does not support the audio element.
</audio>
'''
# Websocket handler - chat links up! πŸŒπŸ”—
async def websocket_handler(websocket, path):
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸŒπŸ”— - Websocket handler - chat links up!")
try:
client_id = str(uuid.uuid4())
room_id = "chat"
st.session_state.active_connections.setdefault(room_id, {})[client_id] = websocket
chat_content = await load_chat()
username = st.session_state.get('username', random.choice(list(FUN_USERNAMES.keys())))
if not await has_joined_before(client_id, chat_content):
await save_chat_entry(f"Client-{client_id}", f"{username} has joined {START_ROOM}!")
async for message in websocket:
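            # Each frame is "username|message"; persist it through the normal chat pipeline.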
parts = message.split('|', 1)
if len(parts) == 2:
username, content = parts
await save_chat_entry(username, content)
except websockets.ConnectionClosed:
pass
finally:
if room_id in st.session_state.active_connections and client_id in st.session_state.active_connections[room_id]:
del st.session_state.active_connections[room_id][client_id]
# Message broadcaster - words fly far! πŸ“’βœˆοΈ
async def broadcast_message(message, room_id):
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸ“’βœˆοΈ - Message broadcaster - words fly far!")
if room_id in st.session_state.active_connections:
disconnected = []
for client_id, ws in st.session_state.active_connections[room_id].items():
try:
await ws.send(message)
except websockets.ConnectionClosed:
disconnected.append(client_id)
for client_id in disconnected:
del st.session_state.active_connections[room_id][client_id]
# Server starter - web spins up! πŸ–₯οΈπŸŒ€
async def run_websocket_server():
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸ–₯οΈπŸŒ€ - Server starter - web spins up!")
if not st.session_state.server_running:
server = await websockets.serve(websocket_handler, '0.0.0.0', 8765)
st.session_state.server_running = True
await server.wait_closed()
# Delete all user files function
def delete_user_files():
protected_files = {'app.py', 'requirements.txt', 'README.md', CHAT_FILE, QUOTE_VOTES_FILE, MEDIA_VOTES_FILE, HISTORY_FILE, STATE_FILE, MEDIA_DIR}
deleted_files = []
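    # Sweep top-level files, skipping the protected set and per-user history logs, then empty MEDIA_DIR.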
for file in os.listdir('.'):
if file not in protected_files and not file.endswith('_history.md'):
try:
os.remove(file)
deleted_files.append(file)
except Exception as e:
st.error(f"Failed to delete {file}: {e}")
for root, dirs, files in os.walk(MEDIA_DIR):
for file in files:
file_path = os.path.join(root, file)
try:
os.remove(file_path)
deleted_files.append(file_path)
except Exception as e:
st.error(f"Failed to delete {file_path}: {e}")
st.session_state.image_hashes.clear()
st.session_state.audio_cache.clear()
st.session_state.base64_cache.clear()
st.session_state.displayed_chat_lines.clear()
return deleted_files
# Query parameter checker - parse q for username
def check_query_params():
    query_params = st.query_params if hasattr(st, 'query_params') else st.experimental_get_query_params()
    q_value = query_params.get("q")
    if isinstance(q_value, list):  # experimental_get_query_params returns lists; st.query_params returns strings
        q_value = q_value[0] if q_value else None
if q_value and q_value in FUN_USERNAMES:
st.session_state.username = q_value
st.session_state.voice = FUN_USERNAMES[q_value]
return q_value
elif q_value:
st.session_state.user_id = q_value # Use as user_id if not a valid username
return None
# Mermaid graph generator
def generate_mermaid_graph(chat_lines):
mermaid_code = "graph TD\n"
nodes = {}
edges = []
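    # One node per top-level chat line; audio/media follow-ups and quoted replies become edges.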
for i, line in enumerate(chat_lines):
if line.strip() and not line.startswith(' '):
timestamp = line.split('] ')[0][1:] if '] ' in line else "Unknown"
            # Username sits between "] " and the first ":"; the remainder after ": " is the message
            user = line.split('] ', 1)[1].split(':', 1)[0].strip() if '] ' in line and ':' in line else "Unknown"
            message = line.split(': ', 1)[1] if ': ' in line else line
            node_id = f"{re.sub(r'[^A-Za-z0-9_]', '', user) or 'anon'}_{i}"  # Mermaid node ids must be plain identifiers
            nodes[node_id] = f"{user}: {message}"
if i + 1 < len(chat_lines) and "Audio:" in chat_lines[i + 1]:
audio_node = f"audio_{i}"
nodes[audio_node] = "🎡"
edges.append(f"{node_id} --> {audio_node}")
if i + 2 < len(chat_lines) and "Media:" in chat_lines[i + 2]:
media_node = f"media_{i}"
nodes[media_node] = "πŸ–Ό"
edges.append(f"{node_id} --> {media_node}")
            if i > 0 and "> " in line:
                prev = chat_lines[i - 1]
                if '] ' in prev and ':' in prev:
                    parent_user = prev.split('] ', 1)[1].split(':', 1)[0].strip()
                    parent_id = f"{re.sub(r'[^A-Za-z0-9_]', '', parent_user) or 'anon'}_{i - 1}"
                    edges.append(f"{parent_id} --> {node_id}")
for node_id, label in nodes.items():
mermaid_code += f" {node_id}[\"{label}\"]\n"
mermaid_code += "\n".join(f" {edge}" for edge in edges)
return mermaid_code
# Main execution - let’s roll! πŸŽ²πŸš€
def main():
NODE_NAME, port = get_node_name()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
async def async_interface():
# Generate user ID and hash if not set
if not st.session_state.user_id:
st.session_state.user_id = str(uuid.uuid4())
st.session_state.user_hash = await generate_user_hash()
# Check query params first to override username
q_value = check_query_params()
if not q_value and 'username' not in st.session_state:
chat_content = await load_chat()
available_names = [name for name in FUN_USERNAMES if not any(f"{name} has joined" in line for line in chat_content.split('\n'))]
st.session_state.username = random.choice(available_names) if available_names else random.choice(list(FUN_USERNAMES.keys()))
st.session_state.voice = FUN_USERNAMES[st.session_state.username]
st.markdown(f"**πŸŽ™οΈ Voice Selected**: {st.session_state.voice} πŸ—£οΈ for {st.session_state.username}")
# Check existing history file for content
user_history_file = f"{st.session_state.username}_history.md"
if os.path.exists(user_history_file):
with open(user_history_file, 'r') as f:
st.session_state.displayed_chat_lines = f.read().split('\n')
user_url = f"/q={st.session_state.username}"
with st.container():
st.markdown(f"<small>Your unique URL path: [{user_url}]({user_url})</small>", unsafe_allow_html=True)
with st.container():
st.markdown(f"#### πŸ€–πŸ§ MMO {st.session_state.username}πŸ“πŸ”¬")
st.markdown(f"<small>Welcome to {START_ROOM} - chat, vote, upload, paste images, and enjoy quoting! πŸŽ‰ User ID: {st.session_state.user_id}</small>", unsafe_allow_html=True)
if not st.session_state.server_task:
st.session_state.server_task = loop.create_task(run_websocket_server())
# Unified Chat History at Top with Markdown Emoji Output
with st.container():
st.markdown(f"##### {START_ROOM} Chat History πŸ’¬")
chat_content = await load_chat()
chat_lines = [line for line in chat_content.split('\n') if line.strip() and not line.startswith('#')]
if chat_lines:
chat_by_minute = {}
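                # Bucket lines by minute (newest first) so each minute renders under one compact heading.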
for line in reversed(chat_lines):
timestamp = line.split('] ')[0][1:] if '] ' in line else "Unknown"
minute_key = timestamp[:16] # Up to minute
if minute_key not in chat_by_minute:
chat_by_minute[minute_key] = []
chat_by_minute[minute_key].append(line)
markdown_output = ""
for minute, lines in chat_by_minute.items():
minute_output = f"###### {minute[-5:]}\n" # Show only HH:MM
for line in lines:
                        if ': ' in line and not line.startswith(' '):
                            # Line format: "[timestamp] username: message"
                            user = line.split('] ', 1)[1].split(':', 1)[0].strip() if '] ' in line else 'Unknown'
                            msg = line.split(': ', 1)[1]
audio_html = ""
media_content = ""
next_lines = chat_lines[chat_lines.index(line)+1:chat_lines.index(line)+3]
for nl in next_lines:
if "Audio:" in nl:
audio_file = nl.split("Audio: ")[-1].strip()
audio_html = play_and_download_audio(audio_file)
elif "Media:" in nl:
media_file = nl.split("Media: ")[-1].strip('![]()')
                                    if media_file.endswith('.b64'):
                                        display_base64_media(media_file, width="100px")  # renders the image (and any narration) inline
                                        media_content = ""  # nothing extra to splice into the markdown line
elif media_file.endswith(('.png', '.jpg')):
media_content = f"<img src='file://{media_file}' width='100'>"
elif media_file.endswith('.mp4'):
media_content = get_video_html(media_file)
elif media_file.endswith('.pdf'):
media_content = f"πŸ“œ {os.path.basename(media_file)}"
minute_output += f"- πŸ’¬ **{user}**: {msg} {audio_html} {media_content}\n"
markdown_output += minute_output
st.markdown(markdown_output, unsafe_allow_html=True)
# Mermaid Graph Visualization
st.markdown("###### Chat Relationship Tree 🌳")
mermaid_code = generate_mermaid_graph(chat_lines)
mermaid_html = f"""
<script src="https://cdn.jsdelivr.net/npm/mermaid/dist/mermaid.min.js"></script>
<div class="mermaid" style="height: 200px; overflow: auto;">{mermaid_code}</div>
<script>mermaid.initialize({{startOnLoad:true}});</script>
"""
components.html(mermaid_html, height=250)
with st.container():
if st.session_state.quote_line:
st.markdown(f"###### Quoting: {st.session_state.quote_line}")
quote_response = st.text_area("Add your response", key="quote_response", value=st.session_state.message_text)
paste_result_quote = paste_image_button("πŸ“‹ Paste Image or Text with Quote", key="paste_button_quote")
if paste_result_quote.image_data is not None:
if isinstance(paste_result_quote.image_data, str):
st.session_state.message_text = paste_result_quote.image_data
st.text_area("Add your response", key="quote_response", value=st.session_state.message_text)
else:
st.image(paste_result_quote.image_data, caption="Received Image for Quote")
filename = await save_pasted_image(paste_result_quote.image_data, st.session_state.username)
if filename:
st.session_state.pasted_image_data = filename
await save_chat_entry(st.session_state.username, f"Pasted image: {filename}", quote_line=st.session_state.quote_line, media_file=paste_result_quote.image_data)
if st.button("Send Quote πŸš€", key="send_quote"):
markdown_response = f"### Quote Response\n- **Original**: {st.session_state.quote_line}\n- **{st.session_state.username} Replies**: {quote_response}"
if st.session_state.pasted_image_data:
markdown_response += f"\n- **Image**: ![Pasted Image]({st.session_state.pasted_image_data})"
await save_chat_entry(st.session_state.username, f"Pasted image: {st.session_state.pasted_image_data}", quote_line=st.session_state.quote_line, media_file=st.session_state.pasted_image_data)
st.session_state.pasted_image_data = None
await save_chat_entry(st.session_state.username, markdown_response, is_markdown=True, quote_line=st.session_state.quote_line)
st.session_state.quote_line = None
st.session_state.message_text = ''
st.rerun()
current_selection = st.session_state.username if st.session_state.username in FUN_USERNAMES else ""
new_username = st.selectbox("Change Name and Voice", [""] + list(FUN_USERNAMES.keys()), index=(list(FUN_USERNAMES.keys()).index(current_selection) + 1 if current_selection else 0), format_func=lambda x: f"{x} ({FUN_USERNAMES.get(x, 'No Voice')})" if x else "Select a name")
if new_username and new_username != st.session_state.username:
await save_chat_entry("System 🌟", f"{st.session_state.username} changed name to {new_username}")
st.session_state.username = new_username
st.session_state.voice = FUN_USERNAMES[new_username]
st.markdown(f"**πŸŽ™οΈ Voice Changed**: {st.session_state.voice} πŸ—£οΈ for {st.session_state.username}")
st.rerun()
# Message input with Send button on the right
col_input, col_send = st.columns([5, 1])
with col_input:
message = st.text_input(f"Message as {st.session_state.username} (Voice: {st.session_state.voice})", key="message_input", value=st.session_state.message_text)
with col_send:
if st.button("Send πŸš€", key="send_button"):
if message.strip():
audio_file = await save_chat_entry(st.session_state.username, message, is_markdown=True)
if audio_file:
st.session_state.audio_cache[f"{message}_{FUN_USERNAMES[st.session_state.username]}"] = audio_file
st.audio(audio_file) # Immediate preview
if st.session_state.pasted_image_data:
await save_chat_entry(st.session_state.username, f"Pasted image: {st.session_state.pasted_image_data}", media_file=st.session_state.pasted_image_data)
st.session_state.pasted_image_data = None
st.session_state.message_text = ''
st.rerun()
paste_result_msg = paste_image_button("πŸ“‹ Paste Image or Text with Message", key="paste_button_msg")
if paste_result_msg.image_data is not None:
if isinstance(paste_result_msg.image_data, str):
st.session_state.message_text = paste_result_msg.image_data
st.text_input(f"Message as {st.session_state.username} (Voice: {st.session_state.voice})", key="message_input_paste", value=st.session_state.message_text)
else:
st.image(paste_result_msg.image_data, caption="Received Image for Quote")
filename = await save_pasted_image(paste_result_msg.image_data, st.session_state.username)
if filename:
await save_chat_entry(st.session_state.username, f"Pasted image: {filename}", media_file=paste_result_msg.image_data)
st.session_state.pasted_image_data = None
with st.container():
tab_main = st.radio("Action:", ["πŸ“Έ Media", "πŸ” ArXiv", "πŸ“ Editor"], horizontal=True, label_visibility="collapsed")
useArxiv = st.checkbox("Search Arxiv for Research Paper Answers", value=True, label_visibility="collapsed")
useArxivAudio = st.checkbox("Generate Audio File for Research Paper Answers", value=False, label_visibility="collapsed")
st.markdown("###### Upload Media πŸŽ¨πŸŽΆπŸ“œπŸŽ₯")
uploaded_file = st.file_uploader("Upload Media", type=['png', 'jpg', 'mp4', 'mp3', 'wav', 'pdf', 'txt', 'md', 'py'])
if uploaded_file:
timestamp = format_timestamp_prefix(st.session_state.username)
username = st.session_state.username
ext = uploaded_file.name.split('.')[-1]
file_hash = hashlib.md5(uploaded_file.getbuffer()).hexdigest()[:8]
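                # Skip files whose content hash was already stored this session (simple dedup).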
if file_hash not in st.session_state.image_hashes:
filename = f"{timestamp}-{file_hash}.{ext}"
file_path = filename # Top-level file
await asyncio.to_thread(lambda: open(file_path, 'wb').write(uploaded_file.getbuffer()))
st.success(f"Uploaded {filename}")
if ext == 'pdf':
pdf_filename, texts, audio_files = await save_pdf_and_generate_audio(uploaded_file, username)
await save_chat_entry(username, f"Uploaded PDF: {pdf_filename}", media_file=pdf_filename)
for i, (text, audio_file) in enumerate(zip(texts, audio_files)):
if audio_file:
with open(CHAT_FILE, 'a') as f:
f.write(f" [{timestamp}] Page {i+1} Audio: {audio_file}\n")
else:
await save_chat_entry(username, f"Uploaded media: {file_path}", media_file=file_path)
await save_chat_history_with_image(username, file_path)
st.session_state.image_hashes.add(file_hash)
if file_path.endswith('.mp4'):
st.session_state.media_notifications.append(file_path)
# Big Red Delete Button
st.markdown("###### πŸ›‘ Danger Zone")
if st.button("Try Not To Delete It All On Your First Day", key="delete_all", help="Deletes all user-added files!", type="primary", use_container_width=True, label_visibility="collapsed"):
deleted_files = delete_user_files()
if deleted_files:
st.markdown("### πŸ—‘οΈ Deleted Files:\n" + "\n".join([f"- `{file}`" for file in deleted_files]))
else:
st.markdown("### πŸ—‘οΈ Nothing to Delete!")
st.rerun()
st.markdown("###### Refresh ⏳")
refresh_rate = st.slider("Refresh Rate", 1, 300, st.session_state.refresh_rate, label_visibility="collapsed")
st.session_state.refresh_rate = refresh_rate
timer_placeholder = st.empty()
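            # Blocking countdown: restyle the digits each second, then rerun to pull in new chat lines.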
for i in range(st.session_state.refresh_rate, -1, -1):
font_name, font_func = random.choice(UNICODE_FONTS)
countdown_str = "".join(UNICODE_DIGITS[int(d)] for d in str(i)) if i < 10 else font_func(str(i))
timer_placeholder.markdown(f"<small>⏳ {font_func('Refresh in:')} {countdown_str}</small>", unsafe_allow_html=True)
time.sleep(1)
st.rerun()
# Separate Galleries for Own and Shared Files
with st.container():
all_files = glob.glob("*.md") + glob.glob("*.pdf") + glob.glob("*.txt") + glob.glob("*.py") + glob.glob("*.png") + glob.glob("*.jpg") + glob.glob("*.mp3") + glob.glob("*.mp4") + glob.glob(os.path.join(MEDIA_DIR, "*.b64"))
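            # Files whose names embed this session's user_id (added by format_timestamp_prefix) count as "own"; the rest are shared.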
own_files = [f for f in all_files if st.session_state.user_id in os.path.basename(f)]
shared_files = [f for f in all_files if f not in own_files and not f in [CHAT_FILE, QUOTE_VOTES_FILE, MEDIA_VOTES_FILE, HISTORY_FILE, STATE_FILE, os.path.join(MEDIA_DIR, "*")]]
st.markdown("###### Your Files πŸ“‚")
st.markdown("###### Image Gallery πŸ–Ό")
own_image_files = [f for f in own_files if f.endswith(('.png', '.jpg', '.b64'))]
image_cols = st.slider("Image Gallery Columns πŸ–Ό (Own)", min_value=1, max_value=15, value=5, label_visibility="collapsed")
cols = st.columns(image_cols)
for idx, image_file in enumerate(own_image_files):
with cols[idx % image_cols]:
if image_file.endswith('.b64'):
display_base64_media(os.path.basename(image_file))
else:
st.image(image_file, use_container_width=True)
st.markdown("###### Video Gallery πŸŽ₯")
own_video_files = [f for f in own_files if f.endswith('.mp4')]
video_cols = st.slider("Video Gallery Columns 🎬 (Own)", min_value=1, max_value=5, value=3, label_visibility="collapsed")
cols = st.columns(video_cols)
for idx, video_file in enumerate(own_video_files):
with cols[idx % video_cols]:
st.markdown(get_video_html(video_file), unsafe_allow_html=True)
st.markdown("###### Audio Gallery 🎧")
own_audio_files = [f for f in own_files if f.endswith(('.mp3', '.wav'))]
audio_cols = st.slider("Audio Gallery Columns 🎢 (Own)", min_value=1, max_value=15, value=5, label_visibility="collapsed")
cols = st.columns(audio_cols)
for idx, audio_file in enumerate(own_audio_files):
with cols[idx % audio_cols]:
st.markdown(await get_audio_html(audio_file), unsafe_allow_html=True)
st.markdown("###### Shared Files πŸ“€")
st.markdown("###### Image Gallery πŸ–Ό")
shared_image_files = [f for f in shared_files if f.endswith(('.png', '.jpg', '.b64'))]
image_cols = st.slider("Image Gallery Columns πŸ–Ό (Shared)", min_value=1, max_value=15, value=5, label_visibility="collapsed")
cols = st.columns(image_cols)
for idx, image_file in enumerate(shared_image_files):
with cols[idx % image_cols]:
if image_file.endswith('.b64'):
display_base64_media(os.path.basename(image_file))
else:
st.image(image_file, use_container_width=True)
st.markdown("###### Video Gallery πŸŽ₯")
shared_video_files = [f for f in shared_files if f.endswith('.mp4')]
video_cols = st.slider("Video Gallery Columns 🎬 (Shared)", min_value=1, max_value=5, value=3, label_visibility="collapsed")
cols = st.columns(video_cols)
for idx, video_file in enumerate(shared_video_files):
with cols[idx % video_cols]:
st.markdown(get_video_html(video_file), unsafe_allow_html=True)
st.markdown("###### Audio Gallery 🎧")
shared_audio_files = [f for f in shared_files if f.endswith(('.mp3', '.wav'))]
audio_cols = st.slider("Audio Gallery Columns 🎢 (Shared)", min_value=1, max_value=15, value=5, label_visibility="collapsed")
cols = st.columns(audio_cols)
for idx, audio_file in enumerate(shared_audio_files):
with cols[idx % audio_cols]:
st.markdown(await get_audio_html(audio_file), unsafe_allow_html=True)
# Full Log at End with Download
with st.container():
st.markdown("###### Full Chat Log πŸ“œ")
with open(CHAT_FILE, 'r') as f:
history_content = f.read()
st.markdown(history_content)
            st.download_button("Download Chat Log as .md", history_content, file_name=f"chat_{st.session_state.user_id}.md", mime="text/markdown")
loop.run_until_complete(async_interface())
if __name__ == "__main__":
main()