import streamlit as st
import asyncio
import websockets
import uuid
import argparse
from datetime import datetime
import os
import random
import time
import hashlib
from PIL import Image
import glob
import base64
import io
import streamlit.components.v1 as components
import edge_tts
import nest_asyncio
import re
from streamlit_paste_button import paste_image_button
import pytz
import shutil
from urllib.parse import urlencode
from PyPDF2 import PdfReader
import json
# Patch for nested async - sneaky fix! πβ¨
nest_asyncio.apply()
# Static config - constants rule! ππ
icons = 'π€π§ π¬π'
START_ROOM = "Sector π"
# Page setup - dressing up the window! πΌοΈπ
st.set_page_config(
page_title="π€π§ MMO Chat Brainππ¬",
page_icon=icons,
layout="wide",
initial_sidebar_state="auto"
)
# Funky usernames with corresponding Edge TTS voices
FUN_USERNAMES = {
"CosmicJester π": "en-US-AriaNeural",
"PixelPanda πΌ": "en-US-JennyNeural",
"QuantumQuack π¦": "en-GB-SoniaNeural",
"StellarSquirrel πΏοΈ": "en-AU-NatashaNeural",
"GizmoGuru βοΈ": "en-CA-ClaraNeural",
"NebulaNinja π ": "en-US-GuyNeural",
"ByteBuster πΎ": "en-GB-RyanNeural",
"GalacticGopher π": "en-AU-WilliamNeural",
"RocketRaccoon π": "en-CA-LiamNeural",
"EchoElf π§": "en-US-AnaNeural",
"PhantomFox π¦": "en-US-BrandonNeural",
"WittyWizard π§": "en-GB-ThomasNeural",
"LunarLlama π": "en-AU-FreyaNeural",
"SolarSloth βοΈ": "en-CA-LindaNeural",
"AstroAlpaca π¦": "en-US-ChristopherNeural",
"CyberCoyote πΊ": "en-GB-ElliotNeural",
"MysticMoose π¦": "en-AU-JamesNeural",
"GlitchGnome π§": "en-CA-EthanNeural",
"VortexViper π": "en-US-AmberNeural",
"ChronoChimp π": "en-GB-LibbyNeural"
}
# Top-level files (no subdirectories)
CHAT_FILE = "global_chat.md"
QUOTE_VOTES_FILE = "quote_votes.md"
MEDIA_VOTES_FILE = "media_votes.md"
HISTORY_FILE = "chat_history.md"
STATE_FILE = "user_state.txt"
MEDIA_DIR = "media_base64"
# Fancy digits - numbers got style! π’π
UNICODE_DIGITS = {i: f"{i}\uFE0F\u20E3" for i in range(10)}
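# Each value pairs the digit with U+FE0F (emoji presentation selector) and U+20E3 (combining
# enclosing keycap), e.g. UNICODE_DIGITS[3] == "3\uFE0F\u20E3" renders as the keycap-3 emoji.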
# Massive font collection - typography bonanza! ποΈπ¨
UNICODE_FONTS = [
("Normal", lambda x: x),
("Bold", lambda x: "".join(chr(ord(c) + 0x1D400 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D41A - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Italic", lambda x: "".join(chr(ord(c) + 0x1D434 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D44E - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Bold Italic", lambda x: "".join(chr(ord(c) + 0x1D468 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D482 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Script", lambda x: "".join(chr(ord(c) + 0x1D49C - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D4B6 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Bold Script", lambda x: "".join(chr(ord(c) + 0x1D4D0 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D4EA - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Fraktur", lambda x: "".join(chr(ord(c) + 0x1D504 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D51E - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Bold Fraktur", lambda x: "".join(chr(ord(c) + 0x1D56C - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D586 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Double Struck", lambda x: "".join(chr(ord(c) + 0x1D538 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D552 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Sans Serif", lambda x: "".join(chr(ord(c) + 0x1D5A0 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D5BA - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Sans Serif Bold", lambda x: "".join(chr(ord(c) + 0x1D5D4 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D5EE - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Sans Serif Italic", lambda x: "".join(chr(ord(c) + 0x1D608 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D622 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Sans Serif Bold Italic", lambda x: "".join(chr(ord(c) + 0x1D63C - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D656 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Monospace", lambda x: "".join(chr(ord(c) + 0x1D670 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D68A - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Circled", lambda x: "".join(chr(ord(c) - 0x41 + 0x24B6) if 'A' <= c <= 'Z' else chr(ord(c) - 0x61 + 0x24D0) if 'a' <= c <= 'z' else c for c in x)),
("Squared", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F130) if 'A' <= c <= 'Z' else c for c in x)),
("Negative Circled", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F150) if 'A' <= c <= 'Z' else c for c in x)),
("Negative Squared", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F170) if 'A' <= c <= 'Z' else c for c in x)),
("Regional Indicator", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F1E6) if 'A' <= c <= 'Z' else c for c in x)),
]
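# Each UNICODE_FONTS style shifts A-Z/a-z into the Mathematical Alphanumeric Symbols block
# (or the enclosed-letter blocks); anything outside A-Z/a-z passes through unchanged.
# Example: UNICODE_FONTS[1][1]("Hi") returns the bold variant of "Hi" (U+1D407, U+1D422).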
# Global state - keeping tabs! ππ
if 'server_running' not in st.session_state:
st.session_state.server_running = False
if 'server_task' not in st.session_state:
st.session_state.server_task = None
if 'active_connections' not in st.session_state:
st.session_state.active_connections = {}
if 'media_notifications' not in st.session_state:
st.session_state.media_notifications = []
if 'last_chat_update' not in st.session_state:
st.session_state.last_chat_update = 0
if 'displayed_chat_lines' not in st.session_state:
st.session_state.displayed_chat_lines = []
if 'message_text' not in st.session_state:
st.session_state.message_text = ""
if 'audio_cache' not in st.session_state:
st.session_state.audio_cache = {}
if 'pasted_image_data' not in st.session_state:
st.session_state.pasted_image_data = None
if 'quote_line' not in st.session_state:
st.session_state.quote_line = None
if 'refresh_rate' not in st.session_state:
st.session_state.refresh_rate = 5
if 'base64_cache' not in st.session_state:
st.session_state.base64_cache = {}
if 'image_hashes' not in st.session_state:
st.session_state.image_hashes = set()
if 'gallery_columns' not in st.session_state:
st.session_state.gallery_columns = 1
if 'user_id' not in st.session_state:
st.session_state.user_id = None
if 'user_hash' not in st.session_state:
st.session_state.user_hash = None
# Timestamp wizardry - clock ticks with flair! β°π©
def format_timestamp_prefix(username):
central = pytz.timezone('US/Central')
now = datetime.now(central)
return f"{now.strftime('%I-%M-%p-ct-%m-%d-%Y')}-by-{username}-{st.session_state.user_id}"
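# format_timestamp_prefix example (illustrative values): "03-45-PM-ct-02-14-2025-by-<username>-<user_id>"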
# Compute image hash from binary data
def compute_image_hash(image_data):
if isinstance(image_data, Image.Image):
img_byte_arr = io.BytesIO()
image_data.save(img_byte_arr, format='PNG')
img_bytes = img_byte_arr.getvalue()
else:
img_bytes = image_data
return hashlib.md5(img_bytes).hexdigest()[:8]
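# compute_image_hash: the truncated MD5 is used only for de-duplication and filenames, not for security.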
# Node naming - christening the beast! ππΌ
def get_node_name():
parser = argparse.ArgumentParser(description='Start a chat node with a specific name')
parser.add_argument('--node-name', type=str, default=None)
parser.add_argument('--port', type=int, default=8501)
    args, _ = parser.parse_known_args()  # tolerate any extra CLI args passed through by Streamlit
username = st.session_state.get('username', 'System π')
log_action(username, "ππΌ - Node naming - christening the beast!")
return args.node_name or f"node-{uuid.uuid4().hex[:8]}", args.port
# Action logger - spying on deeds! π΅οΈπ
def log_action(username, action):
if 'action_log' not in st.session_state:
st.session_state.action_log = {}
user_log = st.session_state.action_log.setdefault(username, {})
current_time = time.time()
user_log = {k: v for k, v in user_log.items() if current_time - v < 10}
st.session_state.action_log[username] = user_log
if action not in user_log:
central = pytz.timezone('US/Central')
with open(HISTORY_FILE, 'a') as f:
f.write(f"[{datetime.now(central).strftime('%Y-%m-%d %H:%M:%S')}] {username}: {action}\n")
user_log[action] = current_time
# Clean text - strip the fancy stuff! π§Ήπ
def clean_text_for_tts(text):
cleaned = re.sub(r'[#*!\[\]]+', '', text)
cleaned = ' '.join(cleaned.split())
return cleaned if cleaned else "No text to speak" # Default if empty
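# clean_text_for_tts example: "# **Hello** [world]!" -> "Hello world"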
# Audio Processor Class from your code, adapted
class AudioProcessor:
def __init__(self):
self.cache_dir = "audio_cache"
os.makedirs(self.cache_dir, exist_ok=True)
self.metadata = self._load_metadata()
def _load_metadata(self):
metadata_file = os.path.join(self.cache_dir, "metadata.json")
        if not os.path.exists(metadata_file):
            return {}
        with open(metadata_file, 'r') as f:  # close the handle explicitly
            return json.load(f)
def _save_metadata(self):
metadata_file = os.path.join(self.cache_dir, "metadata.json")
with open(metadata_file, 'w') as f:
json.dump(self.metadata, f)
async def create_audio(self, text, voice='en-US-AriaNeural', filename=None):
cache_key = hashlib.md5(f"{text}:{voice}".encode()).hexdigest()
cache_path = filename or os.path.join(self.cache_dir, f"{cache_key}.mp3")
if cache_key in self.metadata and os.path.exists(cache_path):
return cache_path
# Clean text for speech
        text = " ".join(text.split())  # collapse newlines and runs of whitespace
if not text:
return None
# Generate audio with edge_tts
try:
communicate = edge_tts.Communicate(text, voice)
await communicate.save(cache_path)
if not os.path.exists(cache_path):
raise edge_tts.exceptions.NoAudioReceived("No audio file created")
except edge_tts.exceptions.NoAudioReceived as e:
log_action("System π", f"TTS failed for text '{text}' with voice '{voice}': {str(e)}")
return None
# Update metadata
self.metadata[cache_key] = {
'timestamp': datetime.now().isoformat(),
'text_length': len(text),
'voice': voice
}
self._save_metadata()
return cache_path
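# AudioProcessor usage sketch (assumes an asyncio loop is available, as patched above):
#   processor = AudioProcessor()
#   mp3_path = asyncio.get_event_loop().run_until_complete(
#       processor.create_audio("Hello there", "en-US-AriaNeural"))
#   # mp3_path points to a cached file under audio_cache/, or is None if synthesis failed.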
# Chat saver - words locked tight! π¬π
async def save_chat_entry(username, message, is_markdown=False, quote_line=None, media_file=None):
await asyncio.to_thread(log_action, username, "π¬π - Chat saver - words locked tight!")
central = pytz.timezone('US/Central')
timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
user_history_file = f"{username}_history.md"
voice = st.session_state.voice if username == st.session_state.username else FUN_USERNAMES.get(username, "en-US-AriaNeural")
indent = " " if quote_line else "" # Nesting for replies
# Prepare entry
if is_markdown:
entry = f"{indent}[{timestamp}] {username}:\n{indent}```markdown\n{indent}{message}\n{indent}```"
else:
entry = f"{indent}[{timestamp}] {username}: {message}"
if quote_line:
entry = f"{indent}> {quote_line}\n{entry}"
# Save to global chat file
with open(CHAT_FILE, 'a') as f:
f.write(f"{entry}\n")
# Save to user-specific history file
if not os.path.exists(user_history_file):
with open(user_history_file, 'w') as f:
f.write(f"# Chat History for {username} (Voice: {voice})\n\n")
with open(user_history_file, 'a') as f:
f.write(f"{entry}\n")
# Generate audio
cleaned_message = clean_text_for_tts(message)
audio_processor = AudioProcessor()
audio_filename = f"{format_timestamp_prefix(username)}-{hashlib.md5(cleaned_message.encode()).hexdigest()[:8]}.mp3"
log_action(username, f"Attempting TTS with text: '{cleaned_message}' and voice: '{voice}'")
audio_file = await audio_processor.create_audio(cleaned_message, voice, audio_filename)
# Log audio and media
if audio_file:
with open(HISTORY_FILE, 'a') as f:
f.write(f"[{timestamp}] {username} ({voice}): Audio generated - {audio_filename}\n")
with open(user_history_file, 'a') as f:
f.write(f"{indent}[{timestamp}] Audio: {audio_filename}\n")
with open(CHAT_FILE, 'a') as f:
f.write(f"{indent}[{timestamp}] Audio: {audio_filename}\n")
if media_file:
if isinstance(media_file, Image.Image):
img_hash = compute_image_hash(media_file)
timestamp_prefix = format_timestamp_prefix(username)
media_filename = f"{timestamp_prefix}-{img_hash}.b64"
media_path = os.path.join(MEDIA_DIR, media_filename)
os.makedirs(MEDIA_DIR, exist_ok=True)
img_byte_arr = io.BytesIO()
media_file.save(img_byte_arr, format='PNG')
img_bytes = img_byte_arr.getvalue()
b64_data = base64.b64encode(img_bytes).decode()
with open(media_path, 'w') as f:
f.write(b64_data)
media_file = media_filename
        # Record the media reference as a markdown image (the reader below strips "![]()")
        with open(CHAT_FILE, 'a') as f:
            f.write(f"{indent}[{timestamp}] Media: ![]({media_file})\n")
        with open(user_history_file, 'a') as f:
            f.write(f"{indent}[{timestamp}] Media: ![]({media_file})\n")
await broadcast_message(f"{username}|{message}", "chat")
st.session_state.last_chat_update = time.time()
return audio_filename
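# save_chat_entry appends three line shapes that the readers further below rely on:
#   [<timestamp>] <username>: <message>           (optionally preceded by "> <quoted line>")
#   [<timestamp>] Audio: <audio_filename>
#   [<timestamp>] Media: ![](<media_filename>)    (base64 .b64 payloads live in media_base64/)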
# Save chat history with image or PDF
async def save_chat_history_with_image(username, image_path):
central = pytz.timezone('US/Central')
timestamp = datetime.now(central).strftime("%Y-%m-%d_%H-%M-%S")
user_history_file = f"{username}_history.md"
chat_content = await load_chat()
voice = st.session_state.voice if username == st.session_state.username else FUN_USERNAMES.get(username, "en-US-AriaNeural")
if not os.path.exists(user_history_file):
with open(user_history_file, 'w') as f:
f.write(f"# Chat History for {username} (Voice: {voice})\n\n")
with open(user_history_file, 'a') as f:
f.write(f"[{timestamp}] {username} (Voice: {voice}) Shared Media: {os.path.basename(image_path)}\n")
f.write(f"```markdown\n{chat_content}\n```\n")
# Chat loader - history unleashed! ππ
async def load_chat():
username = st.session_state.get('username', 'System π')
await asyncio.to_thread(log_action, username, "ππ - Chat loader - history unleashed!")
if not os.path.exists(CHAT_FILE):
await asyncio.to_thread(lambda: open(CHAT_FILE, 'a').write(f"# {START_ROOM} Chat\n\nWelcome to the cosmic hub - start chatting! π€\n"))
with open(CHAT_FILE, 'r') as f:
content = await asyncio.to_thread(f.read)
return content
# User lister - whoβs in the gang! π₯π
async def get_user_list(chat_content):
username = st.session_state.get('username', 'System π')
await asyncio.to_thread(log_action, username, "π₯π - User lister - whoβs in the gang!")
users = set()
for line in chat_content.split('\n'):
if line.strip() and ': ' in line:
user = line.split(': ')[1].split(' ')[0]
users.add(user)
return sorted(list(users))
# Join checker - been here before? πͺπ
async def has_joined_before(client_id, chat_content):
username = st.session_state.get('username', 'System π')
await asyncio.to_thread(log_action, username, "πͺπ - Join checker - been here before?")
return any(f"Client-{client_id}" in line for line in chat_content.split('\n'))
# Suggestion maker - old quips resurface! π‘π
async def get_message_suggestions(chat_content, prefix):
username = st.session_state.get('username', 'System π')
await asyncio.to_thread(log_action, username, "π‘π - Suggestion maker - old quips resurface!")
lines = chat_content.split('\n')
messages = [line.split(': ', 1)[1] for line in lines if ': ' in line and line.strip()]
return [msg for msg in messages if msg.lower().startswith(prefix.lower())][:5]
# Vote saver - cheers recorded! ππ
async def save_vote(file, item, user_hash, username, comment=""):
await asyncio.to_thread(log_action, username, "ππ - Vote saver - cheers recorded!")
central = pytz.timezone('US/Central')
timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
entry = f"[{timestamp}] {user_hash} voted for {item}"
await asyncio.to_thread(lambda: open(file, 'a').write(f"{entry}\n"))
await asyncio.to_thread(lambda: open(HISTORY_FILE, "a").write(f"- {timestamp} - User {user_hash} voted for {item}\n"))
chat_message = f"{username} upvoted: \"{item}\""
if comment:
chat_message += f" - {comment}"
await save_chat_entry(username, chat_message)
# Vote counter - tallying the love! ππ
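# Vote lines look like "[2025-01-01 12:00:00] a1b2c3d4 voted for <item>";
# each (user_hash, item) pair is counted at most once when tallying.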
async def load_votes(file):
username = st.session_state.get('username', 'System π')
await asyncio.to_thread(log_action, username, "ππ - Vote counter - tallying the love!")
if not os.path.exists(file):
await asyncio.to_thread(lambda: open(file, 'w').write("# Vote Tally\n\nNo votes yet - get clicking! π±οΈ\n"))
with open(file, 'r') as f:
content = await asyncio.to_thread(f.read)
lines = content.strip().split('\n')[2:]
votes = {}
user_votes = set()
for line in lines:
if line.strip() and 'voted for' in line:
user_hash = line.split('] ')[1].split(' voted for ')[0]
item = line.split('voted for ')[1]
vote_key = f"{user_hash}-{item}"
if vote_key not in user_votes:
votes[item] = votes.get(item, 0) + 1
user_votes.add(vote_key)
return votes
# Hash generator - secret codes ahoy! ππ΅οΈ
async def generate_user_hash():
username = st.session_state.get('username', 'System π')
await asyncio.to_thread(log_action, username, "ππ΅οΈ - Hash generator - secret codes ahoy!")
if 'user_hash' not in st.session_state:
st.session_state.user_hash = hashlib.md5(str(random.getrandbits(128)).encode()).hexdigest()[:8]
return st.session_state.user_hash
# Audio maker - voices come alive! πΆπ
async def async_edge_tts_generate(text, voice, rate=0, pitch=0, file_format="mp3"):
username = st.session_state.get('username', 'System π')
await asyncio.to_thread(log_action, username, "πΆπ - Audio maker - voices come alive!")
timestamp = format_timestamp_prefix(username)
filename = f"{timestamp}-{hashlib.md5(text.encode()).hexdigest()[:8]}.{file_format}"
filepath = filename # Top-level file
communicate = edge_tts.Communicate(text, voice, rate=f"{rate:+d}%", pitch=f"{pitch:+d}Hz")
try:
await communicate.save(filepath)
return filepath if os.path.exists(filepath) else None
except edge_tts.exceptions.NoAudioReceived:
with open(HISTORY_FILE, 'a') as f:
central = pytz.timezone('US/Central')
f.write(f"[{datetime.now(central).strftime('%Y-%m-%d %H:%M:%S')}] {username}: Audio failed - No audio received for '{text}'\n")
return None
# Audio player - tunes blast off! ππ
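# The HTML snippet returned here is embedded into the chat markdown and rendered
# with st.markdown(..., unsafe_allow_html=True) further below.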
def play_and_download_audio(file_path):
if file_path and os.path.exists(file_path):
with open(file_path, "rb") as f:
audio_data = f.read()
b64 = base64.b64encode(audio_data).decode()
        # Player plus download link (markup reconstructed; exact original HTML unknown)
        audio_html = f'''
        <audio controls><source src="data:audio/mpeg;base64,{b64}" type="audio/mpeg"></audio>
        <a href="data:audio/mpeg;base64,{b64}" download="{os.path.basename(file_path)}">🎵</a>
        '''
return audio_html
return ""
# Image saver - pics preserved with naming as base64! πΈπΎ
async def save_pasted_image(image, username):
await asyncio.to_thread(log_action, username, "πΈπΎ - Image saver - pics preserved!")
img_hash = compute_image_hash(image)
if img_hash in st.session_state.image_hashes:
return None
timestamp = format_timestamp_prefix(username)
voice = st.session_state.voice if username == st.session_state.username else FUN_USERNAMES.get(username, "en-US-AriaNeural")
filename = f"{timestamp}-{img_hash}-voice-{voice}.b64"
filepath = os.path.join(MEDIA_DIR, filename)
os.makedirs(MEDIA_DIR, exist_ok=True)
img_byte_arr = io.BytesIO()
image.save(img_byte_arr, format='PNG')
b64_data = base64.b64encode(img_byte_arr.getvalue()).decode()
with open(filepath, 'w') as f:
f.write(b64_data)
st.session_state.image_hashes.add(img_hash)
await save_chat_history_with_image(username, filepath)
return filename
# Display base64 image and audio
def display_base64_media(media_file, width="100px"):
if os.path.exists(os.path.join(MEDIA_DIR, media_file)) and media_file.endswith('.b64'):
with open(os.path.join(MEDIA_DIR, media_file), 'r') as f:
b64_data = f.read()
img_data = base64.b64decode(b64_data)
img = Image.open(io.BytesIO(img_data))
st.image(img, use_container_width=True)
        # Find the matching audio file; only names that embed both markers can be resolved
        if '-by-' in media_file and '-voice-' in media_file:
            timestamp = media_file.split('-by-')[0] + '-by-' + media_file.split('-by-')[1].split('-')[0]
            voice = media_file.split('-voice-')[1].split('.b64')[0]
            audio_files = glob.glob(f"{timestamp}*-{voice}.mp3")
            if audio_files:
                st.audio(audio_files[0])
# PDF saver and audio generator
async def save_pdf_and_generate_audio(pdf_file, username, max_pages=10):
await asyncio.to_thread(log_action, username, "ππΆ - PDF saver and audio generator!")
timestamp = format_timestamp_prefix(username)
file_hash = hashlib.md5(pdf_file.getbuffer()).hexdigest()[:8]
pdf_filename = f"{timestamp}-{file_hash}.pdf"
with open(pdf_filename, 'wb') as f:
f.write(pdf_file.getbuffer())
reader = PdfReader(pdf_filename)
total_pages = min(len(reader.pages), max_pages)
texts = []
audio_files = []
audio_processor = AudioProcessor()
voice = st.session_state.voice if username == st.session_state.username else FUN_USERNAMES.get(username, "en-US-AriaNeural")
for i in range(total_pages):
text = reader.pages[i].extract_text()
texts.append(text)
audio_filename = f"{timestamp}-page{i+1}-{file_hash}-voice-{voice}.mp3"
audio_data = await audio_processor.create_audio(text, voice, audio_filename)
if audio_data:
audio_files.append(audio_filename)
return pdf_filename, texts, audio_files
# Video renderer - movies roll with autoplay! π₯π¬
def get_video_html(video_path, width="100px"):
video_url = f"data:video/mp4;base64,{base64.b64encode(open(video_path, 'rb').read()).decode()}"
    # Markup reconstructed; exact original HTML unknown
    return f'''
    <video width="{width}" controls autoplay muted loop><source src="{video_url}" type="video/mp4"></video>
    '''
# Audio renderer - sounds soar! πΆβοΈ
async def get_audio_html(audio_path, width="100px"):
username = st.session_state.get('username', 'System π')
await asyncio.to_thread(log_action, username, "πΆβοΈ - Audio renderer - sounds soar!")
    audio_bytes = await asyncio.to_thread(lambda: open(audio_path, 'rb').read())
    audio_url = f"data:audio/mpeg;base64,{base64.b64encode(audio_bytes).decode()}"
    # Markup reconstructed; exact original HTML unknown
    return f'''
    <audio controls style="width: {width};"><source src="{audio_url}" type="audio/mpeg"></audio>
    '''
# Websocket handler - chat links up! ππ
async def websocket_handler(websocket, path):
username = st.session_state.get('username', 'System π')
await asyncio.to_thread(log_action, username, "ππ - Websocket handler - chat links up!")
try:
client_id = str(uuid.uuid4())
room_id = "chat"
st.session_state.active_connections.setdefault(room_id, {})[client_id] = websocket
chat_content = await load_chat()
username = st.session_state.get('username', random.choice(list(FUN_USERNAMES.keys())))
if not await has_joined_before(client_id, chat_content):
await save_chat_entry(f"Client-{client_id}", f"{username} has joined {START_ROOM}!")
async for message in websocket:
parts = message.split('|', 1)
if len(parts) == 2:
username, content = parts
await save_chat_entry(username, content)
except websockets.ConnectionClosed:
pass
finally:
if room_id in st.session_state.active_connections and client_id in st.session_state.active_connections[room_id]:
del st.session_state.active_connections[room_id][client_id]
# Message broadcaster - words fly far! π’βοΈ
async def broadcast_message(message, room_id):
username = st.session_state.get('username', 'System π')
await asyncio.to_thread(log_action, username, "π’βοΈ - Message broadcaster - words fly far!")
if room_id in st.session_state.active_connections:
disconnected = []
for client_id, ws in st.session_state.active_connections[room_id].items():
try:
await ws.send(message)
except websockets.ConnectionClosed:
disconnected.append(client_id)
for client_id in disconnected:
del st.session_state.active_connections[room_id][client_id]
# Server starter - web spins up! π₯οΈπ
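# The websocket relay listens on ws://0.0.0.0:8765, independent of the Streamlit HTTP port.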
async def run_websocket_server():
username = st.session_state.get('username', 'System π')
await asyncio.to_thread(log_action, username, "π₯οΈπ - Server starter - web spins up!")
if not st.session_state.server_running:
server = await websockets.serve(websocket_handler, '0.0.0.0', 8765)
st.session_state.server_running = True
await server.wait_closed()
# Delete all user files function
def delete_user_files():
protected_files = {'app.py', 'requirements.txt', 'README.md', CHAT_FILE, QUOTE_VOTES_FILE, MEDIA_VOTES_FILE, HISTORY_FILE, STATE_FILE, MEDIA_DIR}
deleted_files = []
for file in os.listdir('.'):
if file not in protected_files and not file.endswith('_history.md'):
try:
os.remove(file)
deleted_files.append(file)
except Exception as e:
st.error(f"Failed to delete {file}: {e}")
for root, dirs, files in os.walk(MEDIA_DIR):
for file in files:
file_path = os.path.join(root, file)
try:
os.remove(file_path)
deleted_files.append(file_path)
except Exception as e:
st.error(f"Failed to delete {file_path}: {e}")
st.session_state.image_hashes.clear()
st.session_state.audio_cache.clear()
st.session_state.base64_cache.clear()
st.session_state.displayed_chat_lines.clear()
return deleted_files
# Query parameter checker - parse q for username
def check_query_params():
query_params = st.query_params if hasattr(st, 'query_params') else st.experimental_get_query_params()
    q_value = query_params.get("q")
    if isinstance(q_value, list):  # experimental_get_query_params returns a list per key
        q_value = q_value[0] if q_value else None
if q_value and q_value in FUN_USERNAMES:
st.session_state.username = q_value
st.session_state.voice = FUN_USERNAMES[q_value]
return q_value
elif q_value:
st.session_state.user_id = q_value # Use as user_id if not a valid username
return None
# Mermaid graph generator
def generate_mermaid_graph(chat_lines):
mermaid_code = "graph TD\n"
nodes = {}
edges = []
for i, line in enumerate(chat_lines):
if line.strip() and not line.startswith(' '):
timestamp = line.split('] ')[0][1:] if '] ' in line else "Unknown"
content = line.split(': ', 1)[1] if ': ' in line else line
user = content.split(' ')[0]
message = content.split(' ', 1)[1] if ' ' in content else ''
node_id = f"{user}_{i}"
nodes[node_id] = f"{user}: {message}"
if i + 1 < len(chat_lines) and "Audio:" in chat_lines[i + 1]:
audio_node = f"audio_{i}"
nodes[audio_node] = "π΅"
edges.append(f"{node_id} --> {audio_node}")
if i + 2 < len(chat_lines) and "Media:" in chat_lines[i + 2]:
media_node = f"media_{i}"
nodes[media_node] = "πΌ"
edges.append(f"{node_id} --> {media_node}")
            if i > 0 and "> " in line and ': ' in chat_lines[i - 1]:
                parent_user = chat_lines[i - 1].split(': ')[1].split(' ')[0]
parent_id = f"{parent_user}_{i-1}"
edges.append(f"{parent_id} --> {node_id}")
for node_id, label in nodes.items():
mermaid_code += f" {node_id}[\"{label}\"]\n"
mermaid_code += "\n".join(f" {edge}" for edge in edges)
return mermaid_code
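# generate_mermaid_graph illustrative output for a single join line such as
# "[2025-01-01 12:00:00] Client-1234: CosmicJester has joined!":
#   graph TD
#     CosmicJester_0["CosmicJester: has joined!"]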
# Main execution - letβs roll! π²π
def main():
NODE_NAME, port = get_node_name()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
async def async_interface():
# Generate user ID and hash if not set
if not st.session_state.user_id:
st.session_state.user_id = str(uuid.uuid4())
st.session_state.user_hash = await generate_user_hash()
# Check query params first to override username
q_value = check_query_params()
if not q_value and 'username' not in st.session_state:
chat_content = await load_chat()
available_names = [name for name in FUN_USERNAMES if not any(f"{name} has joined" in line for line in chat_content.split('\n'))]
st.session_state.username = random.choice(available_names) if available_names else random.choice(list(FUN_USERNAMES.keys()))
st.session_state.voice = FUN_USERNAMES[st.session_state.username]
st.markdown(f"**ποΈ Voice Selected**: {st.session_state.voice} π£οΈ for {st.session_state.username}")
# Check existing history file for content
user_history_file = f"{st.session_state.username}_history.md"
if os.path.exists(user_history_file):
with open(user_history_file, 'r') as f:
st.session_state.displayed_chat_lines = f.read().split('\n')
        user_url = f"?{urlencode({'q': st.session_state.username})}"  # query-string link; urlencode handles emoji/spaces
with st.container():
st.markdown(f"Your unique URL path: [{user_url}]({user_url})", unsafe_allow_html=True)
with st.container():
st.markdown(f"#### π€π§ MMO {st.session_state.username}ππ¬")
st.markdown(f"Welcome to {START_ROOM} - chat, vote, upload, paste images, and enjoy quoting! π User ID: {st.session_state.user_id}", unsafe_allow_html=True)
if not st.session_state.server_task:
st.session_state.server_task = loop.create_task(run_websocket_server())
# Unified Chat History at Top with Markdown Emoji Output
with st.container():
st.markdown(f"##### {START_ROOM} Chat History π¬")
chat_content = await load_chat()
chat_lines = [line for line in chat_content.split('\n') if line.strip() and not line.startswith('#')]
if chat_lines:
chat_by_minute = {}
for line in reversed(chat_lines):
timestamp = line.split('] ')[0][1:] if '] ' in line else "Unknown"
minute_key = timestamp[:16] # Up to minute
if minute_key not in chat_by_minute:
chat_by_minute[minute_key] = []
chat_by_minute[minute_key].append(line)
markdown_output = ""
for minute, lines in chat_by_minute.items():
minute_output = f"###### {minute[-5:]}\n" # Show only HH:MM
for line in lines:
if ': ' in line and not line.startswith(' '):
user_message = line.split(': ', 1)[1]
user = user_message.split(' ')[0]
msg = user_message.split(' ', 1)[1] if ' ' in user_message else ''
audio_html = ""
media_content = ""
next_lines = chat_lines[chat_lines.index(line)+1:chat_lines.index(line)+3]
for nl in next_lines:
if "Audio:" in nl:
audio_file = nl.split("Audio: ")[-1].strip()
audio_html = play_and_download_audio(audio_file)
elif "Media:" in nl:
media_file = nl.split("Media: ")[-1].strip('![]()')
if media_file.endswith('.b64'):
                                            display_base64_media(media_file, width="100px")  # renders via st.image/st.audio; returns nothing
                                            media_content = ""
elif media_file.endswith(('.png', '.jpg')):
                                            media_content = f"![]({media_file})"  # markdown image reference (reconstructed)
elif media_file.endswith('.mp4'):
media_content = get_video_html(media_file)
elif media_file.endswith('.pdf'):
media_content = f"π {os.path.basename(media_file)}"
minute_output += f"- π¬ **{user}**: {msg} {audio_html} {media_content}\n"
markdown_output += minute_output
st.markdown(markdown_output, unsafe_allow_html=True)
# Mermaid Graph Visualization
st.markdown("###### Chat Relationship Tree π³")
mermaid_code = generate_mermaid_graph(chat_lines)
mermaid_html = f"""