import streamlit as st
import asyncio
import websockets
import uuid
import argparse
from datetime import datetime
import os
import random
import time
import hashlib
from PIL import Image
import glob
import base64
import io
import streamlit.components.v1 as components
import edge_tts
from audio_recorder_streamlit import audio_recorder
import nest_asyncio
import re
from streamlit_paste_button import paste_image_button
import pytz
import shutil
# Patch for nested async - sneaky fix! 🐍✨
nest_asyncio.apply()
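# nest_asyncio patches asyncio so run_until_complete() can be called even when an event loop
# is already running - the pattern main() relies on below when mixing Streamlit reruns with
# the asyncio-based chat/TTS helpers.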
# Static config - constants rule! πŸ“πŸ‘‘
icons = '🤖🧠🔬📝'
START_ROOM = "Sector 🌌"
# Page setup - dressing up the window! πŸ–ΌοΈπŸŽ€
st.set_page_config(
    page_title="🤖🧠MMO Chat Brain📝🔬",
page_icon=icons,
layout="wide",
initial_sidebar_state="auto"
)
# Funky usernames with corresponding Edge TTS voices
FUN_USERNAMES = {
"CosmicJester 🌌": "en-US-AriaNeural",
"PixelPanda 🐼": "en-US-JennyNeural",
"QuantumQuack πŸ¦†": "en-GB-SoniaNeural",
"StellarSquirrel 🐿️": "en-AU-NatashaNeural",
"GizmoGuru βš™οΈ": "en-CA-ClaraNeural",
"NebulaNinja 🌠": "en-US-GuyNeural",
"ByteBuster πŸ’Ύ": "en-GB-RyanNeural",
"GalacticGopher 🌍": "en-AU-WilliamNeural",
"RocketRaccoon πŸš€": "en-CA-LiamNeural",
"EchoElf 🧝": "en-US-AnaNeural",
"PhantomFox 🦊": "en-US-BrandonNeural",
"WittyWizard πŸ§™": "en-GB-ThomasNeural",
"LunarLlama πŸŒ™": "en-AU-FreyaNeural",
"SolarSloth β˜€οΈ": "en-CA-LindaNeural",
"AstroAlpaca πŸ¦™": "en-US-ChristopherNeural",
"CyberCoyote 🐺": "en-GB-ElliotNeural",
"MysticMoose 🦌": "en-AU-JamesNeural",
"GlitchGnome 🧚": "en-CA-EthanNeural",
"VortexViper 🐍": "en-US-AmberNeural",
"ChronoChimp πŸ’": "en-GB-LibbyNeural"
}
# Folders galore - organizing chaos! πŸ“‚πŸŒ€
CHAT_DIR = "chat_logs"
VOTE_DIR = "vote_logs"
STATE_FILE = "user_state.txt"
AUDIO_DIR = "audio_logs"
HISTORY_DIR = "history_logs"
MEDIA_DIR = "media_files"
os.makedirs(CHAT_DIR, exist_ok=True)
os.makedirs(VOTE_DIR, exist_ok=True)
os.makedirs(AUDIO_DIR, exist_ok=True)
os.makedirs(HISTORY_DIR, exist_ok=True)
os.makedirs(MEDIA_DIR, exist_ok=True)
CHAT_FILE = os.path.join(CHAT_DIR, "global_chat.md")
QUOTE_VOTES_FILE = os.path.join(VOTE_DIR, "quote_votes.md")
MEDIA_VOTES_FILE = os.path.join(VOTE_DIR, "media_votes.md")
HISTORY_FILE = os.path.join(HISTORY_DIR, "chat_history.md")
# Fancy digits - numbers got style! πŸ”’πŸ’ƒ
UNICODE_DIGITS = {i: f"{i}\uFE0F⃣" for i in range(10)}
# Massive font collection - typography bonanza! πŸ–‹οΈπŸŽ¨
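# Each mapper below shifts ASCII letters into a Unicode "Mathematical Alphanumeric Symbols"
# block: take the letter's offset from 'A' (0x41) or 'a' (0x61) and add the block's base code
# point, e.g. the Bold mapper sends 'A' (U+0041) to '𝐀' (U+1D400); anything outside A-Z/a-z
# passes through unchanged. The Squared/Circled/Regional variants only cover uppercase, which
# is why they leave lowercase letters as-is.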
UNICODE_FONTS = [
("Normal", lambda x: x),
("Bold", lambda x: "".join(chr(ord(c) + 0x1D400 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D41A - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Italic", lambda x: "".join(chr(ord(c) + 0x1D434 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D44E - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Bold Italic", lambda x: "".join(chr(ord(c) + 0x1D468 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D482 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Script", lambda x: "".join(chr(ord(c) + 0x1D49C - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D4B6 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Bold Script", lambda x: "".join(chr(ord(c) + 0x1D4D0 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D4EA - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Fraktur", lambda x: "".join(chr(ord(c) + 0x1D504 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D51E - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Bold Fraktur", lambda x: "".join(chr(ord(c) + 0x1D56C - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D586 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Double Struck", lambda x: "".join(chr(ord(c) + 0x1D538 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D552 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Sans Serif", lambda x: "".join(chr(ord(c) + 0x1D5A0 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D5BA - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Sans Serif Bold", lambda x: "".join(chr(ord(c) + 0x1D5D4 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D5EE - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Sans Serif Italic", lambda x: "".join(chr(ord(c) + 0x1D608 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D622 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Sans Serif Bold Italic", lambda x: "".join(chr(ord(c) + 0x1D63C - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D656 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Monospace", lambda x: "".join(chr(ord(c) + 0x1D670 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D68A - 0x61) if 'a' <= c <= 'z' else c for c in x)),
("Circled", lambda x: "".join(chr(ord(c) - 0x41 + 0x24B6) if 'A' <= c <= 'Z' else chr(ord(c) - 0x61 + 0x24D0) if 'a' <= c <= 'z' else c for c in x)),
("Squared", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F130) if 'A' <= c <= 'Z' else c for c in x)),
("Negative Circled", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F150) if 'A' <= c <= 'Z' else c for c in x)),
("Negative Squared", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F170) if 'A' <= c <= 'Z' else c for c in x)),
("Regional Indicator", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F1E6) if 'A' <= c <= 'Z' else c for c in x)),
]
# Global state - keeping tabs! πŸŒπŸ“‹
if 'server_running' not in st.session_state:
st.session_state.server_running = False
if 'server_task' not in st.session_state:
st.session_state.server_task = None
if 'active_connections' not in st.session_state:
st.session_state.active_connections = {}
if 'media_notifications' not in st.session_state:
st.session_state.media_notifications = []
if 'last_chat_update' not in st.session_state:
st.session_state.last_chat_update = 0
if 'displayed_chat_lines' not in st.session_state:
st.session_state.displayed_chat_lines = []
if 'old_val' not in st.session_state:
st.session_state.old_val = ""
if 'last_query' not in st.session_state:
st.session_state.last_query = ""
if 'message_text' not in st.session_state:
st.session_state.message_text = ""
if 'audio_cache' not in st.session_state:
st.session_state.audio_cache = {}
if 'pasted_image_data' not in st.session_state:
st.session_state.pasted_image_data = None
if 'quote_line' not in st.session_state:
st.session_state.quote_line = None
if 'refresh_rate' not in st.session_state:
st.session_state.refresh_rate = 5
if 'base64_cache' not in st.session_state:
st.session_state.base64_cache = {}
if 'transcript_history' not in st.session_state:
st.session_state.transcript_history = []
if 'last_transcript' not in st.session_state:
st.session_state.last_transcript = ""
if 'image_hashes' not in st.session_state:
st.session_state.image_hashes = set()
if 'gallery_columns' not in st.session_state:
st.session_state.gallery_columns = 1 # Default gallery tiles
# Timestamp wizardry - clock ticks with flair! ⏰🎩
def format_timestamp_prefix(username):
central = pytz.timezone('US/Central')
now = datetime.now(central)
return f"{now.strftime('%I-%M-%p-ct-%m-%d-%Y')}-by-{username}"
# Compute image hash from binary data
def compute_image_hash(image_data):
if isinstance(image_data, Image.Image):
img_byte_arr = io.BytesIO()
image_data.save(img_byte_arr, format='PNG')
img_bytes = img_byte_arr.getvalue()
else:
img_bytes = image_data
return hashlib.md5(img_bytes).hexdigest()[:8]
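# The 8-hex-char MD5 prefix is only a deduplication fingerprint for pasted/uploaded media,
# not a security hash; identical bytes always yield the same short id.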
# Node naming - christening the beast! 🌐🍼
def get_node_name():
parser = argparse.ArgumentParser(description='Start a chat node with a specific name')
parser.add_argument('--node-name', type=str, default=None)
parser.add_argument('--port', type=int, default=8501)
args = parser.parse_args()
username = st.session_state.get('username', 'System 🌟')
log_action(username, "🌐🍼 - Node naming - christening the beast!")
return args.node_name or f"node-{uuid.uuid4().hex[:8]}", args.port
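# When launched through Streamlit, script arguments go after a double dash, e.g.
# `streamlit run app.py -- --node-name alpha --port 8502` (assuming the standard Streamlit
# CLI pass-through of script args).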
# Action logger - spying on deeds! πŸ•΅οΈπŸ“œ
def log_action(username, action):
if 'action_log' not in st.session_state:
st.session_state.action_log = {}
user_log = st.session_state.action_log.setdefault(username, {})
current_time = time.time()
user_log = {k: v for k, v in user_log.items() if current_time - v < 10}
st.session_state.action_log[username] = user_log
if action not in user_log:
central = pytz.timezone('US/Central')
with open(HISTORY_FILE, 'a') as f:
f.write(f"[{datetime.now(central).strftime('%Y-%m-%d %H:%M:%S')}] {username}: {action}\n")
user_log[action] = current_time
# Clean text - strip the fancy stuff! πŸ§ΉπŸ“
def clean_text_for_tts(text):
cleaned = re.sub(r'[#*!\[\]]+', '', text)
cleaned = ' '.join(cleaned.split())
return cleaned[:200] if cleaned else "No text to speak"
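# Example: clean_text_for_tts("# Hello *world*!") -> "Hello world" - markdown markers and
# brackets are stripped so the TTS voice doesn't read them aloud, and output is capped at 200 chars.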
# Chat saver - words locked tight! πŸ’¬πŸ”’
async def save_chat_entry(username, message, is_markdown=False):
await asyncio.to_thread(log_action, username, "πŸ’¬πŸ”’ - Chat saver - words locked tight!")
central = pytz.timezone('US/Central')
timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
if is_markdown:
entry = f"[{timestamp}] {username}:\n```markdown\n{message}\n```"
else:
entry = f"[{timestamp}] {username}: {message}"
await asyncio.to_thread(lambda: open(CHAT_FILE, 'a').write(f"{entry}\n"))
voice = st.session_state.voice if username == st.session_state.username else FUN_USERNAMES.get(username, "en-US-AriaNeural")
cleaned_message = clean_text_for_tts(message)
audio_file = await async_edge_tts_generate(cleaned_message, voice)
if audio_file:
with open(HISTORY_FILE, 'a') as f:
f.write(f"[{timestamp}] {username} ({voice}): Audio generated - {audio_file}\n")
await broadcast_message(f"{username}|{message}", "chat")
st.session_state.last_chat_update = time.time()
return audio_file
# Save chat history with image
async def save_chat_history_with_image(username, image_path):
central = pytz.timezone('US/Central')
timestamp = datetime.now(central).strftime("%Y-%m-%d_%H-%M-%S")
history_filename = f"chat_history_{timestamp}-by-{username}.md"
history_filepath = os.path.join(HISTORY_DIR, history_filename)
chat_content = await load_chat()
voice = st.session_state.voice if username == st.session_state.username else FUN_USERNAMES.get(username, "en-US-AriaNeural")
with open(history_filepath, 'w') as f:
f.write(f"# Chat History at {timestamp} by {username} (Voice: {voice})\n\n")
f.write(f"## Image Shared: {os.path.basename(image_path)}\n")
f.write(chat_content)
return history_filepath
# Chat loader - history unleashed! πŸ“œπŸš€
async def load_chat():
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸ“œπŸš€ - Chat loader - history unleashed!")
if not os.path.exists(CHAT_FILE):
await asyncio.to_thread(lambda: open(CHAT_FILE, 'a').write(f"# {START_ROOM} Chat\n\nWelcome to the cosmic hub - start chatting! 🎀\n"))
with open(CHAT_FILE, 'r') as f:
content = await asyncio.to_thread(f.read)
return content
# User lister - who's in the gang! 👥🎉
async def get_user_list(chat_content):
    username = st.session_state.get('username', 'System 🌟')
    await asyncio.to_thread(log_action, username, "👥🎉 - User lister - who's in the gang!")
    users = set()
    for line in chat_content.split('\n'):
        # Chat lines look like "[YYYY-MM-DD HH:MM:SS] Username: message"; split on "] " first
        # so the colons inside the timestamp are skipped, then take the name before the colon.
        if line.strip() and '] ' in line and ': ' in line:
            user = line.split('] ', 1)[1].split(':', 1)[0].strip()
            users.add(user)
    return sorted(list(users))
# Join checker - been here before? πŸšͺπŸ”
async def has_joined_before(client_id, chat_content):
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸšͺπŸ” - Join checker - been here before?")
return any(f"Client-{client_id}" in line for line in chat_content.split('\n'))
# Suggestion maker - old quips resurface! πŸ’‘πŸ“
async def get_message_suggestions(chat_content, prefix):
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸ’‘πŸ“ - Suggestion maker - old quips resurface!")
lines = chat_content.split('\n')
messages = [line.split(': ', 1)[1] for line in lines if ': ' in line and line.strip()]
return [msg for msg in messages if msg.lower().startswith(prefix.lower())][:5]
# Vote saver - cheers recorded! πŸ‘πŸ“Š
async def save_vote(file, item, user_hash, username, comment=""):
await asyncio.to_thread(log_action, username, "πŸ‘πŸ“Š - Vote saver - cheers recorded!")
central = pytz.timezone('US/Central')
timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
entry = f"[{timestamp}] {user_hash} voted for {item}"
await asyncio.to_thread(lambda: open(file, 'a').write(f"{entry}\n"))
await asyncio.to_thread(lambda: open(HISTORY_FILE, "a").write(f"- {timestamp} - User {user_hash} voted for {item}\n"))
chat_message = f"{username} upvoted: \"{item}\""
if comment:
chat_message += f" - {comment}"
await save_chat_entry(username, chat_message)
# Vote counter - tallying the love! πŸ†πŸ“ˆ
async def load_votes(file):
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸ†πŸ“ˆ - Vote counter - tallying the love!")
if not os.path.exists(file):
await asyncio.to_thread(lambda: open(file, 'w').write("# Vote Tally\n\nNo votes yet - get clicking! πŸ–±οΈ\n"))
with open(file, 'r') as f:
content = await asyncio.to_thread(f.read)
lines = content.strip().split('\n')[2:]
votes = {}
user_votes = set()
for line in lines:
if line.strip() and 'voted for' in line:
user_hash = line.split('] ')[1].split(' voted for ')[0]
item = line.split('voted for ')[1]
vote_key = f"{user_hash}-{item}"
if vote_key not in user_votes:
votes[item] = votes.get(item, 0) + 1
user_votes.add(vote_key)
return votes
# Hash generator - secret codes ahoy! πŸ”‘πŸ•΅οΈ
async def generate_user_hash():
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸ”‘πŸ•΅οΈ - Hash generator - secret codes ahoy!")
if 'user_hash' not in st.session_state:
st.session_state.user_hash = hashlib.md5(str(random.getrandbits(128)).encode()).hexdigest()[:8]
return st.session_state.user_hash
# Audio maker - voices come alive! 🎢🌟
async def async_edge_tts_generate(text, voice, rate=0, pitch=0, file_format="mp3"):
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "🎢🌟 - Audio maker - voices come alive!")
timestamp = format_timestamp_prefix(username)
filename = f"{timestamp}.{file_format}"
filepath = os.path.join(AUDIO_DIR, filename)
communicate = edge_tts.Communicate(text, voice, rate=f"{rate:+d}%", pitch=f"{pitch:+d}Hz")
try:
await communicate.save(filepath)
return filepath if os.path.exists(filepath) else None
except edge_tts.exceptions.NoAudioReceived:
with open(HISTORY_FILE, 'a') as f:
central = pytz.timezone('US/Central')
f.write(f"[{datetime.now(central).strftime('%Y-%m-%d %H:%M:%S')}] {username}: Audio failed - No audio received for '{text}'\n")
return None
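# Note: edge-tts expects rate/pitch as signed strings such as "+0%" and "+0Hz", which is what the
# f-strings above produce; NoAudioReceived is raised when the service returns no audio (e.g. for
# empty or unspeakable text), so callers treat a None return as "no clip generated".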
# Audio player - tunes blast off! πŸ”ŠπŸš€
def play_and_download_audio(file_path):
if file_path and os.path.exists(file_path):
st.audio(file_path)
if file_path not in st.session_state.base64_cache:
with open(file_path, "rb") as f:
b64 = base64.b64encode(f.read()).decode()
st.session_state.base64_cache[file_path] = b64
b64 = st.session_state.base64_cache[file_path]
dl_link = f'<a href="data:audio/mpeg;base64,{b64}" download="{os.path.basename(file_path)}">🎡 Download {os.path.basename(file_path)}</a>'
st.markdown(dl_link, unsafe_allow_html=True)
# Image saver - pics preserved with naming! πŸ“ΈπŸ’Ύ
async def save_pasted_image(image, username):
await asyncio.to_thread(log_action, username, "πŸ“ΈπŸ’Ύ - Image saver - pics preserved!")
img_hash = compute_image_hash(image)
if img_hash in st.session_state.image_hashes:
return None
timestamp = format_timestamp_prefix(username)
filename = f"{timestamp}-{img_hash}.png"
filepath = os.path.join(MEDIA_DIR, filename)
await asyncio.to_thread(image.save, filepath, "PNG")
st.session_state.image_hashes.add(img_hash)
await save_chat_history_with_image(username, filepath)
return filepath
# Video renderer verbatim from your last version - movies roll with autoplay! πŸŽ₯🎬
def get_video_html(video_path, width="100%"):
video_url = f"data:video/mp4;base64,{base64.b64encode(open(video_path, 'rb').read()).decode()}"
return f'''
<video width="{width}" controls autoplay muted loop>
<source src="{video_url}" type="video/mp4">
Your browser does not support the video tag.
</video>
'''
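# Trade-off of this approach: the whole video is inlined into the page as a base64 data URI,
# which keeps serving simple but can bloat the rendered HTML for large files.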
# Audio renderer - sounds soar! 🎢✈️
async def get_audio_html(audio_path, width="100%"):
    username = st.session_state.get('username', 'System 🌟')
    await asyncio.to_thread(log_action, username, "🎢✈️ - Audio renderer - sounds soar!")
    # Read the audio bytes in a worker thread, then inline them as a base64 data URI.
    audio_bytes = await asyncio.to_thread(lambda: open(audio_path, 'rb').read())
    audio_url = f"data:audio/mpeg;base64,{base64.b64encode(audio_bytes).decode()}"
    return f'<audio controls style="width: {width};"><source src="{audio_url}" type="audio/mpeg">Your browser does not support the audio element.</audio>'
# Websocket handler - chat links up! πŸŒπŸ”—
async def websocket_handler(websocket, path):
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸŒπŸ”— - Websocket handler - chat links up!")
try:
client_id = str(uuid.uuid4())
room_id = "chat"
st.session_state.active_connections.setdefault(room_id, {})[client_id] = websocket
chat_content = await load_chat()
username = st.session_state.get('username', random.choice(list(FUN_USERNAMES.keys())))
if not await has_joined_before(client_id, chat_content):
await save_chat_entry(f"Client-{client_id}", f"{username} has joined {START_ROOM}!")
async for message in websocket:
parts = message.split('|', 1)
if len(parts) == 2:
username, content = parts
await save_chat_entry(username, content)
except websockets.ConnectionClosed:
pass
finally:
if room_id in st.session_state.active_connections and client_id in st.session_state.active_connections[room_id]:
del st.session_state.active_connections[room_id][client_id]
# Message broadcaster - words fly far! πŸ“’βœˆοΈ
async def broadcast_message(message, room_id):
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸ“’βœˆοΈ - Message broadcaster - words fly far!")
if room_id in st.session_state.active_connections:
disconnected = []
for client_id, ws in st.session_state.active_connections[room_id].items():
try:
await ws.send(message)
except websockets.ConnectionClosed:
disconnected.append(client_id)
for client_id in disconnected:
del st.session_state.active_connections[room_id][client_id]
# Server starter - web spins up! πŸ–₯οΈπŸŒ€
async def run_websocket_server():
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸ–₯οΈπŸŒ€ - Server starter - web spins up!")
if not st.session_state.server_running:
server = await websockets.serve(websocket_handler, '0.0.0.0', 8765)
st.session_state.server_running = True
await server.wait_closed()
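# The chat websocket listens on 0.0.0.0:8765, separate from the Streamlit HTTP port parsed in
# get_node_name(); clients are expected to send messages as "username|content" (see
# websocket_handler above).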
# Voice processor - speech to text! πŸŽ€πŸ“
async def process_voice_input(audio_bytes):
username = st.session_state.get('username', 'System 🌟')
await asyncio.to_thread(log_action, username, "πŸŽ€πŸ“ - Voice processor - speech to text!")
if audio_bytes:
text = "Voice input simulation"
await save_chat_entry(username, text)
# Dummy AI lookup function (replace with actual implementation)
async def perform_ai_lookup(query, vocal_summary=True, extended_refs=False, titles_summary=True, full_audio=False, useArxiv=True, useArxivAudio=False):
username = st.session_state.get('username', 'System 🌟')
result = f"AI Lookup Result for '{query}' (Arxiv: {useArxiv}, Audio: {useArxivAudio})"
await save_chat_entry(username, result)
if useArxivAudio:
audio_file = await async_edge_tts_generate(result, st.session_state.voice)
if audio_file:
st.audio(audio_file)
# Delete all user files function
def delete_user_files():
protected_files = {'app.py', 'requirements.txt', 'README.md'}
deleted_files = []
directories = [MEDIA_DIR, AUDIO_DIR, CHAT_DIR, VOTE_DIR, HISTORY_DIR]
for directory in directories:
if os.path.exists(directory):
for root, _, files in os.walk(directory):
for file in files:
file_path = os.path.join(root, file)
if os.path.basename(file_path) not in protected_files:
try:
os.remove(file_path)
deleted_files.append(file_path)
except Exception as e:
st.error(f"Failed to delete {file_path}: {e}")
try:
shutil.rmtree(directory, ignore_errors=True)
os.makedirs(directory, exist_ok=True)
except Exception as e:
st.error(f"Failed to remove directory {directory}: {e}")
st.session_state.image_hashes.clear()
st.session_state.audio_cache.clear()
st.session_state.base64_cache.clear()
st.session_state.displayed_chat_lines.clear()
return deleted_files
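# Note: shutil.rmtree() already wipes each directory, so the per-file loop above mostly exists
# to build the deleted_files list that gets reported back in the UI.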
# ASR Component HTML
ASR_HTML = """
<html>
<head>
<title>Continuous Speech Demo</title>
<style>
body {
font-family: sans-serif;
padding: 20px;
max-width: 800px;
margin: 0 auto;
}
button {
padding: 10px 20px;
margin: 10px 5px;
font-size: 16px;
}
#status {
margin: 10px 0;
padding: 10px;
background: #e8f5e9;
border-radius: 4px;
}
#output {
white-space: pre-wrap;
padding: 15px;
background: #f5f5f5;
border-radius: 4px;
margin: 10px 0;
min-height: 100px;
max-height: 400px;
overflow-y: auto;
}
.controls {
margin: 10px 0;
}
</style>
</head>
<body>
<div class="controls">
<button id="start">Start Listening</button>
<button id="stop" disabled>Stop Listening</button>
<button id="clear">Clear Text</button>
</div>
<div id="status">Ready</div>
<div id="output"></div>
<script>
if (!('webkitSpeechRecognition' in window)) {
alert('Speech recognition not supported');
} else {
const recognition = new webkitSpeechRecognition();
const startButton = document.getElementById('start');
const stopButton = document.getElementById('stop');
const clearButton = document.getElementById('clear');
const status = document.getElementById('status');
const output = document.getElementById('output');
let fullTranscript = '';
let lastUpdateTime = Date.now();
recognition.continuous = true;
recognition.interimResults = true;
const startRecognition = () => {
try {
recognition.start();
status.textContent = 'Listening...';
startButton.disabled = true;
stopButton.disabled = false;
} catch (e) {
console.error(e);
status.textContent = 'Error: ' + e.message;
}
};
window.addEventListener('load', () => {
setTimeout(startRecognition, 1000);
});
startButton.onclick = startRecognition;
stopButton.onclick = () => {
recognition.stop();
status.textContent = 'Stopped';
startButton.disabled = false;
stopButton.disabled = true;
};
clearButton.onclick = () => {
fullTranscript = '';
output.textContent = '';
sendDataToPython({value: '', dataType: "json"});
};
recognition.onresult = (event) => {
let interimTranscript = '';
let finalTranscript = '';
for (let i = event.resultIndex; i < event.results.length; i++) {
const transcript = event.results[i][0].transcript;
if (event.results[i].isFinal) {
finalTranscript += transcript + '\\n';
} else {
interimTranscript += transcript;
}
}
if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
if (finalTranscript) {
fullTranscript += finalTranscript;
}
lastUpdateTime = Date.now();
output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
output.scrollTop = output.scrollHeight;
sendDataToPython({value: fullTranscript, dataType: "json"});
}
};
recognition.onend = () => {
if (!stopButton.disabled) {
try {
recognition.start();
console.log('Restarted recognition');
} catch (e) {
console.error('Failed to restart recognition:', e);
status.textContent = 'Error restarting: ' + e.message;
startButton.disabled = false;
stopButton.disabled = true;
}
}
};
recognition.onerror = (event) => {
console.error('Recognition error:', event.error);
status.textContent = 'Error: ' + event.error;
if (event.error === 'not-allowed' || event.error === 'service-not-allowed') {
startButton.disabled = false;
stopButton.disabled = true;
}
};
}
function sendDataToPython(data) {
window.parent.postMessage({
isStreamlitMessage: true,
type: "streamlit:setComponentValue",
...data
}, "*");
}
window.addEventListener('load', function() {
window.setTimeout(function() {
window.parent.postMessage({
isStreamlitMessage: true,
type: "streamlit:setFrameHeight",
height: document.documentElement.clientHeight
}, "*");
}, 0);
});
</script>
</body>
</html>
"""
# Main execution - let’s roll! πŸŽ²πŸš€
def main():
NODE_NAME, port = get_node_name()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
async def async_interface():
if 'username' not in st.session_state:
chat_content = await load_chat()
available_names = [name for name in FUN_USERNAMES if not any(f"{name} has joined" in line for line in chat_content.split('\n'))]
st.session_state.username = random.choice(available_names) if available_names else random.choice(list(FUN_USERNAMES.keys()))
st.session_state.voice = FUN_USERNAMES[st.session_state.username]
st.markdown(f"**πŸŽ™οΈ Voice Selected**: {st.session_state.voice} πŸ—£οΈ for {st.session_state.username}")
st.title(f"πŸ€–πŸ§ MMO {st.session_state.username}πŸ“πŸ”¬")
st.markdown(f"Welcome to {START_ROOM} - chat, vote, upload, paste images, and enjoy quoting! πŸŽ‰")
if not st.session_state.server_task:
st.session_state.server_task = loop.create_task(run_websocket_server())
audio_bytes = audio_recorder()
if audio_bytes:
await process_voice_input(audio_bytes)
st.rerun()
# Continuous Speech Input (ASR)
st.subheader("🎀 Continuous Speech Input")
asr_component = components.html(ASR_HTML, height=400)
if asr_component and isinstance(asr_component, dict) and 'value' in asr_component:
transcript = asr_component['value'].strip()
if transcript and transcript != st.session_state.last_transcript:
st.session_state.transcript_history.append(transcript)
await save_chat_entry(st.session_state.username, transcript, is_markdown=True)
st.session_state.last_transcript = transcript
st.rerun()
# Unified Chat History at Top
st.subheader(f"{START_ROOM} Chat History πŸ’¬")
chat_content = await load_chat()
chat_lines = chat_content.split('\n')
chat_lines = [line for line in chat_lines if line.strip() and ': ' in line and not line.startswith('#')]
if chat_lines:
col1, col2 = st.columns([2, 1])
with col1:
st.write("### Text & Audio Chat")
for i, line in enumerate(reversed(chat_lines)): # Descending order
col_text, col_audio = st.columns([3, 1])
with col_text:
if "```markdown" in line:
markdown_content = re.search(r'```markdown\n(.*?)```', line, re.DOTALL)
if markdown_content:
st.markdown(markdown_content.group(1))
else:
st.markdown(line)
else:
st.markdown(line)
with col_audio:
                        # Recover the speaker name from "[timestamp] Username: message" so the right TTS voice is used
                        username = line.split('] ', 1)[1].split(':', 1)[0].strip() if '] ' in line else line.split(':', 1)[0]
cache_key = f"{line}_{FUN_USERNAMES.get(username, 'en-US-AriaNeural')}"
if cache_key not in st.session_state.audio_cache:
cleaned_text = clean_text_for_tts(line.split(': ', 1)[1])
voice = st.session_state.voice if username == st.session_state.username else FUN_USERNAMES.get(username, "en-US-AriaNeural")
audio_file = await async_edge_tts_generate(cleaned_text, voice)
st.session_state.audio_cache[cache_key] = audio_file
audio_file = st.session_state.audio_cache.get(cache_key)
if audio_file:
play_and_download_audio(audio_file)
if st.session_state.quote_line:
st.markdown(f"### Quoting: {st.session_state.quote_line}")
quote_response = st.text_area("Add your response", key="quote_response", value=st.session_state.message_text)
paste_result_quote = paste_image_button("πŸ“‹ Paste Image or Text with Quote", key="paste_button_quote")
if paste_result_quote.image_data is not None:
if isinstance(paste_result_quote.image_data, str):
st.session_state.message_text = paste_result_quote.image_data
                    # Use a distinct key so this preview doesn't collide with the "quote_response" widget above
                    st.text_area("Add your response", key="quote_response_paste", value=st.session_state.message_text)
else:
st.image(paste_result_quote.image_data, caption="Received Image for Quote")
filename = await save_pasted_image(paste_result_quote.image_data, st.session_state.username)
if filename:
st.session_state.pasted_image_data = filename
if st.button("Send Quote πŸš€", key="send_quote"):
markdown_response = f"### Quote Response\n- **Original**: {st.session_state.quote_line}\n- **{st.session_state.username} Replies**: {quote_response}"
if st.session_state.pasted_image_data:
markdown_response += f"\n- **Image**: ![Pasted Image]({st.session_state.pasted_image_data})"
await save_chat_entry(st.session_state.username, f"Pasted image: {st.session_state.pasted_image_data}")
st.session_state.pasted_image_data = None
await save_chat_entry(st.session_state.username, markdown_response, is_markdown=True)
st.session_state.quote_line = None
st.session_state.message_text = ''
st.rerun()
current_selection = st.session_state.username if st.session_state.username in FUN_USERNAMES else ""
new_username = st.selectbox("Change Name and Voice", [""] + list(FUN_USERNAMES.keys()), index=(list(FUN_USERNAMES.keys()).index(current_selection) + 1 if current_selection else 0), format_func=lambda x: f"{x} ({FUN_USERNAMES.get(x, 'No Voice')})" if x else "Select a name")
if new_username and new_username != st.session_state.username:
await save_chat_entry("System 🌟", f"{st.session_state.username} changed name to {new_username}")
st.session_state.username = new_username
st.session_state.voice = FUN_USERNAMES[new_username]
st.markdown(f"**πŸŽ™οΈ Voice Changed**: {st.session_state.voice} πŸ—£οΈ for {st.session_state.username}")
st.rerun()
# Message input with Send button on the right
col_input, col_send = st.columns([5, 1])
with col_input:
message = st.text_input(f"Message as {st.session_state.username} (Voice: {st.session_state.voice})", key="message_input", value=st.session_state.message_text)
with col_send:
if st.button("Send πŸš€", key="send_button"):
if message.strip():
audio_file = await save_chat_entry(st.session_state.username, message, is_markdown=True)
if audio_file:
st.session_state.audio_cache[f"{message}_{FUN_USERNAMES[st.session_state.username]}"] = audio_file
st.audio(audio_file) # Immediate preview
if st.session_state.pasted_image_data:
await save_chat_entry(st.session_state.username, f"Pasted image: {st.session_state.pasted_image_data}")
st.session_state.pasted_image_data = None
st.session_state.message_text = ''
st.rerun()
paste_result_msg = paste_image_button("πŸ“‹ Paste Image or Text with Message", key="paste_button_msg")
if paste_result_msg.image_data is not None:
if isinstance(paste_result_msg.image_data, str):
st.session_state.message_text = paste_result_msg.image_data
st.text_input(f"Message as {st.session_state.username} (Voice: {st.session_state.voice})", key="message_input_paste", value=st.session_state.message_text)
else:
st.image(paste_result_msg.image_data, caption="Received Image for Message")
filename = await save_pasted_image(paste_result_msg.image_data, st.session_state.username)
if filename:
st.session_state.pasted_image_data = filename
tab_main = st.radio("Action:", ["🎀 Voice", "πŸ“Έ Media", "πŸ” ArXiv", "πŸ“ Editor"], horizontal=True)
useArxiv = st.checkbox("Search Arxiv for Research Paper Answers", value=True)
useArxivAudio = st.checkbox("Generate Audio File for Research Paper Answers", value=False)
st.subheader("Upload Media 🎨🎢πŸŽ₯")
uploaded_file = st.file_uploader("Upload Media", type=['png', 'jpg', 'mp4', 'mp3'])
if uploaded_file:
timestamp = format_timestamp_prefix(st.session_state.username)
username = st.session_state.username
ext = uploaded_file.name.split('.')[-1]
file_hash = hashlib.md5(uploaded_file.getbuffer()).hexdigest()[:8]
if file_hash not in st.session_state.image_hashes:
filename = f"{timestamp}-{file_hash}.{ext}"
file_path = os.path.join(MEDIA_DIR, filename)
await asyncio.to_thread(lambda: open(file_path, 'wb').write(uploaded_file.getbuffer()))
st.success(f"Uploaded {filename}")
await save_chat_entry(username, f"Uploaded media: {file_path}")
await save_chat_history_with_image(username, file_path)
st.session_state.image_hashes.add(file_hash)
if file_path.endswith('.mp4'):
st.session_state.media_notifications.append(file_path)
# Big Red Delete Button
st.subheader("πŸ›‘ Danger Zone")
if st.button("Try Not To Delete It All On Your First Day", key="delete_all", help="Deletes all user-added files!", type="primary", use_container_width=True):
deleted_files = delete_user_files()
if deleted_files:
st.markdown("### πŸ—‘οΈ Deleted Files:\n" + "\n".join([f"- `{file}`" for file in deleted_files]))
else:
st.markdown("### πŸ—‘οΈ Nothing to Delete!")
st.rerun()
st.subheader("Refresh ⏳")
refresh_rate = st.slider("Refresh Rate", 1, 300, st.session_state.refresh_rate)
st.session_state.refresh_rate = refresh_rate
        timer_placeholder = st.empty()
        # The countdown-and-rerun loop now runs at the very end of the page (after the media
        # gallery and full log) so that those sections actually render before st.rerun() fires.
# Gallery with Adjustable Tiles
st.subheader("Media Gallery 🎨🎢πŸŽ₯")
gallery_columns = st.slider("Number of Gallery Tiles", 1, 20, st.session_state.gallery_columns)
st.session_state.gallery_columns = gallery_columns
media_files = glob.glob(f"{MEDIA_DIR}/*.png") + glob.glob(f"{MEDIA_DIR}/*.jpg") + glob.glob(f"{MEDIA_DIR}/*.mp4")
if media_files:
media_votes = await load_votes(MEDIA_VOTES_FILE)
seen_files = set()
cols = st.columns(gallery_columns)
col_idx = 0
for media_file in sorted(media_files, key=os.path.getmtime, reverse=True):
if media_file not in seen_files:
seen_files.add(media_file)
with cols[col_idx]:
filename = os.path.basename(media_file)
vote_count = media_votes.get(media_file, 0)
st.markdown(f"**{filename}**")
if media_file.endswith(('.png', '.jpg')):
st.image(media_file, use_container_width=True)
elif media_file.endswith('.mp4'):
st.markdown(get_video_html(media_file), unsafe_allow_html=True) # Verbatim autoplay
if st.button(f"πŸ‘ {vote_count}", key=f"media_vote_{media_file}"):
await save_vote(MEDIA_VOTES_FILE, media_file, await generate_user_hash(), st.session_state.username)
st.rerun()
col_idx = (col_idx + 1) % gallery_columns
# Full Log at End
st.subheader("Full Chat Log πŸ“œ")
with open(HISTORY_FILE, 'r') as f:
history_content = f.read()
st.markdown(history_content)
loop.run_until_complete(async_interface())
if __name__ == "__main__":
main()