Update app.py
app.py CHANGED
@@ -10,6 +10,7 @@ import time
 import hashlib
 from PIL import Image
 import glob
+from urllib.parse import quote
 import base64
 import io
 import streamlit.components.v1 as components
@@ -17,9 +18,6 @@ import edge_tts
 from audio_recorder_streamlit import audio_recorder
 import nest_asyncio
 import re
-from streamlit_paste_button import paste_image_button
-import pytz
-import shutil
 
 # Patch for nested async - sneaky fix! πβ¨
 nest_asyncio.apply()
@@ -38,26 +36,26 @@ st.set_page_config(
 
 # Funky usernames - who's who in the zoo with unique voices! ππΎποΈ
 FUN_USERNAMES = {
-    "CosmicJester π": "en-US-AriaNeural",
-    "PixelPanda πΌ": "en-US-JennyNeural",
-    "QuantumQuack π¦": "en-GB-SoniaNeural",
-    "StellarSquirrel πΏοΈ": "en-AU-NatashaNeural",
-    "GizmoGuru βοΈ": "en-CA-ClaraNeural",
-    "NebulaNinja π ": "en-US-GuyNeural",
-    "ByteBuster πΎ": "en-GB-RyanNeural",
-    "GalacticGopher π": "en-AU-WilliamNeural",
-    "RocketRaccoon π": "en-CA-LiamNeural",
-    "EchoElf π§": "en-US-AnaNeural",
-    "PhantomFox π¦": "en-US-BrandonNeural",
-    "WittyWizard π§": "en-GB-ThomasNeural",
-    "LunarLlama π": "en-AU-FreyaNeural",
-    "SolarSloth βοΈ": "en-CA-LindaNeural",
-    "AstroAlpaca π¦": "en-US-ChristopherNeural",
-    "CyberCoyote πΊ": "en-GB-ElliotNeural",
-    "MysticMoose π¦": "en-AU-JamesNeural",
-    "GlitchGnome π§": "en-CA-EthanNeural",
-    "VortexViper π": "en-US-AmberNeural",
-    "ChronoChimp π": "en-GB-LibbyNeural"
+    "CosmicJester π": "en-US-AriaNeural",           # US Female, bright & clear
+    "PixelPanda πΌ": "en-US-JennyNeural",            # US Female, warm & friendly
+    "QuantumQuack π¦": "en-GB-SoniaNeural",          # UK Female, posh & crisp
+    "StellarSquirrel πΏοΈ": "en-AU-NatashaNeural",    # AU Female, lively & upbeat
+    "GizmoGuru βοΈ": "en-CA-ClaraNeural",            # CA Female, calm & soothing
+    "NebulaNinja π ": "en-US-GuyNeural",             # US Male, deep & cool
+    "ByteBuster πΎ": "en-GB-RyanNeural",             # UK Male, smooth & refined
+    "GalacticGopher π": "en-AU-WilliamNeural",       # AU Male, bold & rugged
+    "RocketRaccoon π": "en-CA-LiamNeural",           # CA Male, steady & warm
+    "EchoElf π§": "en-US-AnaNeural",                 # US Female, soft & gentle (child-like)
+    "PhantomFox π¦": "en-US-BrandonNeural",          # US Male, confident & rich
+    "WittyWizard π§": "en-GB-ThomasNeural",          # UK Male, authoritative & clear
+    "LunarLlama π": "en-AU-FreyaNeural",             # AU Female, sweet & melodic
+    "SolarSloth βοΈ": "en-CA-LindaNeural",           # CA Female, neutral & pleasant
+    "AstroAlpaca π¦": "en-US-ChristopherNeural",     # US Male, strong & resonant
+    "CyberCoyote πΊ": "en-GB-ElliotNeural",          # UK Male, youthful & energetic
+    "MysticMoose π¦": "en-AU-JamesNeural",           # AU Male, deep & grounded
+    "GlitchGnome π§": "en-CA-EthanNeural",           # CA Male, bright & lively
+    "VortexViper π": "en-US-AmberNeural",            # US Female, expressive & vibrant
+    "ChronoChimp π": "en-GB-LibbyNeural"             # UK Female, cheerful & distinct
 }
 
 # Folders galore - organizing chaos! ππ
@@ -66,12 +64,10 @@ VOTE_DIR = "vote_logs"
 STATE_FILE = "user_state.txt"
 AUDIO_DIR = "audio_logs"
 HISTORY_DIR = "history_logs"
-MEDIA_DIR = "media_files"
 os.makedirs(CHAT_DIR, exist_ok=True)
 os.makedirs(VOTE_DIR, exist_ok=True)
 os.makedirs(AUDIO_DIR, exist_ok=True)
 os.makedirs(HISTORY_DIR, exist_ok=True)
-os.makedirs(MEDIA_DIR, exist_ok=True)
 
 CHAT_FILE = os.path.join(CHAT_DIR, "global_chat.md")
 QUOTE_VOTES_FILE = os.path.join(VOTE_DIR, "quote_votes.md")
@@ -100,7 +96,7 @@ UNICODE_FONTS = [
     ("Circled", lambda x: "".join(chr(ord(c) - 0x41 + 0x24B6) if 'A' <= c <= 'Z' else chr(ord(c) - 0x61 + 0x24D0) if 'a' <= c <= 'z' else c for c in x)),
     ("Squared", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F130) if 'A' <= c <= 'Z' else c for c in x)),
     ("Negative Circled", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F150) if 'A' <= c <= 'Z' else c for c in x)),
-    ("Negative Squared", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F170) if 'A' <= c <= 'Z' else c for c in x)),
+    ("Negative Squared", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F170) if 'A' <= c <= 'Z' else c for c in x)),
     ("Regional Indicator", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F1E6) if 'A' <= c <= 'Z' else c for c in x)),
 ]
 
@@ -111,52 +107,10 @@ if 'server_task' not in st.session_state:
     st.session_state.server_task = None
 if 'active_connections' not in st.session_state:
     st.session_state.active_connections = {}
-if 'media_notifications' not in st.session_state:
-    st.session_state.media_notifications = []
-if 'last_chat_update' not in st.session_state:
-    st.session_state.last_chat_update = 0
-if 'displayed_chat_lines' not in st.session_state:
-    st.session_state.displayed_chat_lines = []
-if 'old_val' not in st.session_state:
-    st.session_state.old_val = ""
-if 'last_query' not in st.session_state:
-    st.session_state.last_query = ""
-if 'message_text' not in st.session_state:
-    st.session_state.message_text = ""
-if 'audio_cache' not in st.session_state:
-    st.session_state.audio_cache = {}
-if 'pasted_image_data' not in st.session_state:
-    st.session_state.pasted_image_data = None
-if 'quote_line' not in st.session_state:
-    st.session_state.quote_line = None
-if 'refresh_rate' not in st.session_state:
-    st.session_state.refresh_rate = 5
-if 'base64_cache' not in st.session_state:
-    st.session_state.base64_cache = {}
-if 'transcript_history' not in st.session_state:
-    st.session_state.transcript_history = []
-if 'last_transcript' not in st.session_state:
-    st.session_state.last_transcript = ""
-if 'image_hashes' not in st.session_state:
-    st.session_state.image_hashes = set()
-if 'gallery_columns' not in st.session_state:
-    st.session_state.gallery_columns = 1  # Default gallery tiles
 
 # Timestamp wizardry - clock ticks with flair! β°π©
-def format_timestamp_prefix(username):
-    central = pytz.timezone('US/Central')
-    now = datetime.now(central)
-    return f"{now.strftime('%I-%M-%p-ct-%m-%d-%Y')}-by-{username}"
-
-# Compute image hash from binary data
-def compute_image_hash(image_data):
-    if isinstance(image_data, Image.Image):
-        img_byte_arr = io.BytesIO()
-        image_data.save(img_byte_arr, format='PNG')
-        img_bytes = img_byte_arr.getvalue()
-    else:
-        img_bytes = image_data
-    return hashlib.md5(img_bytes).hexdigest()[:8]
+def format_timestamp_prefix():
+    return datetime.now().strftime("%Y%m%d_%H%M%S")
 
 # Node naming - christening the beast! ππΌ
 def get_node_name():
@@ -177,50 +131,31 @@ def log_action(username, action):
     user_log = {k: v for k, v in user_log.items() if current_time - v < 10}
     st.session_state.action_log[username] = user_log
     if action not in user_log:
-        central = pytz.timezone('US/Central')
         with open(HISTORY_FILE, 'a') as f:
-            f.write(f"[{datetime.now(central).strftime('%Y-%m-%d %H:%M:%S')}] {username}: {action}\n")
+            f.write(f"[{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] {username}: {action}\n")
         user_log[action] = current_time
 
 # Clean text - strip the fancy stuff! π§Ήπ
 def clean_text_for_tts(text):
+    # Remove Markdown formatting (e.g., #, *, [], ![])
     cleaned = re.sub(r'[#*!\[\]]+', '', text)
+    # Replace newlines with spaces and strip extra whitespace
     cleaned = ' '.join(cleaned.split())
+    # Ensure some text exists, max 200 chars to avoid edgeTTS limits
     return cleaned[:200] if cleaned else "No text to speak"
 
 # Chat saver - words locked tight! π¬π
-async def save_chat_entry(username, message, is_markdown=False):
+async def save_chat_entry(username, message):
     await asyncio.to_thread(log_action, username, "π¬π - Chat saver - words locked tight!")
-    central = pytz.timezone('US/Central')
-    timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
-    if is_markdown:
-        entry = f"[{timestamp}] {username}:\n```markdown\n{message}\n```"
-    else:
-        entry = f"[{timestamp}] {username}: {message}"
+    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    entry = f"[{timestamp}] {username}: {message}"
     await asyncio.to_thread(lambda: open(CHAT_FILE, 'a').write(f"{entry}\n"))
     voice = FUN_USERNAMES.get(username, "en-US-AriaNeural")
     cleaned_message = clean_text_for_tts(message)
     audio_file = await async_edge_tts_generate(cleaned_message, voice)
     if audio_file:
         with open(HISTORY_FILE, 'a') as f:
-            f.write(f"[{timestamp}] {username}
+            f.write(f"[{timestamp}] {username}: Audio generated - {audio_file}\n")
-    await broadcast_message(f"{username}|{message}", "chat")
-    st.session_state.last_chat_update = time.time()
-    return audio_file
-
-# Save chat history with image
-async def save_chat_history_with_image(username, image_path):
-    central = pytz.timezone('US/Central')
-    timestamp = datetime.now(central).strftime("%Y-%m-%d_%H-%M-%S")
-    history_filename = f"chat_history_{timestamp}-by-{username}.md"
-    history_filepath = os.path.join(HISTORY_DIR, history_filename)
-    chat_content = await load_chat()
-    voice = FUN_USERNAMES.get(username, "en-US-AriaNeural")
-    with open(history_filepath, 'w') as f:
-        f.write(f"# Chat History at {timestamp} by {username} (Voice: {voice})\n\n")
-        f.write(f"## Image Shared: {os.path.basename(image_path)}\n")
-        f.write(chat_content)
-    return history_filepath
 
 # Chat loader - history unleashed! ππ
 async def load_chat():
@@ -247,7 +182,7 @@ async def get_user_list(chat_content):
 async def has_joined_before(client_id, chat_content):
     username = st.session_state.get('username', 'System π')
     await asyncio.to_thread(log_action, username, "πͺπ - Join checker - been here before?")
-    return any(f"Client-{client_id}" in line for line in chat_content.split('\n'))
+    return any(f"Client-{client_id} has joined" in line for line in chat_content.split('\n'))
 
 # Suggestion maker - old quips resurface! π‘π
 async def get_message_suggestions(chat_content, prefix):
@@ -260,8 +195,7 @@ async def get_message_suggestions(chat_content, prefix):
 # Vote saver - cheers recorded! ππ
 async def save_vote(file, item, user_hash, username, comment=""):
     await asyncio.to_thread(log_action, username, "ππ - Vote saver - cheers recorded!")
-    central = pytz.timezone('US/Central')
-    timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
+    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     entry = f"[{timestamp}] {user_hash} voted for {item}"
     await asyncio.to_thread(lambda: open(file, 'a').write(f"{entry}\n"))
     await asyncio.to_thread(lambda: open(HISTORY_FILE, "a").write(f"- {timestamp} - User {user_hash} voted for {item}\n"))
@@ -303,53 +237,46 @@ async def generate_user_hash():
 async def async_edge_tts_generate(text, voice, rate=0, pitch=0, file_format="mp3"):
     username = st.session_state.get('username', 'System π')
     await asyncio.to_thread(log_action, username, "πΆπ - Audio maker - voices come alive!")
-    timestamp = format_timestamp_prefix(username)
-    filename = f"{timestamp}.{file_format}"
-    filepath = os.path.join(AUDIO_DIR, filename)
+    timestamp = format_timestamp_prefix()
+    filename = os.path.join(AUDIO_DIR, f"audio_{timestamp}_{random.randint(1000, 9999)}.mp3")
     communicate = edge_tts.Communicate(text, voice, rate=f"{rate:+d}%", pitch=f"{pitch:+d}Hz")
     try:
-        await communicate.save(filepath)
-        return filepath
+        await communicate.save(filename)
+        return filename if os.path.exists(filename) else None
     except edge_tts.exceptions.NoAudioReceived:
         with open(HISTORY_FILE, 'a') as f:
-            central = pytz.timezone('US/Central')
-            f.write(f"[{datetime.now(central).strftime('%Y-%m-%d %H:%M:%S')}] {username}: Audio failed - No audio received for '{text}'\n")
+            f.write(f"[{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] {username}: Audio failed - No audio received for '{text}'\n")
         return None
 
 # Audio player - tunes blast off! ππ
 def play_and_download_audio(file_path):
     if file_path and os.path.exists(file_path):
         st.audio(file_path)
-        if file_path not in st.session_state.base64_cache:
-            with open(file_path, "rb") as f:
-                b64 = base64.b64encode(f.read()).decode()
-            st.session_state.base64_cache[file_path] = b64
-        b64 = st.session_state.base64_cache[file_path]
+        with open(file_path, "rb") as f:
+            b64 = base64.b64encode(f.read()).decode()
         dl_link = f'<a href="data:audio/mpeg;base64,{b64}" download="{os.path.basename(file_path)}">π΅ Download {os.path.basename(file_path)}</a>'
         st.markdown(dl_link, unsafe_allow_html=True)
 
-# Image saver - pics preserved
-async def save_pasted_image(image_data, username):
+# Image saver - pics preserved! πΈπΎ
+async def save_pasted_image(image_data):
+    username = st.session_state.get('username', 'System π')
     await asyncio.to_thread(log_action, username, "πΈπΎ - Image saver - pics preserved!")
+    timestamp = format_timestamp_prefix()
+    filename = f"paste_{timestamp}.png"
+    filepath = os.path.join('./', filename)
+    if ',' in image_data:
+        image_data = image_data.split(',')[1]
+    img_bytes = base64.b64decode(image_data)
+    img = Image.open(io.BytesIO(img_bytes))
+    await asyncio.to_thread(img.save, filepath, "PNG")
+    return filename
+
-# Video renderer - movies roll with autoplay! π₯π¬
+# Video renderer - movies roll! π₯π¬
 async def get_video_html(video_path, width="100%"):
     username = st.session_state.get('username', 'System π')
     await asyncio.to_thread(log_action, username, "π₯π¬ - Video renderer - movies roll!")
-    video_url = f"data:video/mp4;base64,{base64.b64encode(video_data).decode()}"
-    return f'<video width="{width}" controls autoplay><source src="{video_url}" type="video/mp4">Your browser does not support the video tag.</video>'
+    video_url = f"data:video/mp4;base64,{base64.b64encode(await asyncio.to_thread(open, video_path, 'rb').read()).decode()}"
+    return f'<video width="{width}" controls autoplay muted loop><source src="{video_url}" type="video/mp4">Your browser does not support the video tag.</video>'
 
 # Audio renderer - sounds soar! πΆβοΈ
 async def get_audio_html(audio_path, width="100%"):
@@ -375,6 +302,7 @@ async def websocket_handler(websocket, path):
             if len(parts) == 2:
                 username, content = parts
                 await save_chat_entry(username, content)
+                await broadcast_message(f"{username}|{content}", room_id)
     except websockets.ConnectionClosed:
         pass
     finally:
@@ -409,214 +337,11 @@ async def process_voice_input(audio_bytes):
     username = st.session_state.get('username', 'System π')
     await asyncio.to_thread(log_action, username, "π€π - Voice processor - speech to text!")
     if audio_bytes:
-        text = "Voice input simulation"
+        text = "Voice input simulation"  # Replace with actual speech-to-text logic
         await save_chat_entry(username, text)
 
-#
-    username = st.session_state.get('username', 'System π')
-    result = f"AI Lookup Result for '{query}' (Arxiv: {useArxiv}, Audio: {useArxivAudio})"
-    await save_chat_entry(username, result)
-    if useArxivAudio:
-        audio_file = await async_edge_tts_generate(result, FUN_USERNAMES.get(username, "en-US-AriaNeural"))
-        if audio_file:
-            st.audio(audio_file)
-
-# Delete all user files function
-def delete_user_files():
-    protected_files = {'app.py', 'requirements.txt', 'README.md'}
-    deleted_files = []
-    directories = [MEDIA_DIR, AUDIO_DIR, CHAT_DIR, VOTE_DIR, HISTORY_DIR]
-    for directory in directories:
-        if os.path.exists(directory):
-            for root, _, files in os.walk(directory):
-                for file in files:
-                    file_path = os.path.join(root, file)
-                    if os.path.basename(file_path) not in protected_files:
-                        try:
-                            os.remove(file_path)
-                            deleted_files.append(file_path)
-                        except Exception as e:
-                            st.error(f"Failed to delete {file_path}: {e}")
-            try:
-                shutil.rmtree(directory, ignore_errors=True)
-                os.makedirs(directory, exist_ok=True)
-            except Exception as e:
-                st.error(f"Failed to remove directory {directory}: {e}")
-    st.session_state.image_hashes.clear()
-    st.session_state.audio_cache.clear()
-    st.session_state.base64_cache.clear()
-    st.session_state.displayed_chat_lines.clear()
-    return deleted_files
-
-# ASR Component HTML
-ASR_HTML = """
-<html>
-<head>
-    <title>Continuous Speech Demo</title>
-    <style>
-        body {
-            font-family: sans-serif;
-            padding: 20px;
-            max-width: 800px;
-            margin: 0 auto;
-        }
-        button {
-            padding: 10px 20px;
-            margin: 10px 5px;
-            font-size: 16px;
-        }
-        #status {
-            margin: 10px 0;
-            padding: 10px;
-            background: #e8f5e9;
-            border-radius: 4px;
-        }
-        #output {
-            white-space: pre-wrap;
-            padding: 15px;
-            background: #f5f5f5;
-            border-radius: 4px;
-            margin: 10px 0;
-            min-height: 100px;
-            max-height: 400px;
-            overflow-y: auto;
-        }
-        .controls {
-            margin: 10px 0;
-        }
-    </style>
-</head>
-<body>
-    <div class="controls">
-        <button id="start">Start Listening</button>
-        <button id="stop" disabled>Stop Listening</button>
-        <button id="clear">Clear Text</button>
-    </div>
-    <div id="status">Ready</div>
-    <div id="output"></div>
-
-    <script>
-        if (!('webkitSpeechRecognition' in window)) {
-            alert('Speech recognition not supported');
-        } else {
-            const recognition = new webkitSpeechRecognition();
-            const startButton = document.getElementById('start');
-            const stopButton = document.getElementById('stop');
-            const clearButton = document.getElementById('clear');
-            const status = document.getElementById('status');
-            const output = document.getElementById('output');
-            let fullTranscript = '';
-            let lastUpdateTime = Date.now();
-
-            recognition.continuous = true;
-            recognition.interimResults = true;
-
-            const startRecognition = () => {
-                try {
-                    recognition.start();
-                    status.textContent = 'Listening...';
-                    startButton.disabled = true;
-                    stopButton.disabled = false;
-                } catch (e) {
-                    console.error(e);
-                    status.textContent = 'Error: ' + e.message;
-                }
-            };
-
-            window.addEventListener('load', () => {
-                setTimeout(startRecognition, 1000);
-            });
-
-            startButton.onclick = startRecognition;
-
-            stopButton.onclick = () => {
-                recognition.stop();
-                status.textContent = 'Stopped';
-                startButton.disabled = false;
-                stopButton.disabled = true;
-            };
-
-            clearButton.onclick = () => {
-                fullTranscript = '';
-                output.textContent = '';
-                sendDataToPython({value: '', dataType: "json"});
-            };
-
-            recognition.onresult = (event) => {
-                let interimTranscript = '';
-                let finalTranscript = '';
-
-                for (let i = event.resultIndex; i < event.results.length; i++) {
-                    const transcript = event.results[i][0].transcript;
-                    if (event.results[i].isFinal) {
-                        finalTranscript += transcript + '\\n';
-                    } else {
-                        interimTranscript += transcript;
-                    }
-                }
-
-                if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
-                    if (finalTranscript) {
-                        fullTranscript += finalTranscript;
-                    }
-                    lastUpdateTime = Date.now();
-                    output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
-                    output.scrollTop = output.scrollHeight;
-                    sendDataToPython({value: fullTranscript, dataType: "json"});
-                }
-            };
-
-            recognition.onend = () => {
-                if (!stopButton.disabled) {
-                    try {
-                        recognition.start();
-                        console.log('Restarted recognition');
-                    } catch (e) {
-                        console.error('Failed to restart recognition:', e);
-                        status.textContent = 'Error restarting: ' + e.message;
-                        startButton.disabled = false;
-                        stopButton.disabled = true;
-                    }
-                }
-            };
-
-            recognition.onerror = (event) => {
-                console.error('Recognition error:', event.error);
-                status.textContent = 'Error: ' + event.error;
-                if (event.error === 'not-allowed' || event.error === 'service-not-allowed') {
-                    startButton.disabled = false;
-                    stopButton.disabled = true;
-                }
-            };
-        }
-
-        function sendDataToPython(data) {
-            window.parent.postMessage({
-                isStreamlitMessage: true,
-                type: "streamlit:setComponentValue",
-                ...data
-            }, "*");
-        }
-
-        window.addEventListener('load', function() {
-            window.setTimeout(function() {
-                window.parent.postMessage({
-                    isStreamlitMessage: true,
-                    type: "streamlit:setFrameHeight",
-                    height: document.documentElement.clientHeight
-                }, "*");
-            }, 0);
-        });
-    </script>
-</body>
-</html>
-"""
-
-# Main execution - let's roll! π²π
-def main():
-    NODE_NAME, port = get_node_name()
-
+# Interface builder - UI takes shape! π¨ποΈ
+def create_streamlit_interface():
     loop = asyncio.new_event_loop()
     asyncio.set_event_loop(loop)
 
@@ -625,8 +350,30 @@ def main():
             chat_content = await load_chat()
             available_names = [name for name in FUN_USERNAMES if not any(f"{name} has joined" in line for line in chat_content.split('\n'))]
             st.session_state.username = random.choice(available_names) if available_names else random.choice(list(FUN_USERNAMES.keys()))
+
+        if 'refresh_rate' not in st.session_state:
+            st.session_state.refresh_rate = 5
+        if 'timer_start' not in st.session_state:
+            st.session_state.timer_start = time.time()
+        if 'quote_line' not in st.session_state:
+            st.session_state.quote_line = None
+        if 'pasted_image_data' not in st.session_state:
+            st.session_state.pasted_image_data = None
+        if 'message_text' not in st.session_state:
+            st.session_state.message_text = ""
+        if 'audio_cache' not in st.session_state:
+            st.session_state.audio_cache = {}
+        if 'chat_history' not in st.session_state:
+            st.session_state.chat_history = []
+
+        st.markdown("""
+        <style>
+        .chat-box {font-family: monospace; background: #1e1e1e; color: #d4d4d4; padding: 10px; border-radius: 5px; height: 300px; overflow-y: auto;}
+        .timer {font-size: 24px; color: #ffcc00; text-align: center; animation: pulse 1s infinite;}
+        @keyframes pulse {0% {transform: scale(1);} 50% {transform: scale(1.1);} 100% {transform: scale(1);}}
+        #paste-target {border: 2px dashed #ccc; padding: 20px; text-align: center; cursor: pointer;}
+        </style>
+        """, unsafe_allow_html=True)
 
         st.title(f"π€π§ MMO {st.session_state.username}ππ¬")
         st.markdown(f"Welcome to {START_ROOM} - chat, vote, upload, paste images, and enjoy quoting! π")
@@ -639,140 +386,150 @@ def main():
             await process_voice_input(audio_bytes)
             st.rerun()
 
-        st.subheader("π€ Continuous Speech Input")
-        asr_component = components.html(ASR_HTML, height=400)
-        if asr_component and isinstance(asr_component, dict) and 'value' in asr_component:
-            transcript = asr_component['value'].strip()
-            if transcript and transcript != st.session_state.last_transcript:
-                st.session_state.transcript_history.append(transcript)
-                await save_chat_entry(st.session_state.username, transcript, is_markdown=True)
-                st.session_state.last_transcript = transcript
-                st.rerun()
-
-        # Unified Chat History at Top
-        st.subheader(f"{START_ROOM} Chat History π¬")
+        st.subheader(f"{START_ROOM} Chat π¬")
         chat_content = await load_chat()
         chat_lines = chat_content.split('\n')
+        chat_votes = await load_votes(QUOTE_VOTES_FILE)
+        for i, line in enumerate(chat_lines):
+            if line.strip() and ': ' in line:
+                col1, col2, col3 = st.columns([4, 1, 1])
+                with col1:
+                    st.markdown(line)
+                    username = line.split(': ')[1].split(' ')[0]
+                    audio_file = None
+                    cache_key = f"{line}_{FUN_USERNAMES.get(username, 'en-US-AriaNeural')}"
+                    if cache_key in st.session_state.audio_cache:
+                        audio_file = st.session_state.audio_cache[cache_key]
+                    else:
+                        cleaned_text = clean_text_for_tts(line.split(': ', 1)[1])
+                        audio_file = await async_edge_tts_generate(cleaned_text, FUN_USERNAMES.get(username, "en-US-AriaNeural"))
+                        st.session_state.audio_cache[cache_key] = audio_file
+                    if audio_file:
+                        play_and_download_audio(audio_file)
+                with col2:
+                    vote_count = chat_votes.get(line.split('. ')[1] if '. ' in line else line, 0)
+                    if st.button(f"π {vote_count}", key=f"chat_vote_{i}"):
+                        comment = st.session_state.message_text
+                        await save_vote(QUOTE_VOTES_FILE, line.split('. ')[1] if '. ' in line else line, await generate_user_hash(), st.session_state.username, comment)
+                        if st.session_state.pasted_image_data:
+                            filename = await save_pasted_image(st.session_state.pasted_image_data)
+                            if filename:
+                                await save_chat_entry(st.session_state.username, f"Pasted image: {filename}")
+                            st.session_state.pasted_image_data = None
+                        st.session_state.message_text = ''
+                        st.rerun()
+                with col3:
+                    if st.button("π’ Quote", key=f"quote_{i}"):
+                        st.session_state.quote_line = line
+                        st.rerun()
+
+        if 'quote_line' in st.session_state:
             st.markdown(f"### Quoting: {st.session_state.quote_line}")
-            quote_response = st.text_area("Add your response", key="quote_response"
-            paste_result_quote = paste_image_button("π Paste Image or Text with Quote", key="paste_button_quote")
-            if paste_result_quote.image_data is not None:
-                if isinstance(paste_result_quote.image_data, str):
-                    st.session_state.message_text = paste_result_quote.image_data
-                    st.text_area("Add your response", key="quote_response", value=st.session_state.message_text)
-                else:
-                    st.image(paste_result_quote.image_data, caption="Received Image for Quote")
-                    filename = await save_pasted_image(paste_result_quote.image_data, st.session_state.username)
-                    if filename:
-                        st.session_state.pasted_image_data = filename
+            quote_response = st.text_area("Add your response", key="quote_response")
             if st.button("Send Quote π", key="send_quote"):
-                markdown_response
+                async def process_quote():
+                    await asyncio.to_thread(log_action, st.session_state.username, "π’π¬ - Quote processor - echoes resound!")
+                    markdown_response = f"### Quote Response\n- **Original**: {st.session_state.quote_line}\n- **{st.session_state.username} Replies**: {quote_response}"
+                    if st.session_state.pasted_image_data:
+                        filename = await save_pasted_image(st.session_state.pasted_image_data)
+                        if filename:
+                            markdown_response += f"\n- **Image**: 
+                        st.session_state.pasted_image_data = None
+                    try:
+                        await save_chat_entry(st.session_state.username, markdown_response)
+                    except edge_tts.exceptions.NoAudioReceived:
+                        # Log failure but continue without audio
+                        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+                        with open(HISTORY_FILE, 'a') as f:
+                            f.write(f"[{timestamp}] {st.session_state.username}: Quote saved without audio - No audio received\n")
+                        await asyncio.to_thread(lambda: open(CHAT_FILE, 'a').write(f"[{timestamp}] {st.session_state.username}: {markdown_response}\n"))
+                loop.run_until_complete(process_quote())
+                del st.session_state.quote_line
                 st.session_state.message_text = ''
                 st.rerun()
 
-        new_username = st.selectbox("Change Name and Voice", [""] + list(FUN_USERNAMES.keys()), index=(list(FUN_USERNAMES.keys()).index(current_selection) + 1 if current_selection else 0), format_func=lambda x: f"{x} ({FUN_USERNAMES.get(x, 'No Voice')})" if x else "Select a name")
+        new_username = st.selectbox("Change Name", [""] + list(FUN_USERNAMES.keys()), index=0)
         if new_username and new_username != st.session_state.username:
+            loop.run_until_complete(save_chat_entry("System π", f"{st.session_state.username} changed name to {new_username}"))
             st.session_state.username = new_username
-            st.session_state.voice = FUN_USERNAMES[new_username]
-            st.markdown(f"**ποΈ Voice Changed**: {st.session_state.voice} π£οΈ for {st.session_state.username}")
             st.rerun()
 
-        if st.button("Send π", key="send_button"):
-            if message.strip():
-                audio_file = await save_chat_entry(st.session_state.username, message, is_markdown=True)
-                if audio_file:
-                    st.session_state.audio_cache[f"{message}_{FUN_USERNAMES[st.session_state.username]}"] = audio_file
-                    st.audio(audio_file)  # Immediate preview
-                if st.session_state.pasted_image_data:
-                    await save_chat_entry(st.session_state.username, f"Pasted image: {st.session_state.pasted_image_data}")
-                    st.session_state.pasted_image_data = None
-                st.session_state.message_text = ''
-                st.rerun()
-
-        paste_result_msg = paste_image_button("π Paste Image or Text with Message", key="paste_button_msg")
-        if paste_result_msg.image_data is not None:
-            if isinstance(paste_result_msg.image_data, str):
-                st.session_state.message_text = paste_result_msg.image_data
-                st.text_input(f"Message as {st.session_state.username} (Voice: {st.session_state.voice})", key="message_input_paste", value=st.session_state.message_text)
-            else:
-                st.image(paste_result_msg.image_data, caption="Received Image for Message")
-                filename = await save_pasted_image(paste_result_msg.image_data, st.session_state.username)
+        message = st.text_input(f"Message as {st.session_state.username}", key="message_input", value=st.session_state.message_text, on_change=lambda: st.session_state.update(message_text=st.session_state.message_input))
+        if st.button("Send π", key="send_button") and message.strip():
+            loop.run_until_complete(save_chat_entry(st.session_state.username, message))
+            if st.session_state.pasted_image_data:
+                filename = loop.run_until_complete(save_pasted_image(st.session_state.pasted_image_data))
                 if filename:
-                    st.session_state.
+                    loop.run_until_complete(save_chat_entry(st.session_state.username, f"Pasted image: {filename}"))
+                st.session_state.pasted_image_data = None
+            st.session_state.message_text = ''
+            st.rerun()
 
+        components.html(
+            """
+            <div id="paste-target">Paste an image here (Ctrl+V)</div>
+            <script>
+            const pasteTarget = document.getElementById('paste-target');
+            pasteTarget.addEventListener('paste', (event) => {
+                const items = (event.clipboardData || window.clipboardData).items;
+                for (let i = 0; i < items.length; i++) {
+                    if (items[i].type.indexOf('image') !== -1) {
+                        const blob = items[i].getAsFile();
+                        const reader = new FileReader();
+                        reader.onload = (e) => {
+                            window.parent.postMessage({
+                                type: 'streamlit:setComponentValue',
+                                value: e.target.result
+                            }, '*');
+                            pasteTarget.innerHTML = '<p>Image pasted! Processing...</p>';
+                        };
+                        reader.readAsDataURL(blob);
+                    }
+                }
+                event.preventDefault();
+            });
+            </script>
+            """,
+            height=100
+        )
 
-        st.subheader("
-        uploaded_file = st.file_uploader("Upload Media", type=['png', 'jpg', '
+        st.subheader("Media Gallery π¨πΆπ₯")
+        uploaded_file = st.file_uploader("Upload Media", type=['png', 'jpg', 'mp3', 'mp4'])
         if uploaded_file:
+            file_path = os.path.join('./', uploaded_file.name)
+            await asyncio.to_thread(lambda: open(file_path, 'wb').write(uploaded_file.getbuffer()))
+            st.success(f"Uploaded {uploaded_file.name}")
+
+        media_files = glob.glob("./*.png") + glob.glob("./*.jpg") + glob.glob("./*.mp3") + glob.glob("./*.mp4")
+        if media_files:
+            cols = st.columns(3)
+            media_votes = loop.run_until_complete(load_votes(MEDIA_VOTES_FILE))
+            for idx, media_file in enumerate(media_files):
+                vote_count = media_votes.get(media_file, 0)
+                if vote_count > 0:
+                    with cols[idx % 3]:
+                        if media_file.endswith(('.png', '.jpg')):
+                            st.image(media_file, use_container_width=True)
+                        elif media_file.endswith('.mp3'):
+                            st.markdown(loop.run_until_complete(get_audio_html(media_file)), unsafe_allow_html=True)
+                        elif media_file.endswith('.mp4'):
+                            st.markdown(loop.run_until_complete(get_video_html(media_file)), unsafe_allow_html=True)
+                        col1, col2 = st.columns(2)
+                        with col1:
+                            if st.button(f"π {vote_count}", key=f"media_vote_{idx}"):
+                                comment = st.session_state.message_text
+                                loop.run_until_complete(save_vote(MEDIA_VOTES_FILE, media_file, await generate_user_hash(), st.session_state.username, comment))
+                                if st.session_state.pasted_image_data:
+                                    filename = loop.run_until_complete(save_pasted_image(st.session_state.pasted_image_data))
+                                    if filename:
+                                        loop.run_until_complete(save_chat_entry(st.session_state.username, f"Pasted image: {filename}"))
+                                    st.session_state.pasted_image_data = None
+                                st.session_state.message_text = ''
+                                st.rerun()
+                        with col2:
+                            if st.button("ποΈ", key=f"media_delete_{idx}"):
+                                await asyncio.to_thread(os.remove, media_file)
+                                st.rerun()
 
         st.subheader("Refresh β³")
         refresh_rate = st.slider("Refresh Rate", 1, 300, st.session_state.refresh_rate)
@@ -782,42 +539,20 @@ def main():
             font_name, font_func = random.choice(UNICODE_FONTS)
             countdown_str = "".join(UNICODE_DIGITS[int(d)] for d in str(i)) if i < 10 else font_func(str(i))
             timer_placeholder.markdown(f"<p class='timer'>β³ {font_func('Refresh in:')} {countdown_str}</p>", unsafe_allow_html=True)
+            loop.run_until_complete(asyncio.sleep(1))
         st.rerun()
 
-        st.subheader("Media Gallery π¨πΆπ₯")
-        gallery_columns = st.slider("Number of Gallery Tiles", 1, 20, st.session_state.gallery_columns)
-        st.session_state.gallery_columns = gallery_columns
-        media_files = glob.glob(f"{MEDIA_DIR}/*.png") + glob.glob(f"{MEDIA_DIR}/*.jpg") + glob.glob(f"{MEDIA_DIR}/*.mp4")
-        if media_files:
-            media_votes = await load_votes(MEDIA_VOTES_FILE)
-            seen_files = set()
-            cols = st.columns(gallery_columns)
-            col_idx = 0
-            for media_file in sorted(media_files, key=os.path.getmtime, reverse=True):
-                if media_file not in seen_files:
-                    seen_files.add(media_file)
-                    with cols[col_idx]:
-                        filename = os.path.basename(media_file)
-                        vote_count = media_votes.get(media_file, 0)
-                        st.markdown(f"**{filename}**")
-                        if media_file.endswith(('.png', '.jpg')):
-                            st.image(media_file, use_container_width=True)
-                        elif media_file.endswith('.mp4'):
-                            st.markdown(await get_video_html(media_file), unsafe_allow_html=True)
-                        if st.button(f"π {vote_count}", key=f"media_vote_{media_file}"):
-                            await save_vote(MEDIA_VOTES_FILE, media_file, await generate_user_hash(), st.session_state.username)
-                            st.rerun()
-                    col_idx = (col_idx + 1) % gallery_columns
-
-        # Full Log at End
-        st.subheader("Full Chat Log π")
+        st.sidebar.subheader("Chat History π")
         with open(HISTORY_FILE, 'r') as f:
            history_content = f.read()
-        st.markdown(history_content)
+        st.sidebar.markdown(history_content)
 
     loop.run_until_complete(async_interface())
 
+# Main execution - let's roll! π²π
+def main():
+    NODE_NAME, port = get_node_name()
+    create_streamlit_interface()
+
 if __name__ == "__main__":
     main()