awacke1 committed · verified
Commit eb9c602 · 1 Parent(s): a8499f4

Update app.py

Files changed (1)
  1. app.py +152 -467
app.py CHANGED
@@ -20,8 +20,11 @@ import re
from streamlit_paste_button import paste_image_button
import pytz
import shutil
- import wave
- import audioop
+ import logging
+
+ # Set up basic logging
+ logging.basicConfig(level=logging.DEBUG, filename="app.log", filemode="a",
+                     format="%(asctime)s - %(levelname)s - %(message)s")

# Patch for nested async
nest_asyncio.apply()
@@ -69,11 +72,8 @@ STATE_FILE = "user_state.txt"
AUDIO_DIR = "audio_logs"
HISTORY_DIR = "history_logs"
MEDIA_DIR = "media_files"
- os.makedirs(CHAT_DIR, exist_ok=True)
- os.makedirs(VOTE_DIR, exist_ok=True)
- os.makedirs(AUDIO_DIR, exist_ok=True)
- os.makedirs(HISTORY_DIR, exist_ok=True)
- os.makedirs(MEDIA_DIR, exist_ok=True)
+ for dir in [CHAT_DIR, VOTE_DIR, AUDIO_DIR, HISTORY_DIR, MEDIA_DIR]:
+     os.makedirs(dir, exist_ok=True)

CHAT_FILE = os.path.join(CHAT_DIR, "global_chat.md")
QUOTE_VOTES_FILE = os.path.join(VOTE_DIR, "quote_votes.md")
@@ -83,27 +83,10 @@ HISTORY_FILE = os.path.join(HISTORY_DIR, "chat_history.md")
# Unicode digits
UNICODE_DIGITS = {i: f"{i}\uFE0F⃣" for i in range(10)}

- # Font collection
+ # Font collection (simplified for brevity)
UNICODE_FONTS = [
    ("Normal", lambda x: x),
    ("Bold", lambda x: "".join(chr(ord(c) + 0x1D400 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D41A - 0x61) if 'a' <= c <= 'z' else c for c in x)),
-     ("Italic", lambda x: "".join(chr(ord(c) + 0x1D434 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D44E - 0x61) if 'a' <= c <= 'z' else c for c in x)),
-     ("Bold Italic", lambda x: "".join(chr(ord(c) + 0x1D468 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D482 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
-     ("Script", lambda x: "".join(chr(ord(c) + 0x1D49C - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D4B6 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
-     ("Bold Script", lambda x: "".join(chr(ord(c) + 0x1D4D0 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D4EA - 0x61) if 'a' <= c <= 'z' else c for c in x)),
-     ("Fraktur", lambda x: "".join(chr(ord(c) + 0x1D504 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D51E - 0x61) if 'a' <= c <= 'z' else c for c in x)),
-     ("Bold Fraktur", lambda x: "".join(chr(ord(c) + 0x1D56C - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D586 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
-     ("Double Struck", lambda x: "".join(chr(ord(c) + 0x1D538 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D552 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
-     ("Sans Serif", lambda x: "".join(chr(ord(c) + 0x1D5A0 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D5BA - 0x61) if 'a' <= c <= 'z' else c for c in x)),
-     ("Sans Serif Bold", lambda x: "".join(chr(ord(c) + 0x1D5D4 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D5EE - 0x61) if 'a' <= c <= 'z' else c for c in x)),
-     ("Sans Serif Italic", lambda x: "".join(chr(ord(c) + 0x1D608 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D622 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
-     ("Sans Serif Bold Italic", lambda x: "".join(chr(ord(c) + 0x1D63C - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D656 - 0x61) if 'a' <= c <= 'z' else c for c in x)),
-     ("Monospace", lambda x: "".join(chr(ord(c) + 0x1D670 - 0x41) if 'A' <= c <= 'Z' else chr(ord(c) + 0x1D68A - 0x61) if 'a' <= c <= 'z' else c for c in x)),
-     ("Circled", lambda x: "".join(chr(ord(c) - 0x41 + 0x24B6) if 'A' <= c <= 'Z' else chr(ord(c) - 0x61 + 0x24D0) if 'a' <= c <= 'z' else c for c in x)),
-     ("Squared", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F130) if 'A' <= c <= 'Z' else c for c in x)),
-     ("Negative Circled", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F150) if 'A' <= c <= 'Z' else c for c in x)),
-     ("Negative Squared", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F170) if 'A' <= c <= 'Z' else c for c in x)),
-     ("Regional Indicator", lambda x: "".join(chr(ord(c) - 0x41 + 0x1F1E6) if 'A' <= c <= 'Z' else c for c in x)),
]

# Global state
@@ -119,10 +102,6 @@ if 'last_chat_update' not in st.session_state:
    st.session_state.last_chat_update = 0
if 'displayed_chat_lines' not in st.session_state:
    st.session_state.displayed_chat_lines = []
- if 'old_val' not in st.session_state:
-     st.session_state.old_val = ""
- if 'last_query' not in st.session_state:
-     st.session_state.last_query = ""
if 'message_text' not in st.session_state:
    st.session_state.message_text = ""
if 'audio_cache' not in st.session_state:
@@ -162,22 +141,13 @@ def get_node_name():
    parser.add_argument('--node-name', type=str, default=None)
    parser.add_argument('--port', type=int, default=8501)
    args = parser.parse_args()
-     username = st.session_state.get('username', 'System 🌟')
-     log_action(username, "🌐🍼 - Node naming - christening the beast!")
    return args.node_name or f"node-{uuid.uuid4().hex[:8]}", args.port

def log_action(username, action):
-     if 'action_log' not in st.session_state:
-         st.session_state.action_log = {}
-     user_log = st.session_state.action_log.setdefault(username, {})
-     current_time = time.time()
-     user_log = {k: v for k, v in user_log.items() if current_time - v < 10}
-     st.session_state.action_log[username] = user_log
-     if action not in user_log:
+     logging.debug(f"{username}: {action}")
+     with open(HISTORY_FILE, 'a') as f:
        central = pytz.timezone('US/Central')
-         with open(HISTORY_FILE, 'a') as f:
-             f.write(f"[{datetime.now(central).strftime('%Y-%m-%d %H:%M:%S')}] {username}: {action}\n")
-         user_log[action] = current_time
+         f.write(f"[{datetime.now(central).strftime('%Y-%m-%d %H:%M:%S')}] {username}: {action}\n")

def clean_text_for_tts(text):
    cleaned = re.sub(r'[#*!\[\]]+', '', text)
@@ -185,108 +155,64 @@ clean_text_for_tts(text):
    return cleaned[:200] if cleaned else "No text to speak"

async def save_chat_entry(username, message, is_markdown=False):
-     await asyncio.to_thread(log_action, username, "💬🔒 - Chat saver - words locked tight!")
-     central = pytz.timezone('US/Central')
-     timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
-     if is_markdown:
-         entry = f"[{timestamp}] {username}:\n```markdown\n{message}\n```"
-     else:
-         entry = f"[{timestamp}] {username}: {message}"
-     await asyncio.to_thread(lambda: open(CHAT_FILE, 'a').write(f"{entry}\n"))
-     voice = FUN_USERNAMES.get(username, "en-US-AriaNeural")
-     cleaned_message = clean_text_for_tts(message)
-     audio_file = await async_edge_tts_generate(cleaned_message, voice)
-     if audio_file:
-         with open(HISTORY_FILE, 'a') as f:
-             f.write(f"[{timestamp}] {username}: Audio generated - {audio_file}\n")
-     await broadcast_message(f"{username}|{message}", "chat")
-     st.session_state.last_chat_update = time.time()
-     return audio_file
+     try:
+         log_action(username, "Saving chat entry")
+         central = pytz.timezone('US/Central')
+         timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
+         entry = f"[{timestamp}] {username}:\n```markdown\n{message}\n```" if is_markdown else f"[{timestamp}] {username}: {message}"
+         with open(CHAT_FILE, 'a') as f:
+             f.write(f"{entry}\n")
+         voice = FUN_USERNAMES.get(username, "en-US-AriaNeural")
+         cleaned_message = clean_text_for_tts(message)
+         audio_file = await async_edge_tts_generate(cleaned_message, voice)
+         if audio_file:
+             with open(HISTORY_FILE, 'a') as f:
+                 f.write(f"[{timestamp}] {username}: Audio generated - {audio_file}\n")
+         await broadcast_message(f"{username}|{message}", "chat")
+         st.session_state.last_chat_update = time.time()
+         return audio_file
+     except Exception as e:
+         logging.error(f"Error in save_chat_entry: {str(e)}")
+         return None

async def load_chat():
-     username = st.session_state.get('username', 'System 🌟')
-     await asyncio.to_thread(log_action, username, "📜🚀 - Chat loader - history unleashed!")
-     if not os.path.exists(CHAT_FILE):
-         await asyncio.to_thread(lambda: open(CHAT_FILE, 'a').write(f"# {START_ROOM} Chat\n\nWelcome to the cosmic hub - start chatting! 🎤\n"))
-     with open(CHAT_FILE, 'r') as f:
-         content = await asyncio.to_thread(f.read)
-     return content
-
- async def get_user_list(chat_content):
-     username = st.session_state.get('username', 'System 🌟')
-     await asyncio.to_thread(log_action, username, "👥🎉 - User lister - who's in the gang!")
-     users = set()
-     for line in chat_content.split('\n'):
-         if line.strip() and ': ' in line:
-             user = line.split(': ')[1].split(' ')[0]
-             users.add(user)
-     return sorted(list(users))
-
- async def has_joined_before(client_id, chat_content):
-     username = st.session_state.get('username', 'System 🌟')
-     await asyncio.to_thread(log_action, username, "🚪🔍 - Join checker - been here before?")
-     return any(f"Client-{client_id}" in line for line in chat_content.split('\n'))
-
- async def get_message_suggestions(chat_content, prefix):
-     username = st.session_state.get('username', 'System 🌟')
-     await asyncio.to_thread(log_action, username, "💡📝 - Suggestion maker - old quips resurface!")
-     lines = chat_content.split('\n')
-     messages = [line.split(': ', 1)[1] for line in lines if ': ' in line and line.strip()]
-     return [msg for msg in messages if msg.lower().startswith(prefix.lower())][:5]
-
- async def save_vote(file, item, user_hash, username, comment=""):
-     await asyncio.to_thread(log_action, username, "👍📊 - Vote saver - cheers recorded!")
-     central = pytz.timezone('US/Central')
-     timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
-     entry = f"[{timestamp}] {user_hash} voted for {item}"
-     await asyncio.to_thread(lambda: open(file, 'a').write(f"{entry}\n"))
-     await asyncio.to_thread(lambda: open(HISTORY_FILE, "a").write(f"- {timestamp} - User {user_hash} voted for {item}\n"))
-     chat_message = f"{username} upvoted: \"{item}\""
-     if comment:
-         chat_message += f" - {comment}"
-     await save_chat_entry(username, chat_message)
-
- async def load_votes(file):
-     username = st.session_state.get('username', 'System 🌟')
-     await asyncio.to_thread(log_action, username, "🏆📈 - Vote counter - tallying the love!")
-     if not os.path.exists(file):
-         await asyncio.to_thread(lambda: open(file, 'w').write("# Vote Tally\n\nNo votes yet - get clicking! 🖱️\n"))
-     with open(file, 'r') as f:
-         content = await asyncio.to_thread(f.read)
-     lines = content.strip().split('\n')[2:]
-     votes = {}
-     user_votes = set()
-     for line in lines:
-         if line.strip() and 'voted for' in line:
-             user_hash = line.split('] ')[1].split(' voted for ')[0]
-             item = line.split('voted for ')[1]
-             vote_key = f"{user_hash}-{item}"
-             if vote_key not in user_votes:
-                 votes[item] = votes.get(item, 0) + 1
-                 user_votes.add(vote_key)
-     return votes
-
- async def generate_user_hash():
-     username = st.session_state.get('username', 'System 🌟')
-     await asyncio.to_thread(log_action, username, "🔑🕵️ - Hash generator - secret codes ahoy!")
-     if 'user_hash' not in st.session_state:
-         st.session_state.user_hash = hashlib.md5(str(random.getrandbits(128)).encode()).hexdigest()[:8]
-     return st.session_state.user_hash
+     try:
+         if not os.path.exists(CHAT_FILE):
+             with open(CHAT_FILE, 'a') as f:
+                 f.write(f"# {START_ROOM} Chat\n\nWelcome to the cosmic hub - start chatting! 🎤\n")
+         with open(CHAT_FILE, 'r') as f:
+             return f.read()
+     except Exception as e:
+         logging.error(f"Error in load_chat: {str(e)}")
+         return ""
+
+ async def save_audio_recording(transcript, username, voice):
+     try:
+         timestamp = format_timestamp_prefix(username)
+         voice_id = voice.split('-')[-1].lower()
+         filename = f"rec_{username}_{voice_id}_{timestamp}.mp3"
+         filepath = os.path.join(AUDIO_DIR, filename)
+
+         # Use edge_tts to generate audio from transcript since we don't have raw audio
+         audio_file = await async_edge_tts_generate(transcript or "Audio recording", voice, file_format="mp3")
+         if audio_file and os.path.exists(audio_file):
+             os.rename(audio_file, filepath)
+             return filepath
+         return None
+     except Exception as e:
+         logging.error(f"Error in save_audio_recording: {str(e)}")
+         return None

async def async_edge_tts_generate(text, voice, rate=0, pitch=0, file_format="mp3"):
-     username = st.session_state.get('username', 'System 🌟')
-     await asyncio.to_thread(log_action, username, "🎶🌟 - Audio maker - voices come alive!")
-     timestamp = format_timestamp_prefix(username)
-     filename = f"{timestamp}.{file_format}"
-     filepath = os.path.join(AUDIO_DIR, filename)
-     communicate = edge_tts.Communicate(text, voice, rate=f"{rate:+d}%", pitch=f"{pitch:+d}Hz")
    try:
+         timestamp = format_timestamp_prefix(st.session_state.get('username', 'System 🌟'))
+         filename = f"{timestamp}.{file_format}"
+         filepath = os.path.join(AUDIO_DIR, filename)
+         communicate = edge_tts.Communicate(text, voice, rate=f"{rate:+d}%", pitch=f"{pitch:+d}Hz")
        await communicate.save(filepath)
        return filepath if os.path.exists(filepath) else None
-     except edge_tts.exceptions.NoAudioReceived:
-         with open(HISTORY_FILE, 'a') as f:
-             central = pytz.timezone('US/Central')
-             f.write(f"[{datetime.now(central).strftime('%Y-%m-%d %H:%M:%S')}] {username}: Audio failed - No audio received for '{text}'\n")
+     except Exception as e:
+         logging.error(f"Error in async_edge_tts_generate: {str(e)}")
        return None

def play_and_download_audio(file_path):
@@ -297,176 +223,58 @@ play_and_download_audio(file_path):
            b64 = base64.b64encode(f.read()).decode()
            st.session_state.base64_cache[file_path] = b64
    b64 = st.session_state.base64_cache[file_path]
-     dl_link = f'<a href="data:audio/mpeg;base64,{b64}" download="{os.path.basename(file_path)}">🎵 Download {os.path.basename(file_path)}</a>'
-     st.markdown(dl_link, unsafe_allow_html=True)
+     st.markdown(f'<a href="data:audio/mpeg;base64,{b64}" download="{os.path.basename(file_path)}">🎵 Download {os.path.basename(file_path)}</a>', unsafe_allow_html=True)

- async def save_pasted_image(image, username):
-     await asyncio.to_thread(log_action, username, "📸💾 - Image saver - pics preserved!")
-     img_hash = compute_image_hash(image)
-     if img_hash in st.session_state.image_hashes:
-         return None
-     timestamp = format_timestamp_prefix(username)
-     filename = f"{timestamp}-{img_hash}.png"
-     filepath = os.path.join(MEDIA_DIR, filename)
-     await asyncio.to_thread(image.save, filepath, "PNG")
-     st.session_state.image_hashes.add(img_hash)
-     return filepath
-
- async def get_video_html(video_path, width="100%"):
-     username = st.session_state.get('username', 'System 🌟')
-     await asyncio.to_thread(log_action, username, "🎥🎬 - Video renderer - movies roll!")
-     with open(video_path, 'rb') as f:
-         video_data = await asyncio.to_thread(f.read)
-     video_url = f"data:video/mp4;base64,{base64.b64encode(video_data).decode()}"
-     return f'<video width="{width}" controls autoplay><source src="{video_url}" type="video/mp4">Your browser does not support the video tag.</video>'
-
- async def get_audio_html(audio_path, width="100%"):
-     username = st.session_state.get('username', 'System 🌟')
-     await asyncio.to_thread(log_action, username, "🎶✈️ - Audio renderer - sounds soar!")
-     audio_url = f"data:audio/mpeg;base64,{base64.b64encode(await asyncio.to_thread(open, audio_path, 'rb').read()).decode()}"
-     return f'<audio controls style="width: {width};"><source src="{audio_url}" type="audio/mpeg">Your browser does not support the audio element.</audio>'
+ async def broadcast_message(message, room_id):
+     if room_id in st.session_state.active_connections:
+         disconnected = []
+         for client_id, ws in st.session_state.active_connections[room_id].items():
+             try:
+                 await ws.send(message)
+             except websockets.ConnectionClosed:
+                 disconnected.append(client_id)
+         for client_id in disconnected:
+             del st.session_state.active_connections[room_id][client_id]

async def websocket_handler(websocket, path):
-     username = st.session_state.get('username', 'System 🌟')
-     await asyncio.to_thread(log_action, username, "🌐🔗 - Websocket handler - chat links up!")
    try:
        client_id = str(uuid.uuid4())
        room_id = "chat"
        st.session_state.active_connections.setdefault(room_id, {})[client_id] = websocket
        chat_content = await load_chat()
        username = st.session_state.get('username', random.choice(list(FUN_USERNAMES.keys())))
-         if not await has_joined_before(client_id, chat_content):
+         if not any(f"Client-{client_id}" in line for line in chat_content.split('\n')):
            await save_chat_entry(f"Client-{client_id}", f"{username} has joined {START_ROOM}!")
        async for message in websocket:
            parts = message.split('|', 1)
            if len(parts) == 2:
                username, content = parts
                await save_chat_entry(username, content)
-     except websockets.ConnectionClosed:
-         pass
+     except Exception as e:
+         logging.error(f"Error in websocket_handler: {str(e)}")
    finally:
        if room_id in st.session_state.active_connections and client_id in st.session_state.active_connections[room_id]:
            del st.session_state.active_connections[room_id][client_id]

- async def broadcast_message(message, room_id):
-     username = st.session_state.get('username', 'System 🌟')
-     await asyncio.to_thread(log_action, username, "📢✈️ - Message broadcaster - words fly far!")
-     if room_id in st.session_state.active_connections:
-         disconnected = []
-         for client_id, ws in st.session_state.active_connections[room_id].items():
-             try:
-                 await ws.send(message)
-             except websockets.ConnectionClosed:
-                 disconnected.append(client_id)
-         for client_id in disconnected:
-             del st.session_state.active_connections[room_id][client_id]
-
async def run_websocket_server():
-     username = st.session_state.get('username', 'System 🌟')
-     await asyncio.to_thread(log_action, username, "🖥️🌀 - Server starter - web spins up!")
-     if not st.session_state.server_running:
-         server = await websockets.serve(websocket_handler, '0.0.0.0', 8765)
-         st.session_state.server_running = True
-         await server.wait_closed()
-
- async def save_audio_recording(audio_bytes, username, voice):
-     timestamp = format_timestamp_prefix(username)
-     voice_id = voice.split('-')[-1].lower()
-     filename = f"rec_{username}_{voice_id}_{timestamp}.mp3"
-     filepath = os.path.join(AUDIO_DIR, filename)
-
-     with open(filepath, 'wb') as f:
-         f.write(audio_bytes)
-
-     cleaned_text = "Audio recording"
-     audio_file = await async_edge_tts_generate(cleaned_text, voice, file_format="mp3")
-     if audio_file:
-         os.rename(audio_file, filepath)
-
-     return filepath
-
- async def process_voice_input(audio_bytes, username, voice):
-     await asyncio.to_thread(log_action, username, "🎤📝 - Voice processor - speech to text!")
-     if audio_bytes:
-         audio_file = await save_audio_recording(audio_bytes, username, voice)
-         text = f"Voice recording saved: {os.path.basename(audio_file)}"
-         await save_chat_entry(username, text)
-         return audio_file
-     return None
-
- async def perform_ai_lookup(query, vocal_summary=True, extended_refs=False, titles_summary=True, full_audio=False, useArxiv=True, useArxivAudio=False):
-     username = st.session_state.get('username', 'System 🌟')
-     result = f"AI Lookup Result for '{query}' (Arxiv: {useArxiv}, Audio: {useArxivAudio})"
-     await save_chat_entry(username, result)
-     if useArxivAudio:
-         audio_file = await async_edge_tts_generate(result, FUN_USERNAMES.get(username, "en-US-AriaNeural"))
-         if audio_file:
-             st.audio(audio_file)
-
- def delete_user_files():
-     protected_files = {'app.py', 'requirements.txt', 'README.md'}
-     deleted_files = []
-     directories = [MEDIA_DIR, AUDIO_DIR, CHAT_DIR, VOTE_DIR, HISTORY_DIR]
-
-     for directory in directories:
-         if os.path.exists(directory):
-             for root, _, files in os.walk(directory):
-                 for file in files:
-                     file_path = os.path.join(root, file)
-                     if os.path.basename(file_path) not in protected_files:
-                         try:
-                             os.remove(file_path)
-                             deleted_files.append(file_path)
-                         except Exception as e:
-                             st.error(f"Failed to delete {file_path}: {e}")
-             try:
-                 shutil.rmtree(directory, ignore_errors=True)
-                 os.makedirs(directory, exist_ok=True)
-             except Exception as e:
-                 st.error(f"Failed to remove directory {directory}: {e}")
-
-     st.session_state.image_hashes.clear()
-     st.session_state.audio_cache.clear()
-     st.session_state.base64_cache.clear()
-     st.session_state.displayed_chat_lines.clear()
-
-     return deleted_files
+     try:
+         if not st.session_state.server_running:
+             server = await websockets.serve(websocket_handler, '0.0.0.0', 8765)
+             st.session_state.server_running = True
+             await server.wait_closed()
+     except Exception as e:
+         logging.error(f"Error in run_websocket_server: {str(e)}")

ASR_HTML = """
<html>
<head>
    <title>Continuous Speech Demo</title>
    <style>
-         body {
-             font-family: sans-serif;
-             padding: 20px;
-             max-width: 800px;
-             margin: 0 auto;
-         }
-         button {
-             padding: 10px 20px;
-             margin: 10px 5px;
-             font-size: 16px;
-         }
-         #status {
-             margin: 10px 0;
-             padding: 10px;
-             background: #e8f5e9;
-             border-radius: 4px;
-         }
-         #output {
-             white-space: pre-wrap;
-             padding: 15px;
-             background: #f5f5f5;
-             border-radius: 4px;
-             margin: 10px 0;
-             min-height: 100px;
-             max-height: 400px;
-             overflow-y: auto;
-         }
-         .controls {
-             margin: 10px 0;
-         }
+         body { font-family: sans-serif; padding: 20px; max-width: 800px; margin: 0 auto; }
+         button { padding: 10px 20px; margin: 10px 5px; font-size: 16px; }
+         #status { margin: 10px 0; padding: 10px; background: #e8f5e9; border-radius: 4px; }
+         #output { white-space: pre-wrap; padding: 15px; background: #f5f5f5; border-radius: 4px; margin: 10px 0; min-height: 100px; max-height: 400px; overflow-y: auto; }
+         .controls { margin: 10px 0; }
    </style>
</head>
<body>
@@ -506,10 +314,6 @@ ASR_HTML = """
            }
        };

-         window.addEventListener('load', () => {
-             setTimeout(startRecognition, 1000);
-         });
-
        startButton.onclick = startRecognition;

        stopButton.onclick = () => {
@@ -518,11 +322,7 @@ ASR_HTML = """
            startButton.disabled = false;
            stopButton.disabled = true;
            if (fullTranscript) {
-                 sendDataToPython({
-                     value: fullTranscript,
-                     dataType: "json",
-                     stopped: true
-                 });
+                 sendDataToPython({value: fullTranscript, dataType: "json", stopped: true});
            }
        };

@@ -552,11 +352,7 @@ ASR_HTML = """
                lastUpdateTime = Date.now();
                output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
                output.scrollTop = output.scrollHeight;
-                 sendDataToPython({
-                     value: fullTranscript,
-                     dataType: "json",
-                     stopped: false
-                 });
+                 sendDataToPython({value: fullTranscript, dataType: "json", stopped: false});
            }
        };

@@ -564,7 +360,6 @@ ASR_HTML = """
            if (!stopButton.disabled) {
                try {
                    recognition.start();
-                     console.log('Restarted recognition');
                } catch (e) {
                    console.error('Failed to restart recognition:', e);
                    status.textContent = 'Error restarting: ' + e.message;
@@ -577,10 +372,8 @@ ASR_HTML = """
        recognition.onerror = (event) => {
            console.error('Recognition error:', event.error);
            status.textContent = 'Error: ' + event.error;
-             if (event.error === 'not-allowed' || event.error === 'service-not-allowed') {
-                 startButton.disabled = false;
-                 stopButton.disabled = true;
-             }
+             startButton.disabled = false;
+             stopButton.disabled = true;
        };
    }

@@ -608,177 +401,69 @@ ASR_HTML = """

def main():
    NODE_NAME, port = get_node_name()
-     loop = asyncio.new_event_loop()
-     asyncio.set_event_loop(loop)
-
-     async def async_interface():
-         if 'username' not in st.session_state:
-             chat_content = await load_chat()
-             available_names = [name for name in FUN_USERNAMES if not any(f"{name} has joined" in line for line in chat_content.split('\n'))]
-             st.session_state.username = random.choice(available_names) if available_names else random.choice(list(FUN_USERNAMES.keys()))
-             st.session_state.voice = FUN_USERNAMES[st.session_state.username]
-             st.markdown(f"**🎙️ Voice Selected**: {st.session_state.voice} 🗣️ for {st.session_state.username}")
-
-         st.title(f"🤖🧠MMO {st.session_state.username}📝🔬")
-         st.markdown(f"Welcome to {START_ROOM} - chat, vote, upload, paste images, and enjoy quoting! 🎉")
-
-         if not st.session_state.server_task:
-             st.session_state.server_task = loop.create_task(run_websocket_server())
-
-         audio_bytes = audio_recorder()
-         if audio_bytes:
-             audio_file = await process_voice_input(audio_bytes, st.session_state.username, st.session_state.voice)
-             if audio_file:
-                 st.audio(audio_file)
-                 st.rerun()
+     logging.info(f"Starting app with node name: {NODE_NAME}, port: {port}")

-         st.subheader("🎤 Continuous Speech Input")
-         asr_component = components.html(ASR_HTML, height=400)
-         if asr_component and isinstance(asr_component, dict) and 'value' in asr_component:
-             transcript = asr_component['value'].strip()
-             stopped = asr_component.get('stopped', False)
-             if transcript and transcript != st.session_state.last_transcript:
-                 st.session_state.transcript_history.append(transcript)
-                 st.session_state.last_transcript = transcript
-                 if stopped:
-                     audio_file = await save_audio_recording(audio_bytes, st.session_state.username, st.session_state.voice)
-                     await save_chat_entry(st.session_state.username, f"Voice message: {transcript}\nAudio file: {os.path.basename(audio_file)}", is_markdown=True)
-                     st.rerun()
-
-         st.subheader("🎵 Recorded Audio Files")
-         audio_files = glob.glob(f"{AUDIO_DIR}/rec_{st.session_state.username}_*.mp3")
-         if audio_files:
-             st.write(f"Found {len(audio_files)} recordings for {st.session_state.username}")
-             for audio_file in sorted(audio_files, key=os.path.getmtime, reverse=True):
-                 col1, col2 = st.columns([3, 1])
-                 with col1:
-                     st.audio(audio_file)
-                     st.write(f"File: {os.path.basename(audio_file)}")
-                 with col2:
-                     if audio_file not in st.session_state.base64_cache:
-                         with open(audio_file, "rb") as f:
-                             b64 = base64.b64encode(f.read()).decode()
-                         st.session_state.base64_cache[audio_file] = b64
-                     b64 = st.session_state.base64_cache[audio_file]
-                     dl_link = f'<a href="data:audio/mpeg;base64,{b64}" download="{os.path.basename(audio_file)}">🎵 Download</a>'
-                     st.markdown(dl_link, unsafe_allow_html=True)
-
-         st.subheader(f"{START_ROOM} Chat 💬")
-         chat_content = await load_chat()
-         chat_lines = chat_content.split('\n')
-         chat_votes = await load_votes(QUOTE_VOTES_FILE)
+     if 'username' not in st.session_state:
+         chat_content = asyncio.run(load_chat())
+         available_names = [name for name in FUN_USERNAMES if not any(f"{name} has joined" in line for line in chat_content.split('\n'))]
+         st.session_state.username = random.choice(available_names) if available_names else random.choice(list(FUN_USERNAMES.keys()))
+         st.session_state.voice = FUN_USERNAMES[st.session_state.username]
+         st.markdown(f"**🎙️ Voice Selected**: {st.session_state.voice} 🗣️ for {st.session_state.username}")

-         current_time = time.time()
-         if current_time - st.session_state.last_chat_update > 1 or not st.session_state.displayed_chat_lines:
-             new_lines = [line for line in chat_lines if line.strip() and ': ' in line and line not in st.session_state.displayed_chat_lines and not line.startswith('#')]
-             st.session_state.displayed_chat_lines.extend(new_lines)
-             st.session_state.last_chat_update = current_time
+     st.title(f"🤖🧠MMO {st.session_state.username}📝🔬")
+     st.markdown(f"Welcome to {START_ROOM} - chat, vote, upload, paste images, and enjoy quoting! 🎉")

-         for i, line in enumerate(st.session_state.displayed_chat_lines):
-             col1, col2, col3, col4 = st.columns([3, 1, 1, 2])
-             with col1:
-                 if "```markdown" in line:
-                     markdown_content = re.search(r'```markdown\n(.*?)```', line, re.DOTALL)
-                     if markdown_content:
-                         st.markdown(markdown_content.group(1))
-                     else:
-                         st.markdown(line)
-                 else:
-                     st.markdown(line)
-                 if "Pasted image:" in line or "Uploaded media:" in line:
-                     file_path = line.split(': ')[-1].strip()
-                     if os.path.exists(file_path):
-                         if file_path not in st.session_state.base64_cache:
-                             with open(file_path, "rb") as f:
-                                 b64 = base64.b64encode(f.read()).decode()
-                             st.session_state.base64_cache[file_path] = b64
-                         b64 = st.session_state.base64_cache[file_path]
-                         mime_type = "image/png" if file_path.endswith(('.png', '.jpg')) else "video/mp4" if file_path.endswith('.mp4') else "audio/mpeg"
-                         if file_path.endswith(('.png', '.jpg')):
-                             st.image(file_path, use_container_width=True)
-                             dl_link = f'<a href="data:{mime_type};base64,{b64}" download="{os.path.basename(file_path)}">📥 Download {os.path.basename(file_path)}</a>'
-                             st.markdown(dl_link, unsafe_allow_html=True)
-                         elif file_path.endswith('.mp4'):
-                             st.markdown(await get_video_html(file_path), unsafe_allow_html=True)
-                             dl_link = f'<a href="data:{mime_type};base64,{b64}" download="{os.path.basename(file_path)}">📥 Download {os.path.basename(file_path)}</a>'
-                             st.markdown(dl_link, unsafe_allow_html=True)
-                         elif file_path.endswith('.mp3'):
-                             st.markdown(await get_audio_html(file_path), unsafe_allow_html=True)
-                             dl_link = f'<a href="data:{mime_type};base64,{b64}" download="{os.path.basename(file_path)}">📥 Download {os.path.basename(file_path)}</a>'
-                             st.markdown(dl_link, unsafe_allow_html=True)
-             with col2:
-                 vote_count = chat_votes.get(line.split('. ')[1] if '. ' in line else line, 0)
-                 if st.button(f"👍 {vote_count}", key=f"chat_vote_{i}"):
-                     comment = st.session_state.message_text
-                     await save_vote(QUOTE_VOTES_FILE, line.split('. ')[1] if '. ' in line else line, await generate_user_hash(), st.session_state.username, comment)
-                     st.session_state.message_text = ''
-                     st.rerun()
-             with col3:
-                 if st.button("📢 Quote", key=f"quote_{i}"):
-                     st.session_state.quote_line = line
-                     st.rerun()
-             with col4:
-                 username = line.split(': ')[1].split(' ')[0]
-                 cache_key = f"{line}_{FUN_USERNAMES.get(username, 'en-US-AriaNeural')}"
-                 if cache_key not in st.session_state.audio_cache:
-                     cleaned_text = clean_text_for_tts(line.split(': ', 1)[1])
-                     audio_file = await async_edge_tts_generate(cleaned_text, FUN_USERNAMES.get(username, "en-US-AriaNeural"))
-                     st.session_state.audio_cache[cache_key] = audio_file
-                 audio_file = st.session_state.audio_cache.get(cache_key)
-                 if audio_file:
-                     play_and_download_audio(audio_file)
-
-         if st.session_state.quote_line:
-             st.markdown(f"### Quoting: {st.session_state.quote_line}")
-             quote_response = st.text_area("Add your response", key="quote_response", value=st.session_state.message_text)
-             paste_result_quote = paste_image_button("📋 Paste Image or Text with Quote", key="paste_button_quote")
-             if paste_result_quote.image_data is not None:
-                 if isinstance(paste_result_quote.image_data, str):
-                     st.session_state.message_text = paste_result_quote.image_data
-                     st.text_area("Add your response", key="quote_response", value=st.session_state.message_text)
-                 else:
-                     st.image(paste_result_quote.image_data, caption="Received Image for Quote")
-                     filename = await save_pasted_image(paste_result_quote.image_data, st.session_state.username)
-                     if filename:
-                         st.session_state.pasted_image_data = filename
-             if st.button("Send Quote 🚀", key="send_quote"):
-                 markdown_response = f"### Quote Response\n- **Original**: {st.session_state.quote_line}\n- **{st.session_state.username} Replies**: {quote_response}"
-                 if st.session_state.pasted_image_data:
-                     markdown_response += f"\n- **Image**: ![Pasted Image]({st.session_state.pasted_image_data})"
-                     await save_chat_entry(st.session_state.username, f"Pasted image: {st.session_state.pasted_image_data}")
-                     st.session_state.pasted_image_data = None
-                 await save_chat_entry(st.session_state.username, markdown_response, is_markdown=True)
-                 st.session_state.quote_line = None
-                 st.session_state.message_text = ''
-                 st.rerun()
+     if not st.session_state.server_task:
+         st.session_state.server_task = asyncio.create_task(run_websocket_server())

-         current_selection = st.session_state.username if st.session_state.username in FUN_USERNAMES else ""
-         new_username = st.selectbox("Change Name and Voice", [""] + list(FUN_USERNAMES.keys()), index=(list(FUN_USERNAMES.keys()).index(current_selection) + 1 if current_selection else 0), format_func=lambda x: f"{x} ({FUN_USERNAMES.get(x, 'No Voice')})" if x else "Select a name")
-         if new_username and new_username != st.session_state.username:
-             await save_chat_entry("System 🌟", f"{st.session_state.username} changed name to {new_username}")
-             st.session_state.username = new_username
-             st.session_state.voice = FUN_USERNAMES[new_username]
-             st.markdown(f"**🎙️ Voice Changed**: {st.session_state.voice} 🗣️ for {st.session_state.username}")
+     audio_bytes = audio_recorder()
+     if audio_bytes:
+         audio_file = asyncio.run(process_voice_input(audio_bytes, st.session_state.username, st.session_state.voice))
+         if audio_file:
+             st.audio(audio_file)
            st.rerun()

-         message = st.text_input(f"Message as {st.session_state.username} (Voice: {st.session_state.voice})", key="message_input", value=st.session_state.message_text)
-         paste_result_msg = paste_image_button("📋 Paste Image or Text with Message", key="paste_button_msg")
-         if paste_result_msg.image_data is not None:
-             if isinstance(paste_result_msg.image_data, str):
-                 st.session_state.message_text = paste_result_msg.image_data
-                 st.text_input(f"Message as {st.session_state.username} (Voice: {st.session_state.voice})", key="message_input_paste", value=st.session_state.message_text)
-             else:
-                 st.image(paste_result_msg.image_data, caption="Received Image for Message")
-                 filename = await save_pasted_image(paste_result_msg.image_data, st.session_state.username)
-                 if filename:
-                     st.session_state.pasted_image_data = filename
-         if st.button("Send 🚀", key="send_button") and (message.strip() or st.session_state.pasted_image_data):
-             if message.strip():
-                 audio_file = await save_chat_entry(st.session_state.username, message, is_markdown=True)
-                 if audio_file:
-                     st.session_state.audio_cache[f"{message}_{FUN_USERNAMES[st.session_state.username]}"] = audio_file
-             if st.session_state.pasted_image_data:
-                 await save_chat_entry(st.session_state.username, f"Pasted image: {st.session_state.pasted_image_data}")
-                 st.session_state.pasted_image_data = None
-             st.session_state.message_text = ''
-             st.rerun()
+     st.subheader("🎤 Continuous Speech Input")
+     asr_component = components.html(ASR_HTML, height=400)
+     if asr_component and isinstance(asr_component, dict) and 'value' in asr_component:
+         transcript = asr_component['value'].strip()
+         stopped = asr_component.get('stopped', False)
+         if transcript and transcript != st.session_state.last_transcript:
+             st.session_state.transcript_history.append(transcript)
+             st.session_state.last_transcript = transcript
+             if stopped:
+                 audio_file = asyncio.run(save_audio_recording(transcript, st.session_state.username, st.session_state.voice))
+                 asyncio.run(save_chat_entry(st.session_state.username, f"Voice message: {transcript}\nAudio file: {os.path.basename(audio_file)}", is_markdown=True))
+                 st.rerun()
+
+     st.subheader("🎵 Recorded Audio Files")
+     audio_files = glob.glob(f"{AUDIO_DIR}/rec_{st.session_state.username}_*.mp3")
+     if audio_files:
+         st.write(f"Found {len(audio_files)} recordings for {st.session_state.username}")
+         for audio_file in sorted(audio_files, key=os.path.getmtime, reverse=True):
+             col1, col2 = st.columns([3, 1])
+             with col1:
+                 st.audio(audio_file)
+                 st.write(f"File: {os.path.basename(audio_file)}")
+             with col2:
+                 play_and_download_audio(audio_file)
+
+     st.subheader(f"{START_ROOM} Chat 💬")
+     chat_content = asyncio.run(load_chat())
+     chat_lines = chat_content.split('\n')
+     for i, line in enumerate(chat_lines):
+         if line.strip() and ': ' in line and not line.startswith('#'):
+             st.markdown(line)
+
+     message = st.text_input(f"Message as {st.session_state.username} (Voice: {st.session_state.voice})", key="message_input", value=st.session_state.message_text)
+     if st.button("Send 🚀", key="send_button") and message.strip():
+         asyncio.run(save_chat_entry(st.session_state.username, message, is_markdown=True))
+         st.session_state.message_text = ''
+         st.rerun()
+
+ if __name__ == "__main__":
+     try:
+         main()
+     except Exception as e:
+         logging.error(f"Main execution failed: {str(e)}")
+         st.error(f"An error occurred: {str(e)}")
+ st.error(f"An error occurred: {str(e)}")