awacke1 committed · verified
Commit 90747a3 · 1 Parent(s): 4f5107e

Update app.py

Files changed (1)
  1. app.py +247 -35
app.py CHANGED
@@ -10,14 +10,13 @@ import time
 import hashlib
 from PIL import Image
 import glob
-from urllib.parse import quote
 import base64
 import io
 import streamlit.components.v1 as components
 import edge_tts
 from audio_recorder_streamlit import audio_recorder
 import nest_asyncio
-import re # Added back the re import for regular expressions
+import re
 from streamlit_paste_button import paste_image_button
 
 # Patch for nested async - sneaky fix! 🐍✨
@@ -129,15 +128,32 @@ if 'pasted_image_data' not in st.session_state:
 if 'quote_line' not in st.session_state:
     st.session_state.quote_line = None
 if 'refresh_rate' not in st.session_state:
-    st.session_state.refresh_rate = 5 # Default refresh rate
+    st.session_state.refresh_rate = 5
 if 'base64_cache' not in st.session_state:
-    st.session_state.base64_cache = {} # Cache for base64 strings
+    st.session_state.base64_cache = {}
+if 'transcript_history' not in st.session_state:
+    st.session_state.transcript_history = []
+if 'last_transcript' not in st.session_state:
+    st.session_state.last_transcript = ""
+if 'image_hashes' not in st.session_state:
+    st.session_state.image_hashes = set() # Track unique image hashes
 
 # Timestamp wizardry - clock ticks with flair! ⏰🎩
 def format_timestamp_prefix(username):
     now = datetime.now()
     return f"{now.strftime('%I-%M-%p-ct-%m-%d-%Y')}-by-{username}"
 
+# Compute image hash from binary data
+def compute_image_hash(image_data):
+    if isinstance(image_data, Image.Image):
+        # Convert PIL Image to bytes
+        img_byte_arr = io.BytesIO()
+        image_data.save(img_byte_arr, format='PNG')
+        img_bytes = img_byte_arr.getvalue()
+    else:
+        img_bytes = image_data # Assume raw bytes if not PIL Image
+    return hashlib.md5(img_bytes).hexdigest()[:8] # Shortened hash for filename
+
 # Node naming - christening the beast! 🌐🍼
 def get_node_name():
     parser = argparse.ArgumentParser(description='Start a chat node with a specific name')
@@ -168,10 +184,13 @@ def clean_text_for_tts(text):
     return cleaned[:200] if cleaned else "No text to speak"
 
 # Chat saver - words locked tight! 💬🔒
-async def save_chat_entry(username, message):
+async def save_chat_entry(username, message, is_markdown=False):
     await asyncio.to_thread(log_action, username, "💬🔒 - Chat saver - words locked tight!")
-    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") # Keep this for log consistency
-    entry = f"[{timestamp}] {username}: {message}"
+    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    if is_markdown:
+        entry = f"[{timestamp}] {username}:\n```markdown\n{message}\n```"
+    else:
+        entry = f"[{timestamp}] {username}: {message}"
     await asyncio.to_thread(lambda: open(CHAT_FILE, 'a').write(f"{entry}\n"))
     voice = FUN_USERNAMES.get(username, "en-US-AriaNeural")
     cleaned_message = clean_text_for_tts(message)
@@ -289,10 +308,14 @@ def play_and_download_audio(file_path):
 # Image saver - pics preserved with naming! 📸💾
 async def save_pasted_image(image, username):
     await asyncio.to_thread(log_action, username, "📸💾 - Image saver - pics preserved!")
+    img_hash = compute_image_hash(image)
+    if img_hash in st.session_state.image_hashes:
+        return None # Suppress duplicate
     timestamp = format_timestamp_prefix(username)
-    filename = f"{timestamp}.png"
+    filename = f"{timestamp}-{img_hash}.png"
     filepath = os.path.join(MEDIA_DIR, filename)
     await asyncio.to_thread(image.save, filepath, "PNG")
+    st.session_state.image_hashes.add(img_hash)
     return filepath
 
 # Video renderer - movies roll with autoplay! 🎥🎬
@@ -375,6 +398,170 @@ async def perform_ai_lookup(query, vocal_summary=True, extended_refs=False, titl
     if audio_file:
         st.audio(audio_file)
 
+# ASR Component HTML
+ASR_HTML = """
+<html>
+    <head>
+        <title>Continuous Speech Demo</title>
+        <style>
+            body {
+                font-family: sans-serif;
+                padding: 20px;
+                max-width: 800px;
+                margin: 0 auto;
+            }
+            button {
+                padding: 10px 20px;
+                margin: 10px 5px;
+                font-size: 16px;
+            }
+            #status {
+                margin: 10px 0;
+                padding: 10px;
+                background: #e8f5e9;
+                border-radius: 4px;
+            }
+            #output {
+                white-space: pre-wrap;
+                padding: 15px;
+                background: #f5f5f5;
+                border-radius: 4px;
+                margin: 10px 0;
+                min-height: 100px;
+                max-height: 400px;
+                overflow-y: auto;
+            }
+            .controls {
+                margin: 10px 0;
+            }
+        </style>
+    </head>
+    <body>
+        <div class="controls">
+            <button id="start">Start Listening</button>
+            <button id="stop" disabled>Stop Listening</button>
+            <button id="clear">Clear Text</button>
+        </div>
+        <div id="status">Ready</div>
+        <div id="output"></div>
+
+        <script>
+            if (!('webkitSpeechRecognition' in window)) {
+                alert('Speech recognition not supported');
+            } else {
+                const recognition = new webkitSpeechRecognition();
+                const startButton = document.getElementById('start');
+                const stopButton = document.getElementById('stop');
+                const clearButton = document.getElementById('clear');
+                const status = document.getElementById('status');
+                const output = document.getElementById('output');
+                let fullTranscript = '';
+                let lastUpdateTime = Date.now();
+
+                recognition.continuous = true;
+                recognition.interimResults = true;
+
+                const startRecognition = () => {
+                    try {
+                        recognition.start();
+                        status.textContent = 'Listening...';
+                        startButton.disabled = true;
+                        stopButton.disabled = false;
+                    } catch (e) {
+                        console.error(e);
+                        status.textContent = 'Error: ' + e.message;
+                    }
+                };
+
+                window.addEventListener('load', () => {
+                    setTimeout(startRecognition, 1000);
+                });
+
+                startButton.onclick = startRecognition;
+
+                stopButton.onclick = () => {
+                    recognition.stop();
+                    status.textContent = 'Stopped';
+                    startButton.disabled = false;
+                    stopButton.disabled = true;
+                };
+
+                clearButton.onclick = () => {
+                    fullTranscript = '';
+                    output.textContent = '';
+                    sendDataToPython({value: '', dataType: "json"});
+                };
+
+                recognition.onresult = (event) => {
+                    let interimTranscript = '';
+                    let finalTranscript = '';
+
+                    for (let i = event.resultIndex; i < event.results.length; i++) {
+                        const transcript = event.results[i][0].transcript;
+                        if (event.results[i].isFinal) {
+                            finalTranscript += transcript + '\\n';
+                        } else {
+                            interimTranscript += transcript;
+                        }
+                    }
+
+                    if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
+                        if (finalTranscript) {
+                            fullTranscript += finalTranscript;
+                        }
+                        lastUpdateTime = Date.now();
+                        output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
+                        output.scrollTop = output.scrollHeight;
+                        sendDataToPython({value: fullTranscript, dataType: "json"});
+                    }
+                };
+
+                recognition.onend = () => {
+                    if (!stopButton.disabled) {
+                        try {
+                            recognition.start();
+                            console.log('Restarted recognition');
+                        } catch (e) {
+                            console.error('Failed to restart recognition:', e);
+                            status.textContent = 'Error restarting: ' + e.message;
+                            startButton.disabled = false;
+                            stopButton.disabled = true;
+                        }
+                    }
+                };
+
+                recognition.onerror = (event) => {
+                    console.error('Recognition error:', event.error);
+                    status.textContent = 'Error: ' + event.error;
+                    if (event.error === 'not-allowed' || event.error === 'service-not-allowed') {
+                        startButton.disabled = false;
+                        stopButton.disabled = true;
+                    }
+                };
+            }
+
+            function sendDataToPython(data) {
+                window.parent.postMessage({
+                    isStreamlitMessage: true,
+                    type: "streamlit:setComponentValue",
+                    ...data
+                }, "*");
+            }
+
+            window.addEventListener('load', function() {
+                window.setTimeout(function() {
+                    window.parent.postMessage({
+                        isStreamlitMessage: true,
+                        type: "streamlit:setFrameHeight",
+                        height: document.documentElement.clientHeight
+                    }, "*");
+                }, 0);
+            });
+        </script>
+    </body>
+</html>
+"""
+
 # Main execution - let’s roll! 🎲🚀
 def main():
     NODE_NAME, port = get_node_name()
@@ -399,6 +586,17 @@ def main():
         await process_voice_input(audio_bytes)
         st.rerun()
 
+    # Continuous Speech Input (ASR)
+    st.subheader("🎤 Continuous Speech Input")
+    asr_component = components.html(ASR_HTML, height=400)
+    if asr_component and isinstance(asr_component, dict) and 'value' in asr_component:
+        transcript = asr_component['value'].strip()
+        if transcript and transcript != st.session_state.last_transcript:
+            st.session_state.transcript_history.append(transcript)
+            await save_chat_entry(st.session_state.username, transcript, is_markdown=True)
+            st.session_state.last_transcript = transcript
+            st.rerun()
+
     # Load and display chat
     st.subheader(f"{START_ROOM} Chat 💬")
     chat_content = await load_chat()
@@ -414,7 +612,15 @@ def main():
     for i, line in enumerate(st.session_state.displayed_chat_lines):
         col1, col2, col3, col4 = st.columns([3, 1, 1, 2])
         with col1:
-            st.markdown(line)
+            if "```markdown" in line:
+                # Extract Markdown content
+                markdown_content = re.search(r'```markdown\n(.*?)```', line, re.DOTALL)
+                if markdown_content:
+                    st.markdown(markdown_content.group(1))
+                else:
+                    st.markdown(line)
+            else:
+                st.markdown(line)
         if "Pasted image:" in line or "Uploaded media:" in line:
             file_path = line.split(': ')[-1].strip()
             if os.path.exists(file_path):
@@ -460,20 +666,23 @@ def main():
 
     if st.session_state.quote_line:
         st.markdown(f"### Quoting: {st.session_state.quote_line}")
-        quote_response = st.text_area("Add your response", key="quote_response")
-        paste_result_quote = paste_image_button("📋 Paste Image with Quote", key="paste_button_quote")
+        quote_response = st.text_area("Add your response", key="quote_response", value=st.session_state.message_text)
+        paste_result_quote = paste_image_button("📋 Paste Image or Text with Quote", key="paste_button_quote")
         if paste_result_quote.image_data is not None:
-            st.image(paste_result_quote.image_data, caption="Received Image for Quote")
-            filename = await save_pasted_image(paste_result_quote.image_data, st.session_state.username)
-            if filename:
-                st.session_state.pasted_image_data = filename # Store for use in quote submission
+            if isinstance(paste_result_quote.image_data, str): # Text from clipboard
+                st.session_state.message_text = paste_result_quote.image_data
+            else: # Image
+                st.image(paste_result_quote.image_data, caption="Received Image for Quote")
+                filename = await save_pasted_image(paste_result_quote.image_data, st.session_state.username)
+                if filename:
+                    st.session_state.pasted_image_data = filename
         if st.button("Send Quote 🚀", key="send_quote"):
             markdown_response = f"### Quote Response\n- **Original**: {st.session_state.quote_line}\n- **{st.session_state.username} Replies**: {quote_response}"
             if st.session_state.pasted_image_data:
                 markdown_response += f"\n- **Image**: ![Pasted Image]({st.session_state.pasted_image_data})"
                 await save_chat_entry(st.session_state.username, f"Pasted image: {st.session_state.pasted_image_data}")
                 st.session_state.pasted_image_data = None
-            await save_chat_entry(st.session_state.username, markdown_response)
+            await save_chat_entry(st.session_state.username, markdown_response, is_markdown=True)
             st.session_state.quote_line = None
             st.session_state.message_text = ''
             st.rerun()
@@ -485,53 +694,56 @@ def main():
         st.rerun()
 
     message = st.text_input(f"Message as {st.session_state.username}", key="message_input", value=st.session_state.message_text, on_change=lambda: st.session_state.update(message_text=st.session_state.message_input))
-    paste_result_msg = paste_image_button("📋 Paste Image with Message", key="paste_button_msg")
+    paste_result_msg = paste_image_button("📋 Paste Image or Text with Message", key="paste_button_msg")
    if paste_result_msg.image_data is not None:
-        st.image(paste_result_msg.image_data, caption="Received Image for Message")
-        filename = await save_pasted_image(paste_result_msg.image_data, st.session_state.username)
-        if filename:
-            st.session_state.pasted_image_data = filename # Store for use in message submission
+        if isinstance(paste_result_msg.image_data, str): # Text from clipboard
+            st.session_state.message_text = paste_result_msg.image_data
+        else: # Image
+            st.image(paste_result_msg.image_data, caption="Received Image for Message")
+            filename = await save_pasted_image(paste_result_msg.image_data, st.session_state.username)
+            if filename:
+                st.session_state.pasted_image_data = filename
    if st.button("Send 🚀", key="send_button") and (message.strip() or st.session_state.pasted_image_data):
        if message.strip():
-            await save_chat_entry(st.session_state.username, message)
+            await save_chat_entry(st.session_state.username, message, is_markdown=True)
        if st.session_state.pasted_image_data:
            await save_chat_entry(st.session_state.username, f"Pasted image: {st.session_state.pasted_image_data}")
            st.session_state.pasted_image_data = None
        st.session_state.message_text = ''
        st.rerun()
 
-    # Main action tabs and model use choices
     tab_main = st.radio("Action:", ["🎤 Voice", "📸 Media", "🔍 ArXiv", "📝 Editor"], horizontal=True)
     useArxiv = st.checkbox("Search Arxiv for Research Paper Answers", value=True)
     useArxivAudio = st.checkbox("Generate Audio File for Research Paper Answers", value=False)
 
-    # Enhanced Media Gallery with Image, Audio, Video
     st.subheader("Upload Media 🎨🎶🎥")
     uploaded_file = st.file_uploader("Upload Media", type=['png', 'jpg', 'mp4', 'mp3'])
     if uploaded_file:
         timestamp = format_timestamp_prefix(st.session_state.username)
         username = st.session_state.username
         ext = uploaded_file.name.split('.')[-1]
-        filename = f"{timestamp}.{ext}"
-        file_path = os.path.join(MEDIA_DIR, filename)
-        await asyncio.to_thread(lambda: open(file_path, 'wb').write(uploaded_file.getbuffer()))
-        st.success(f"Uploaded {filename}")
-        await save_chat_entry(username, f"Uploaded media: {file_path}")
-        if file_path.endswith('.mp4'):
-            st.session_state.media_notifications.append(file_path)
+        file_hash = hashlib.md5(uploaded_file.getbuffer()).hexdigest()[:8]
+        if file_hash not in st.session_state.image_hashes:
+            filename = f"{timestamp}-{file_hash}.{ext}"
+            file_path = os.path.join(MEDIA_DIR, filename)
+            await asyncio.to_thread(lambda: open(file_path, 'wb').write(uploaded_file.getbuffer()))
+            st.success(f"Uploaded {filename}")
+            await save_chat_entry(username, f"Uploaded media: {file_path}")
+            st.session_state.image_hashes.add(file_hash)
+            if file_path.endswith('.mp4'):
+                st.session_state.media_notifications.append(file_path)
 
     st.subheader("Media Gallery 🎨🎶🎥")
     media_files = glob.glob(f"{MEDIA_DIR}/*.png") + glob.glob(f"{MEDIA_DIR}/*.jpg") + glob.glob(f"{MEDIA_DIR}/*.mp4") + glob.glob(f"{MEDIA_DIR}/*.mp3")
     if media_files:
         media_votes = await load_votes(MEDIA_VOTES_FILE)
         st.write("### All Media Uploads")
-        seen_files = set() # Track unique file paths to prevent duplicates
+        seen_files = set()
         for media_file in sorted(media_files, key=os.path.getmtime, reverse=True):
-            if media_file not in seen_files: # Only add if not already seen
+            if media_file not in seen_files:
                 seen_files.add(media_file)
                 filename = os.path.basename(media_file)
                 vote_count = media_votes.get(media_file, 0)
-
                 col1, col2 = st.columns([3, 1])
                 with col1:
                     st.markdown(f"**{filename}**")
@@ -554,7 +766,7 @@ def main():
         font_name, font_func = random.choice(UNICODE_FONTS)
         countdown_str = "".join(UNICODE_DIGITS[int(d)] for d in str(i)) if i < 10 else font_func(str(i))
         timer_placeholder.markdown(f"<p class='timer'>⏳ {font_func('Refresh in:')} {countdown_str}</p>", unsafe_allow_html=True)
-        time.sleep(1) # Use synchronous sleep
+        time.sleep(1)
     st.rerun()
 
     st.sidebar.subheader("Chat History 📜")