awacke1 committed
Commit 77b751e · verified · 1 Parent(s): d414ee5

Update app.py

Files changed (1)
  1. app.py +89 -163
app.py CHANGED
@@ -241,24 +241,24 @@ def load_mp3_viewer():
  if filename not in st.session_state['mp3_files']:
  st.session_state['mp3_files'][filename] = mp3

- async def save_chat_entry(username, message, is_markdown=False):
  central = pytz.timezone('US/Central')
  timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
- entry = f"[{timestamp}] {username}: {message}" if not is_markdown else f"[{timestamp}] {username}:\n```markdown\n{message}\n```"
  with open(CHAT_FILE, 'a') as f:
  f.write(f"{entry}\n")
- voice = FUN_USERNAMES.get(username, "en-US-AriaNeural")
  audio_file, _ = await async_edge_tts_generate(message, voice, username)
  if audio_file:
  with open(HISTORY_FILE, 'a') as f:
  f.write(f"[{timestamp}] {username}: Audio - {audio_file}\n")
  st.session_state['mp3_files'][os.path.basename(audio_file)] = audio_file
- else:
- print(f"No audio generated for message: '{message}' by {username}")
  await broadcast_message(f"{username}|{message}", "chat")
  st.session_state.last_chat_update = time.time()
  st.session_state.chat_history.append(entry)
- return audio_file

  async def load_chat():
  if not os.path.exists(CHAT_FILE):
@@ -267,11 +267,15 @@ async def load_chat():
  with open(CHAT_FILE, 'r') as f:
  content = f.read().strip()
  lines = content.split('\n')
- numbered_content = "\n".join(f"{i+1}. {line}" for i, line in enumerate(lines) if line.strip())
  return numbered_content

  # Claude Search Function
  async def perform_claude_search(query, username):
  client = anthropic.Anthropic(api_key=anthropic_key)
  response = client.messages.create(
  model="claude-3-sonnet-20240229",
@@ -282,22 +286,14 @@ async def perform_claude_search(query, username):
  st.markdown(f"### Claude's Reply 🧠\n{result}")

  # Save to chat history with audio
- audio_file, _ = await async_edge_tts_generate(result, st.session_state['tts_voice'], username)
- if audio_file:
- central = pytz.timezone('US/Central')
- timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
- entry = f"[{timestamp}] {username}:\n```markdown\nClaude Search: {query}\nResponse: {result}\n```"
- with open(CHAT_FILE, 'a') as f:
- f.write(f"{entry}\n")
- with open(HISTORY_FILE, 'a') as f:
- f.write(f"[{timestamp}] {username}: Audio - {audio_file}\n")
- st.session_state['mp3_files'][os.path.basename(audio_file)] = audio_file
- st.session_state.chat_history.append(entry)
- play_and_download_audio(audio_file)
- return result

  # ArXiv Search Function
  async def perform_arxiv_search(query, username):
  # Step 1: Claude Search
  client = anthropic.Anthropic(api_key=anthropic_key)
  claude_response = client.messages.create(
@@ -317,26 +313,10 @@ async def perform_arxiv_search(query, username):
  result = f"🔎 {enhanced_query}\n\n{refs}"
  st.markdown(f"### ArXiv Results 🔍\n{result}")

- # Parse papers and generate long audio
- papers = parse_arxiv_refs(refs)
- if papers:
- long_audio_text = f"Claude Search: {query}\nResponse: {claude_result}\n\nArXiv Results:\n"
- for i, paper in enumerate(papers, 1):
- long_audio_text += f"Paper {i}: {paper['title']} by {paper['authors']}. Summary: {paper['summary']}\n"
- audio_file, _ = await async_edge_tts_generate(long_audio_text, st.session_state['tts_voice'], username)
- if audio_file:
- central = pytz.timezone('US/Central')
- timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
- entry = f"[{timestamp}] {username}:\n```markdown\nArXiv Search: {query}\nClaude Response: {claude_result}\nArXiv Results: {refs}\n```"
- with open(CHAT_FILE, 'a') as f:
- f.write(f"{entry}\n")
- with open(HISTORY_FILE, 'a') as f:
- f.write(f"[{timestamp}] {username}: Audio - {audio_file}\n")
- st.session_state['mp3_files'][os.path.basename(audio_file)] = audio_file
- st.session_state.chat_history.append(entry)
- st.subheader("📝 Full ArXiv Audio")
- play_and_download_audio(audio_file)
- return result, papers

  async def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True, full_audio=False, useArxiv=True, useArxivAudio=False):
  start = time.time()
@@ -350,7 +330,7 @@ async def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_s
  st.markdown(response.content[0].text)

  result = response.content[0].text
- md_file = create_file(q, result, "System")
  audio_file, _ = await async_edge_tts_generate(result, st.session_state['tts_voice'], "System")
  st.subheader("📝 Main Response Audio")
  play_and_download_audio(audio_file)
@@ -363,7 +343,7 @@ async def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_s
  q, 10, "Semantic Search", "mistralai/Mixtral-8x7B-Instruct-v0.1", api_name="/update_with_rag_md"
  )[0]
  result = f"🔎 {q}\n\n{refs}"
- md_file = create_file(q, result, "System")
  audio_file, _ = await async_edge_tts_generate(result, st.session_state['tts_voice'], "System")
  st.subheader("📝 ArXiv Response Audio")
  play_and_download_audio(audio_file)
@@ -386,16 +366,17 @@ async def websocket_handler(websocket, path):
  username = st.session_state.get('username', random.choice(list(FUN_USERNAMES.keys())))
  chat_content = await load_chat()
  if not any(f"Client-{client_id}" in line for line in chat_content.split('\n')):
- await save_chat_entry("System 🌟", f"{username} has joined {START_ROOM}!")
  try:
  async for message in websocket:
  if '|' in message:
  username, content = message.split('|', 1)
- await save_chat_entry(username, content)
  else:
  await websocket.send("ERROR|Message format: username|content")
  except websockets.ConnectionClosed:
- await save_chat_entry("System 🌟", f"{username} has left {START_ROOM}!")
  finally:
  if room_id in st.session_state.active_connections and client_id in st.session_state.active_connections[room_id]:
  del st.session_state.active_connections[room_id][client_id]
@@ -581,7 +562,7 @@ def main():
  available = [n for n in FUN_USERNAMES if not any(f"{n} has joined" in l for l in asyncio.run(load_chat()).split('\n'))]
  st.session_state.username = random.choice(available or list(FUN_USERNAMES.keys()))
  st.session_state.tts_voice = FUN_USERNAMES[st.session_state.username]
- asyncio.run(save_chat_entry("System 🌟", f"{st.session_state.username} has joined {START_ROOM}!"))
  save_username(st.session_state.username)

  st.title(f"{Site_Name} for {st.session_state.username}")
@@ -590,35 +571,18 @@ def main():

  # Speech Component at Top Level
  mycomponent = components.declare_component("mycomponent", path="mycomponent")
- val = mycomponent(my_input_value="Hello from MyComponent")
- if val:
- val_stripped = val.replace('\\n', ' ')
- edited_input = st.text_area("✏️ Edit Input:", value=val_stripped, height=100, key="speech_input")
- run_option = st.selectbox("Model:", ["Chat", "Arxiv"], key="model_select")
- col1, col2 = st.columns(2)
- with col1:
- st.checkbox("⚙ AutoRun", value=True, key="autorun") # Let Streamlit manage autorun state
- with col2:
- full_audio = st.checkbox("📚 FullAudio", value=False, key="full_audio")
-
- input_changed = (val != st.session_state.old_val)
-
- if st.session_state.autorun and input_changed:
- st.session_state.old_val = val
- st.session_state.last_query = edited_input
- if run_option == "Chat":
- asyncio.run(save_chat_entry(st.session_state.username, edited_input, True))
- elif run_option == "Arxiv":
- asyncio.run(perform_ai_lookup(edited_input, useArxiv=True, useArxivAudio=full_audio))
- elif st.button("▶ Run", key="run_button"):
  st.session_state.old_val = val
- st.session_state.last_query = edited_input
- if run_option == "Chat":
- asyncio.run(save_chat_entry(st.session_state.username, edited_input, True))
- elif run_option == "Arxiv":
- asyncio.run(perform_ai_lookup(edited_input, useArxiv=True, useArxivAudio=full_audio))

- tab_main = st.radio("Action:", ["🎤 Chat & Voice", "📸 Media", "🔍 ArXiv", "📚 PDF to Audio"], horizontal=True, key="tab_main")
  useArxiv = st.checkbox("Search ArXiv", True, key="use_arxiv")
  useArxivAudio = st.checkbox("ArXiv Audio", False, key="use_arxiv_audio")
  st.checkbox("Autosend Chat", value=True, key="autosend")
@@ -630,23 +594,7 @@ def main():
  chat_content = asyncio.run(load_chat())
  chat_container = st.container()
  with chat_container:
- lines = chat_content.split('\n')
- for i, line in enumerate(lines):
- if line.strip():
- col1, col2 = st.columns([5, 1])
- with col1:
- st.markdown(line)
- for mp3_name, mp3_path in st.session_state['mp3_files'].items():
- if st.session_state.username in mp3_name and any(word in mp3_name for word in line.split()):
- st.audio(mp3_path)
- break
- with col2:
- if st.button(f"👍", key=f"chat_vote_{i}"):
- user_hash = generate_user_hash()
- save_vote(QUOTE_VOTES_FILE, line, user_hash)
- st.session_state.timer_start = time.time()
- save_username(st.session_state.username)
- st.rerun()

  message = st.text_input(f"Message as {st.session_state.username}", key="message_input")
  paste_result = paste_image_button("📋 Paste Image or Text", key="paste_button_msg")
@@ -665,80 +613,48 @@ def main():
  col_send, col_claude, col_arxiv = st.columns([1, 1, 1])

  with col_send:
- if st.session_state.autosend or st.button("Send 🚀", key="send_button"): # Existing Send button
  if message.strip():
- asyncio.run(save_chat_entry(st.session_state.username, message, True))
  if st.session_state.pasted_image_data:
- asyncio.run(save_chat_entry(st.session_state.username, f"Pasted image: {st.session_state.pasted_image_data}"))
  st.session_state.pasted_image_data = None
  st.session_state.timer_start = time.time()
  save_username(st.session_state.username)
  st.rerun()

  with col_claude:
- if st.button("🧠 Claude", key="claude_button"): # New Claude button
  if message.strip():
- asyncio.run(perform_claude_search(message, st.session_state.username))
  st.session_state.timer_start = time.time()
  save_username(st.session_state.username)
  st.rerun()

  with col_arxiv:
- if st.button("🔍 ArXiv", key="arxiv_button"): # New ArXiv button
  if message.strip():
- asyncio.run(perform_arxiv_search(message, st.session_state.username))
  st.session_state.timer_start = time.time()
  save_username(st.session_state.username)
  st.rerun()

- # 📸 Media
- elif tab_main == "📸 Media":
- st.header("📸 Media Gallery")
- all_files = sorted(glob.glob("*.md") + glob.glob("*.mp3") + glob.glob("*.png") + glob.glob("*.mp4"), key=os.path.getmtime, reverse=True)
- md_files = [f for f in all_files if f.endswith('.md')]
- mp3_files = [f for f in all_files if f.endswith('.mp3')]
- png_files = [f for f in all_files if f.endswith('.png')]
- mp4_files = [f for f in all_files if f.endswith('.mp4')]
-
- st.subheader("All Submitted Text")
- all_md_content = concatenate_markdown_files()
- st.markdown(all_md_content)
-
- st.subheader("🎵 Audio (MP3)")
- for mp3 in mp3_files:
- with st.expander(os.path.basename(mp3)):
- st.audio(mp3)
- st.markdown(get_download_link(mp3, "mp3"), unsafe_allow_html=True)
-
- st.subheader("🖼️ Images (PNG)")
- for png in png_files:
- with st.expander(os.path.basename(png)):
- st.image(png, use_container_width=True)
- st.markdown(get_download_link(png, "png"), unsafe_allow_html=True)
-
- st.subheader("🎥 Videos (MP4)")
- for mp4 in mp4_files:
- with st.expander(os.path.basename(mp4)):
- st.video(mp4)
- st.markdown(get_download_link(mp4, "mp4"), unsafe_allow_html=True)
-
- uploaded_file = st.file_uploader("Upload Media", type=['png', 'mp4', 'mp3'], key="media_upload")
- if uploaded_file:
- filename = f"{format_timestamp_prefix(st.session_state.username)}-{hashlib.md5(uploaded_file.getbuffer()).hexdigest()[:8]}.{uploaded_file.name.split('.')[-1]}"
- with open(filename, 'wb') as f:
- f.write(uploaded_file.getbuffer())
- asyncio.run(save_chat_entry(st.session_state.username, f"Uploaded: {filename}"))
- st.session_state.timer_start = time.time()
- save_username(st.session_state.username)
- st.rerun()
-
  # 🔍 ArXiv
  elif tab_main == "🔍 ArXiv":
  st.subheader("🔍 Query ArXiv")
  q = st.text_input("🔍 Query:", key="arxiv_query")
  if q and q != st.session_state.last_query:
  st.session_state.last_query = q
- if st.session_state.autosearch or st.button("🔍 Run", key="arxiv_run"): # Use st.session_state.autosearch directly
  result, papers = asyncio.run(perform_ai_lookup(q, useArxiv=useArxiv, useArxivAudio=useArxivAudio))
  for i, p in enumerate(papers, 1):
  with st.expander(f"{i}. 📄 {p['title']}"):
@@ -763,13 +679,44 @@ def main():
  if audios.get(i):
  st.audio(audios[i])
  st.markdown(get_download_link(audios[i], "mp3"), unsafe_allow_html=True)
- asyncio.run(save_chat_entry(st.session_state.username, f"PDF Page {i+1} converted to audio: {audios[i]}"))

  # 🗂️ Sidebar with Dialog and Audio
  st.sidebar.subheader("Voice Settings")
  new_username = st.sidebar.selectbox("Change Name/Voice", list(FUN_USERNAMES.keys()), index=list(FUN_USERNAMES.keys()).index(st.session_state.username), key="username_select")
  if new_username != st.session_state.username:
- asyncio.run(save_chat_entry("System 🌟", f"{st.session_state.username} changed to {new_username}"))
  st.session_state.username, st.session_state.tts_voice = new_username, FUN_USERNAMES[new_username]
  st.session_state.timer_start = time.time()
  save_username(st.session_state.username)
@@ -777,24 +724,7 @@ def main():

  st.sidebar.markdown("### 💬 Chat Dialog & Media")
  chat_content = asyncio.run(load_chat())
- lines = chat_content.split('\n')
- all_files = sorted(glob.glob("*.md") + glob.glob("*.mp3") + glob.glob("*.png") + glob.glob("*.mp4"), key=os.path.getmtime, reverse=True)
- for line in lines[-10:]:
- if line.strip():
- st.sidebar.markdown(f"**{line}**")
- for f in all_files:
- f_name = os.path.basename(f)
- if st.session_state.username in f_name and any(word in f_name for word in line.split()):
- if f.endswith('.mp3'):
- st.sidebar.audio(f)
- st.sidebar.markdown(get_download_link(f, "mp3"), unsafe_allow_html=True)
- elif f.endswith('.png'):
- st.sidebar.image(f, use_container_width=True)
- st.sidebar.markdown(get_download_link(f, "png"), unsafe_allow_html=True)
- elif f.endswith('.mp4'):
- st.sidebar.video(f)
- st.sidebar.markdown(get_download_link(f, "mp4"), unsafe_allow_html=True)
- break

  st.sidebar.subheader("Vote Totals")
  chat_votes = load_votes(QUOTE_VOTES_FILE)
@@ -804,10 +734,6 @@ def main():
  for image, count in image_votes.items():
  st.sidebar.write(f"{image}: {count} votes")

- md_files = [f for f in all_files if f.endswith('.md')]
- mp3_files = [f for f in all_files if f.endswith('.mp3')]
- png_files = [f for f in all_files if f.endswith('.png')]
- mp4_files = [f for f in all_files if f.endswith('.mp4')]
  st.sidebar.markdown("### 📂 File History")
  for f in all_files[:10]:
  st.sidebar.write(f"{FILE_EMOJIS.get(f.split('.')[-1], '📄')} {os.path.basename(f)}")
 
  if filename not in st.session_state['mp3_files']:
  st.session_state['mp3_files'][filename] = mp3

+ async def save_chat_entry(username, message, voice, is_markdown=False):
+ if not message.strip():
+ return None, None
  central = pytz.timezone('US/Central')
  timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
+ entry = f"[{timestamp}] {username} ({voice}): {message}" if not is_markdown else f"[{timestamp}] {username} ({voice}):\n```markdown\n{message}\n```"
+ md_file = create_file(entry, username, "md")
  with open(CHAT_FILE, 'a') as f:
  f.write(f"{entry}\n")
  audio_file, _ = await async_edge_tts_generate(message, voice, username)
  if audio_file:
  with open(HISTORY_FILE, 'a') as f:
  f.write(f"[{timestamp}] {username}: Audio - {audio_file}\n")
  st.session_state['mp3_files'][os.path.basename(audio_file)] = audio_file
  await broadcast_message(f"{username}|{message}", "chat")
  st.session_state.last_chat_update = time.time()
  st.session_state.chat_history.append(entry)
+ return md_file, audio_file

  async def load_chat():
  if not os.path.exists(CHAT_FILE):

  with open(CHAT_FILE, 'r') as f:
  content = f.read().strip()
  lines = content.split('\n')
+ # Remove duplicates and empty lines
+ unique_lines = list(dict.fromkeys(line for line in lines if line.strip()))
+ numbered_content = "\n".join(f"{i+1}. {line}" for i, line in enumerate(unique_lines))
  return numbered_content

  # Claude Search Function
  async def perform_claude_search(query, username):
+ if not query.strip():
+ return None, None
  client = anthropic.Anthropic(api_key=anthropic_key)
  response = client.messages.create(
  model="claude-3-sonnet-20240229",

  st.markdown(f"### Claude's Reply 🧠\n{result}")

  # Save to chat history with audio
+ voice = FUN_USERNAMES.get(username, "en-US-AriaNeural")
+ md_file, audio_file = await save_chat_entry(username, f"Claude Search: {query}\nResponse: {result}", voice, True)
+ return md_file, audio_file

  # ArXiv Search Function
  async def perform_arxiv_search(query, username):
+ if not query.strip():
+ return None, None
  # Step 1: Claude Search
  client = anthropic.Anthropic(api_key=anthropic_key)
  claude_response = client.messages.create(
 
  result = f"🔎 {enhanced_query}\n\n{refs}"
  st.markdown(f"### ArXiv Results 🔍\n{result}")

+ # Save to chat history with audio
+ voice = FUN_USERNAMES.get(username, "en-US-AriaNeural")
+ md_file, audio_file = await save_chat_entry(username, f"ArXiv Search: {query}\nClaude Response: {claude_result}\nArXiv Results: {refs}", voice, True)
+ return md_file, audio_file

  async def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True, full_audio=False, useArxiv=True, useArxivAudio=False):
  start = time.time()

  st.markdown(response.content[0].text)

  result = response.content[0].text
+ md_file = create_file(result, "System", "md")
  audio_file, _ = await async_edge_tts_generate(result, st.session_state['tts_voice'], "System")
  st.subheader("📝 Main Response Audio")
  play_and_download_audio(audio_file)

  q, 10, "Semantic Search", "mistralai/Mixtral-8x7B-Instruct-v0.1", api_name="/update_with_rag_md"
  )[0]
  result = f"🔎 {q}\n\n{refs}"
+ md_file = create_file(result, "System", "md")
  audio_file, _ = await async_edge_tts_generate(result, st.session_state['tts_voice'], "System")
  st.subheader("📝 ArXiv Response Audio")
  play_and_download_audio(audio_file)
 
  username = st.session_state.get('username', random.choice(list(FUN_USERNAMES.keys())))
  chat_content = await load_chat()
  if not any(f"Client-{client_id}" in line for line in chat_content.split('\n')):
+ await save_chat_entry("System 🌟", f"{username} has joined {START_ROOM}!", "en-US-AriaNeural")
  try:
  async for message in websocket:
  if '|' in message:
  username, content = message.split('|', 1)
+ voice = FUN_USERNAMES.get(username, "en-US-AriaNeural")
+ await save_chat_entry(username, content, voice)
  else:
  await websocket.send("ERROR|Message format: username|content")
  except websockets.ConnectionClosed:
+ await save_chat_entry("System 🌟", f"{username} has left {START_ROOM}!", "en-US-AriaNeural")
  finally:
  if room_id in st.session_state.active_connections and client_id in st.session_state.active_connections[room_id]:
  del st.session_state.active_connections[room_id][client_id]

  available = [n for n in FUN_USERNAMES if not any(f"{n} has joined" in l for l in asyncio.run(load_chat()).split('\n'))]
  st.session_state.username = random.choice(available or list(FUN_USERNAMES.keys()))
  st.session_state.tts_voice = FUN_USERNAMES[st.session_state.username]
+ asyncio.run(save_chat_entry("System 🌟", f"{st.session_state.username} has joined {START_ROOM}!", "en-US-AriaNeural"))
  save_username(st.session_state.username)

  st.title(f"{Site_Name} for {st.session_state.username}")
 
  # Speech Component at Top Level
  mycomponent = components.declare_component("mycomponent", path="mycomponent")
+ val = mycomponent(my_input_value="")
+ if val and val != st.session_state.old_val:
+ val_stripped = val.strip().replace('\n', ' ')
+ if val_stripped:
  st.session_state.old_val = val
+ voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
+ md_file, audio_file = asyncio.run(save_chat_entry(st.session_state.username, val_stripped, voice))
+ if audio_file:
+ play_and_download_audio(audio_file)
+ st.rerun()

+ tab_main = st.radio("Action:", ["🎤 Chat & Voice", "🔍 ArXiv", "📚 PDF to Audio"], horizontal=True, key="tab_main")
  useArxiv = st.checkbox("Search ArXiv", True, key="use_arxiv")
  useArxivAudio = st.checkbox("ArXiv Audio", False, key="use_arxiv_audio")
  st.checkbox("Autosend Chat", value=True, key="autosend")

  chat_content = asyncio.run(load_chat())
  chat_container = st.container()
  with chat_container:
+ st.markdown(chat_content)

  message = st.text_input(f"Message as {st.session_state.username}", key="message_input")
  paste_result = paste_image_button("📋 Paste Image or Text", key="paste_button_msg")

  col_send, col_claude, col_arxiv = st.columns([1, 1, 1])

  with col_send:
+ if st.session_state.autosend or st.button("Send 🚀", key="send_button"):
+ voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
  if message.strip():
+ md_file, audio_file = asyncio.run(save_chat_entry(st.session_state.username, message, voice, True))
+ if audio_file:
+ play_and_download_audio(audio_file)
  if st.session_state.pasted_image_data:
+ asyncio.run(save_chat_entry(st.session_state.username, f"Pasted image: {st.session_state.pasted_image_data}", voice))
  st.session_state.pasted_image_data = None
  st.session_state.timer_start = time.time()
  save_username(st.session_state.username)
  st.rerun()

  with col_claude:
+ if st.button("🧠 Claude", key="claude_button"):
+ voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
  if message.strip():
+ md_file, audio_file = asyncio.run(perform_claude_search(message, st.session_state.username))
+ if audio_file:
+ play_and_download_audio(audio_file)
  st.session_state.timer_start = time.time()
  save_username(st.session_state.username)
  st.rerun()

  with col_arxiv:
+ if st.button("🔍 ArXiv", key="arxiv_button"):
+ voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
  if message.strip():
+ md_file, audio_file = asyncio.run(perform_arxiv_search(message, st.session_state.username))
+ if audio_file:
+ play_and_download_audio(audio_file)
  st.session_state.timer_start = time.time()
  save_username(st.session_state.username)
  st.rerun()

  # 🔍 ArXiv
  elif tab_main == "🔍 ArXiv":
  st.subheader("🔍 Query ArXiv")
  q = st.text_input("🔍 Query:", key="arxiv_query")
  if q and q != st.session_state.last_query:
  st.session_state.last_query = q
+ if st.session_state.autosearch or st.button("🔍 Run", key="arxiv_run"):
  result, papers = asyncio.run(perform_ai_lookup(q, useArxiv=useArxiv, useArxivAudio=useArxivAudio))
  for i, p in enumerate(papers, 1):
  with st.expander(f"{i}. 📄 {p['title']}"):
 
  if audios.get(i):
  st.audio(audios[i])
  st.markdown(get_download_link(audios[i], "mp3"), unsafe_allow_html=True)
+ voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
+ asyncio.run(save_chat_entry(st.session_state.username, f"PDF Page {i+1} converted to audio: {audios[i]}", voice))
+
+ # Always Visible Media Gallery
+ st.header("📸 Media Gallery")
+ all_files = sorted(glob.glob("*.md") + glob.glob("*.mp3") + glob.glob("*.png") + glob.glob("*.mp4"), key=os.path.getmtime, reverse=True)
+ md_files = [f for f in all_files if f.endswith('.md')]
+ mp3_files = [f for f in all_files if f.endswith('.mp3')]
+ png_files = [f for f in all_files if f.endswith('.png')]
+ mp4_files = [f for f in all_files if f.endswith('.mp4')]
+
+ st.subheader("All Submitted Text")
+ all_md_content = concatenate_markdown_files()
+ st.markdown(all_md_content)
+
+ st.subheader("🎵 Audio (MP3)")
+ for mp3 in mp3_files:
+ with st.expander(os.path.basename(mp3)):
+ st.audio(mp3)
+ st.markdown(get_download_link(mp3, "mp3"), unsafe_allow_html=True)
+
+ st.subheader("🖼️ Images (PNG)")
+ for png in png_files:
+ with st.expander(os.path.basename(png)):
+ st.image(png, use_container_width=True)
+ st.markdown(get_download_link(png, "png"), unsafe_allow_html=True)
+
+ st.subheader("🎥 Videos (MP4)")
+ for mp4 in mp4_files:
+ with st.expander(os.path.basename(mp4)):
+ st.video(mp4)
+ st.markdown(get_download_link(mp4, "mp4"), unsafe_allow_html=True)

  # 🗂️ Sidebar with Dialog and Audio
  st.sidebar.subheader("Voice Settings")
  new_username = st.sidebar.selectbox("Change Name/Voice", list(FUN_USERNAMES.keys()), index=list(FUN_USERNAMES.keys()).index(st.session_state.username), key="username_select")
  if new_username != st.session_state.username:
+ asyncio.run(save_chat_entry("System 🌟", f"{st.session_state.username} changed to {new_username}", "en-US-AriaNeural"))
  st.session_state.username, st.session_state.tts_voice = new_username, FUN_USERNAMES[new_username]
  st.session_state.timer_start = time.time()
  save_username(st.session_state.username)

  st.sidebar.markdown("### 💬 Chat Dialog & Media")
  chat_content = asyncio.run(load_chat())
+ st.sidebar.markdown(chat_content)

  st.sidebar.subheader("Vote Totals")
  chat_votes = load_votes(QUOTE_VOTES_FILE)

  for image, count in image_votes.items():
  st.sidebar.write(f"{image}: {count} votes")

  st.sidebar.markdown("### 📂 File History")
  for f in all_files[:10]:
  st.sidebar.write(f"{FILE_EMOJIS.get(f.split('.')[-1], '📄')} {os.path.basename(f)}")
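For reference, a minimal sketch (not part of the commit) of the call pattern the reworked function implies: `save_chat_entry(username, message, voice, is_markdown=False)` now takes an explicit Edge-TTS voice and returns `(md_file, audio_file)` instead of a bare audio path. It assumes it runs inside app.py, where `FUN_USERNAMES`, `save_chat_entry`, and `play_and_download_audio` are defined; the `post_message` wrapper itself is hypothetical.

```python
import asyncio

# Hypothetical driver illustrating the new call pattern from this commit.
def post_message(username: str, message: str) -> None:
    # Resolve the user's TTS voice, falling back to the default used throughout the diff.
    voice = FUN_USERNAMES.get(username, "en-US-AriaNeural")
    # New signature: explicit voice, markdown flag, (md_file, audio_file) return value.
    md_file, audio_file = asyncio.run(save_chat_entry(username, message, voice, True))
    if audio_file:
        play_and_download_audio(audio_file)
```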