import streamlit as st
import asyncio
import websockets
import uuid
from datetime import datetime
import os
import random
import time
import hashlib
from PIL import Image
import glob
import base64
import io
import streamlit.components.v1 as components
import edge_tts
from audio_recorder_streamlit import audio_recorder
import nest_asyncio
import re
from streamlit_paste_button import paste_image_button
import pytz
import shutil
import anthropic
import openai
from PyPDF2 import PdfReader
import threading
import json
import zipfile
from gradio_client import Client
from dotenv import load_dotenv
from streamlit_marquee import streamlit_marquee
from collections import defaultdict, Counter
import pandas as pd
# 🛠️ Patch asyncio for nesting
nest_asyncio.apply()

# 🎨 Page Config
st.set_page_config(
    page_title="🚲TalkingAIResearcher🏆",
    page_icon="🚲🏆",
    layout="wide",
    initial_sidebar_state="auto"
)
# 🌟 Static Config
icons = '🤖🧠🔬📝'
Site_Name = '🤖🧠Chat & Quote Node📝🔬'
START_ROOM = "Sector 🌌"
FUN_USERNAMES = {
    "CosmicJester 🌌": "en-US-AriaNeural",
    "PixelPanda 🐼": "en-US-JennyNeural",
    "QuantumQuack 🦆": "en-GB-SoniaNeural",
    "StellarSquirrel 🐿️": "en-AU-NatashaNeural",
    "GizmoGuru ⚙️": "en-CA-ClaraNeural",
    "NebulaNinja 🌠": "en-US-GuyNeural",
    "ByteBuster 💾": "en-GB-RyanNeural",
    "GalacticGopher 🌍": "en-AU-WilliamNeural",
    "RocketRaccoon 🚀": "en-CA-LiamNeural",
    "EchoElf 🧝": "en-US-AnaNeural",
    "PhantomFox 🦊": "en-US-BrandonNeural",
    "WittyWizard 🧙": "en-GB-ThomasNeural",
    "LunarLlama 🌙": "en-AU-FreyaNeural",
    "SolarSloth ☀️": "en-CA-LindaNeural",
    "AstroAlpaca 🦙": "en-US-ChristopherNeural",
    "CyberCoyote 🐺": "en-GB-ElliotNeural",
    "MysticMoose 🦌": "en-AU-JamesNeural",
    "GlitchGnome 🧚": "en-CA-EthanNeural",
    "VortexViper 🐍": "en-US-AmberNeural",
    "ChronoChimp 🐒": "en-GB-LibbyNeural"
}
EDGE_TTS_VOICES = list(set(FUN_USERNAMES.values()))
FILE_EMOJIS = {"md": "📝", "mp3": "🎵", "png": "🖼️", "mp4": "🎥"}
# 📁 Directories (Media at Root)
for d in ["chat_logs", "vote_logs", "audio_logs", "history_logs", "audio_cache"]:
    os.makedirs(d, exist_ok=True)

CHAT_DIR = "chat_logs"
VOTE_DIR = "vote_logs"
MEDIA_DIR = "."
AUDIO_CACHE_DIR = "audio_cache"
AUDIO_DIR = "audio_logs"
STATE_FILE = "user_state.txt"
CHAT_FILE = os.path.join(CHAT_DIR, "global_chat.md")
QUOTE_VOTES_FILE = os.path.join(VOTE_DIR, "quote_votes.md")
IMAGE_VOTES_FILE = os.path.join(VOTE_DIR, "image_votes.md")
HISTORY_FILE = os.path.join(VOTE_DIR, "vote_history.md")
# 🔑 API Keys
load_dotenv()
anthropic_key = os.getenv('ANTHROPIC_API_KEY', st.secrets.get('ANTHROPIC_API_KEY', ""))
openai_api_key = os.getenv('OPENAI_API_KEY', st.secrets.get('OPENAI_API_KEY', ""))
openai_client = openai.OpenAI(api_key=openai_api_key)
# 🕒 Timestamp Helper
def format_timestamp_prefix(username=""):
    central = pytz.timezone('US/Central')
    now = datetime.now(central)
    return f"{now.strftime('%Y%m%d_%H%M%S')}-by-{username}"
# 📈 Performance Timer
class PerformanceTimer:
    def __init__(self, name):
        self.name, self.start = name, None

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        duration = time.time() - self.start
        st.session_state['operation_timings'][self.name] = duration
        st.session_state['performance_metrics'][self.name].append(duration)
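# Usage sketch (the label "tts_generation" is illustrative only):
#     with PerformanceTimer("tts_generation"):
#         ...  # timed work
# On exit the elapsed seconds are stored in st.session_state['operation_timings']
# and appended to st.session_state['performance_metrics'].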
# 🎛️ Session State Init
def init_session_state():
    defaults = {
        'server_running': False, 'server_task': None, 'active_connections': {},
        'media_notifications': [], 'last_chat_update': 0, 'displayed_chat_lines': [],
        'message_text': "", 'audio_cache': {}, 'pasted_image_data': None,
        'quote_line': None, 'refresh_rate': 5, 'base64_cache': {},
        'transcript_history': [], 'last_transcript': "", 'image_hashes': set(),
        'tts_voice': "en-US-AriaNeural", 'chat_history': [], 'marquee_settings': {
            "background": "#1E1E1E", "color": "#FFFFFF", "font-size": "14px",
            "animationDuration": "20s", "width": "100%", "lineHeight": "35px"
        }, 'operation_timings': {}, 'performance_metrics': defaultdict(list),
        'enable_audio': True, 'download_link_cache': {}, 'username': None,
        'autosend': True, 'autosearch': True, 'last_message': "", 'last_query': "",
        'mp3_files': {}, 'timer_start': time.time(), 'quote_index': 0,
        'quote_source': "famous", 'last_sent_transcript': "", 'old_val': None
    }
    for k, v in defaults.items():
        if k not in st.session_state:
            st.session_state[k] = v
# 🖌️ Marquee Helpers
def update_marquee_settings_ui():
    st.sidebar.markdown("### 🎯 Marquee Settings")
    cols = st.sidebar.columns(2)
    with cols[0]:
        st.session_state['marquee_settings']['background'] = st.color_picker("🎨 Background", "#1E1E1E")
        st.session_state['marquee_settings']['color'] = st.color_picker("✍️ Text", "#FFFFFF")
    with cols[1]:
        st.session_state['marquee_settings']['font-size'] = f"{st.slider('📏 Size', 10, 24, 14)}px"
        st.session_state['marquee_settings']['animationDuration'] = f"{st.slider('⏱️ Speed', 1, 20, 20)}s"

def display_marquee(text, settings, key_suffix=""):
    truncated = text[:280] + "..." if len(text) > 280 else text
    streamlit_marquee(content=truncated, **settings, key=f"marquee_{key_suffix}")
    st.write("")
# 📝 Text & File Helpers
def clean_text_for_tts(text):
    return re.sub(r'[#*!\[\]]+', '', ' '.join(text.split()))[:200] or "No text"

def clean_text_for_filename(text):
    return '_'.join(re.sub(r'[^\w\s-]', '', text.lower()).split())[:200]

def get_high_info_terms(text, top_n=10):
    stop_words = {'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with'}
    words = re.findall(r'\b\w+(?:-\w+)*\b', text.lower())
    bi_grams = [' '.join(pair) for pair in zip(words, words[1:])]
    filtered = [t for t in words + bi_grams if t not in stop_words and len(t.split()) <= 2]
    return [t for t, _ in Counter(filtered).most_common(top_n)]
def generate_filename(prompt, username, file_type="md"):
    timestamp = format_timestamp_prefix(username)
    hash_val = hashlib.md5(prompt.encode()).hexdigest()[:8]
    return f"{timestamp}-{hash_val}.{file_type}"

def create_file(prompt, username, file_type="md"):
    filename = generate_filename(prompt, username, file_type)
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(prompt)
    return filename
def get_download_link(file, file_type="mp3"):
    cache_key = f"dl_{file}"
    if cache_key not in st.session_state['download_link_cache']:
        with open(file, "rb") as f:
            b64 = base64.b64encode(f.read()).decode()
        mime_types = {"mp3": "audio/mpeg", "png": "image/png", "mp4": "video/mp4",
                      "md": "text/markdown", "zip": "application/zip"}
        mime = mime_types.get(file_type, "application/octet-stream")
        st.session_state['download_link_cache'][cache_key] = (
            f'<a href="data:{mime};base64,{b64}" download="{os.path.basename(file)}">'
            f'{FILE_EMOJIS.get(file_type, "Download")} Download {os.path.basename(file)}</a>'
        )
    return st.session_state['download_link_cache'][cache_key]
def save_username(username):
    try:
        with open(STATE_FILE, 'w') as f:
            f.write(username)
    except Exception as e:
        print(f"Failed to save username: {e}")

def load_username():
    if os.path.exists(STATE_FILE):
        try:
            with open(STATE_FILE, 'r') as f:
                return f.read().strip()
        except Exception as e:
            print(f"Failed to load username: {e}")
    return None
def concatenate_markdown_files():
    md_files = sorted(glob.glob("*.md"), key=os.path.getmtime, reverse=True)
    all_md_content = ""
    for md_file in md_files:
        with open(md_file, 'r', encoding='utf-8') as f:
            all_md_content += f.read() + "\n\n---\n\n"
    return all_md_content.strip()
# 🎶 Audio Processing
async def async_edge_tts_generate(text, voice, username, rate=0, pitch=0, file_format="mp3"):
    cache_key = f"{text[:100]}_{voice}_{rate}_{pitch}_{file_format}"
    if cache_key in st.session_state['audio_cache']:
        return st.session_state['audio_cache'][cache_key], 0
    start_time = time.time()
    text = clean_text_for_tts(text)
    if not text or text == "No text":
        print(f"Skipping audio generation for empty/invalid text: '{text}'")
        return None, 0
    filename = f"{format_timestamp_prefix(username)}-{hashlib.md5(text.encode()).hexdigest()[:8]}.{file_format}"
    try:
        communicate = edge_tts.Communicate(text, voice, rate=f"{rate:+d}%", pitch=f"{pitch:+d}Hz")
        await communicate.save(filename)
        st.session_state['audio_cache'][cache_key] = filename
        return filename, time.time() - start_time
    except edge_tts.exceptions.NoAudioReceived as e:
        print(f"No audio received for text: '{text}' with voice: {voice}. Error: {e}")
        return None, 0
    except Exception as e:
        print(f"Error generating audio for text: '{text}' with voice: {voice}. Error: {e}")
        return None, 0
def play_and_download_audio(file_path):
    if file_path and os.path.exists(file_path):
        st.audio(file_path)
        st.markdown(get_download_link(file_path), unsafe_allow_html=True)

def load_mp3_viewer():
    mp3_files = sorted(glob.glob("*.mp3"), key=os.path.getmtime, reverse=True)
    for mp3 in mp3_files:
        filename = os.path.basename(mp3)
        if filename not in st.session_state['mp3_files']:
            st.session_state['mp3_files'][filename] = mp3
async def save_chat_entry(username, message, is_markdown=False):
    central = pytz.timezone('US/Central')
    timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
    entry = f"[{timestamp}] {username}: {message}" if not is_markdown else f"[{timestamp}] {username}:\n```markdown\n{message}\n```"
    with open(CHAT_FILE, 'a') as f:
        f.write(f"{entry}\n")
    voice = FUN_USERNAMES.get(username, "en-US-AriaNeural")
    audio_file, _ = await async_edge_tts_generate(message, voice, username)
    if audio_file:
        with open(HISTORY_FILE, 'a') as f:
            f.write(f"[{timestamp}] {username}: Audio - {audio_file}\n")
        st.session_state['mp3_files'][os.path.basename(audio_file)] = audio_file
    else:
        print(f"No audio generated for message: '{message}' by {username}")
    await broadcast_message(f"{username}|{message}", "chat")
    st.session_state.last_chat_update = time.time()
    st.session_state.chat_history.append(entry)
    return audio_file
async def load_chat():
    if not os.path.exists(CHAT_FILE):
        with open(CHAT_FILE, 'a') as f:
            f.write(f"# {START_ROOM} Chat\n\nWelcome to the cosmic hub! 🎤\n")
    with open(CHAT_FILE, 'r') as f:
        content = f.read().strip()
    lines = content.split('\n')
    numbered_content = "\n".join(f"{i+1}. {line}" for i, line in enumerate(lines) if line.strip())
    return numbered_content
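# load_chat() prepends 1-based line numbers to the log; the chat tab and the
# sidebar render these numbered lines, and each 👍 vote stores the full
# numbered line as the voted item.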
async def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True, full_audio=False, useArxiv=True, useArxivAudio=False):
    start = time.time()
    client = anthropic.Anthropic(api_key=anthropic_key)
    response = client.messages.create(
        model="claude-3-sonnet-20240229",
        max_tokens=1000,
        messages=[{"role": "user", "content": q}]
    )
    st.write("Claude's reply 🧠:")
    st.markdown(response.content[0].text)
    result = response.content[0].text
    md_file = create_file(result, "System", "md")
    audio_file, _ = await async_edge_tts_generate(result, st.session_state['tts_voice'], "System")
    st.subheader("📝 Main Response Audio")
    play_and_download_audio(audio_file)
    if useArxiv:
        q = q + result
        st.write('Running Arxiv RAG with Claude inputs.')
        gradio_client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
        refs = gradio_client.predict(
            q, 10, "Semantic Search", "mistralai/Mixtral-8x7B-Instruct-v0.1", api_name="/update_with_rag_md"
        )[0]
        result = f"🔎 {q}\n\n{refs}"
        md_file = create_file(result, "System", "md")
        audio_file, _ = await async_edge_tts_generate(result, st.session_state['tts_voice'], "System")
        st.subheader("📝 ArXiv Response Audio")
        play_and_download_audio(audio_file)
        papers = parse_arxiv_refs(refs)
        if papers and useArxivAudio:
            await create_paper_audio_files(papers, q)
        return result, papers
    elapsed = time.time() - start
    st.write(f"**Total Elapsed:** {elapsed:.2f} s")
    return result, []
# 🌐 WebSocket Handling
async def websocket_handler(websocket, path):
    client_id = str(uuid.uuid4())
    room_id = "chat"
    if room_id not in st.session_state.active_connections:
        st.session_state.active_connections[room_id] = {}
    st.session_state.active_connections[room_id][client_id] = websocket
    username = st.session_state.get('username', random.choice(list(FUN_USERNAMES.keys())))
    chat_content = await load_chat()
    if not any(f"Client-{client_id}" in line for line in chat_content.split('\n')):
        await save_chat_entry("System 🌟", f"{username} has joined {START_ROOM}!")
    try:
        async for message in websocket:
            if '|' in message:
                username, content = message.split('|', 1)
                await save_chat_entry(username, content)
            else:
                await websocket.send("ERROR|Message format: username|content")
    except websockets.ConnectionClosed:
        await save_chat_entry("System 🌟", f"{username} has left {START_ROOM}!")
    finally:
        if room_id in st.session_state.active_connections and client_id in st.session_state.active_connections[room_id]:
            del st.session_state.active_connections[room_id][client_id]

async def broadcast_message(message, room_id):
    if room_id in st.session_state.active_connections:
        disconnected = []
        for client_id, ws in st.session_state.active_connections[room_id].items():
            try:
                await ws.send(message)
            except websockets.ConnectionClosed:
                disconnected.append(client_id)
        for client_id in disconnected:
            if client_id in st.session_state.active_connections[room_id]:
                del st.session_state.active_connections[room_id][client_id]

async def run_websocket_server():
    if not st.session_state.server_running:
        server = await websockets.serve(websocket_handler, '0.0.0.0', 8765)
        st.session_state.server_running = True
        await server.wait_closed()

def start_websocket_server():
    asyncio.run(run_websocket_server())
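# The server listens on 0.0.0.0:8765 and is launched from main() in a daemon
# thread; nest_asyncio.apply() at the top of the file is what allows the
# nested asyncio.run() calls used throughout the app.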
# 📚 PDF to Audio
class AudioProcessor:
    def __init__(self):
        self.cache_dir = AUDIO_CACHE_DIR
        os.makedirs(self.cache_dir, exist_ok=True)
        self.metadata = json.load(open(f"{self.cache_dir}/metadata.json")) if os.path.exists(f"{self.cache_dir}/metadata.json") else {}

    def _save_metadata(self):
        with open(f"{self.cache_dir}/metadata.json", 'w') as f:
            json.dump(self.metadata, f)

    async def create_audio(self, text, voice='en-US-AriaNeural'):
        cache_key = hashlib.md5(f"{text}:{voice}".encode()).hexdigest()
        cache_path = f"{self.cache_dir}/{cache_key}.mp3"
        if cache_key in self.metadata and os.path.exists(cache_path):
            return cache_path
        text = clean_text_for_tts(text)
        if not text:
            return None
        communicate = edge_tts.Communicate(text, voice)
        await communicate.save(cache_path)
        self.metadata[cache_key] = {'timestamp': datetime.now().isoformat(), 'text_length': len(text), 'voice': voice}
        self._save_metadata()
        return cache_path
def process_pdf(pdf_file, max_pages, voice, audio_processor):
    reader = PdfReader(pdf_file)
    total_pages = min(len(reader.pages), max_pages)
    texts, audios = [], {}

    async def process_page(i, text):
        audio_path = await audio_processor.create_audio(text, voice)
        if audio_path:
            audios[i] = audio_path

    for i in range(total_pages):
        text = reader.pages[i].extract_text()
        texts.append(text)
        # Bind i and text as defaults so each thread keeps its own page values
        threading.Thread(target=lambda i=i, text=text: asyncio.run(process_page(i, text))).start()
    return texts, audios, total_pages
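# Each page's TTS runs in its own thread; the PDF tab below polls the shared
# audios dict until that page's MP3 shows up (with a bounded wait).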
# 🔍 ArXiv & AI Lookup
def parse_arxiv_refs(ref_text):
    if not ref_text:
        return []
    papers = []
    current = {}
    for line in ref_text.split('\n'):
        if line.count('|') == 2:
            if current:
                papers.append(current)
            date, title, *_ = line.strip('* ').split('|')
            url = re.search(r'(https://arxiv.org/\S+)', line).group(1) if re.search(r'(https://arxiv.org/\S+)', line) else f"paper_{len(papers)}"
            current = {'date': date, 'title': title, 'url': url, 'authors': '', 'summary': '', 'full_audio': None, 'download_base64': ''}
        elif current:
            if not current['authors']:
                current['authors'] = line.strip('* ')
            else:
                current['summary'] += ' ' + line.strip() if current['summary'] else line.strip()
    if current:
        papers.append(current)
    return papers[:20]
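# parse_arxiv_refs expects each paper as a "date | title | arxiv-url" line
# (exactly two pipes), followed by an authors line and then summary lines;
# at most 20 papers are kept.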
def generate_5min_feature_markdown(paper):
    title, summary, authors, date, url = paper['title'], paper['summary'], paper['authors'], paper['date'], paper['url']
    pdf_url = url.replace("abs", "pdf") + (".pdf" if not url.endswith(".pdf") else "")
    wct, sw = len(title.split()), len(summary.split())
    terms = get_high_info_terms(summary, 15)
    rouge = round((len(terms) / max(sw, 1)) * 100, 2)
    mermaid = "```mermaid\nflowchart TD\n" + "\n".join(
        f'    T{i+1}["{terms[i]}"] --> T{i+2}["{terms[i+1]}"]' for i in range(len(terms) - 1)
    ) + "\n```"
    return f"""
## 📄 {title}
**Authors:** {authors} | **Date:** {date} | **Words:** Title: {wct}, Summary: {sw}
**Links:** [Abstract]({url}) | [PDF]({pdf_url})
**Terms:** {', '.join(terms)} | **ROUGE:** {rouge}%
### 🎤 TTS Read Aloud
- **Title:** {title} | **Terms:** {', '.join(terms)} | **ROUGE:** {rouge}%
#### Concepts Graph
{mermaid}
---
"""
def create_detailed_paper_md(papers):
    return "# Detailed Summary\n" + "\n".join(generate_5min_feature_markdown(p) for p in papers)

async def create_paper_audio_files(papers, query):
    for p in papers:
        audio_text = clean_text_for_tts(f"{p['title']} by {p['authors']}. {p['summary']}")
        p['full_audio'], _ = await async_edge_tts_generate(audio_text, st.session_state['tts_voice'], p['authors'])
        if p['full_audio']:
            p['download_base64'] = get_download_link(p['full_audio'])
def save_vote(file, item, user_hash):
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    entry = f"[{timestamp}] {user_hash} voted for {item}"
    try:
        with open(file, 'a') as f:
            f.write(f"{entry}\n")
        with open(HISTORY_FILE, 'a') as f:
            f.write(f"- {timestamp} - User {user_hash} voted for {item}\n")
        return True
    except Exception as e:
        print(f"Vote save flop: {e}")
        return False

def load_votes(file):
    if not os.path.exists(file):
        with open(file, 'w') as f:
            f.write("# Vote Tally\n\nNo votes yet - get clicking! 🖱️\n")
    try:
        with open(file, 'r') as f:
            lines = f.read().strip().split('\n')
        votes = {}
        for line in lines[2:]:  # Skip the header lines
            if line.strip() and 'voted for' in line:
                item = line.split('voted for ')[1]
                votes[item] = votes.get(item, 0) + 1
        return votes
    except Exception as e:
        print(f"Vote load oopsie: {e}")
        return {}
def generate_user_hash():
    if 'user_hash' not in st.session_state:
        session_id = str(random.getrandbits(128))
        hash_object = hashlib.md5(session_id.encode())
        st.session_state['user_hash'] = hash_object.hexdigest()[:8]
    return st.session_state['user_hash']

async def save_pasted_image(image, username):
    img_hash = hashlib.md5(image.tobytes()).hexdigest()[:8]
    if img_hash in st.session_state.image_hashes:
        return None
    timestamp = format_timestamp_prefix(username)
    filename = f"{timestamp}-{img_hash}.png"
    filepath = filename
    image.save(filepath, "PNG")
    st.session_state.image_hashes.add(img_hash)
    return filepath
# 📦 Zip Files
def create_zip_of_files(md_files, mp3_files, png_files, mp4_files, query):
    all_files = md_files + mp3_files + png_files + mp4_files
    if not all_files:
        return None
    terms = get_high_info_terms(" ".join(
        [open(f, 'r', encoding='utf-8').read() if f.endswith('.md')
         else os.path.splitext(os.path.basename(f))[0].replace('_', ' ')
         for f in all_files] + [query]
    ), 5)
    zip_name = f"{format_timestamp_prefix()}_{'-'.join(terms)[:20]}.zip"
    with zipfile.ZipFile(zip_name, 'w') as z:
        for f in all_files:
            z.write(f)
    return zip_name
# 🎮 Main Interface
def main():
    init_session_state()
    load_mp3_viewer()
    saved_username = load_username()
    if saved_username and saved_username in FUN_USERNAMES:
        st.session_state.username = saved_username
    if not st.session_state.username:
        chat_lines = asyncio.run(load_chat()).split('\n')
        available = [n for n in FUN_USERNAMES if not any(f"{n} has joined" in l for l in chat_lines)]
        st.session_state.username = random.choice(available or list(FUN_USERNAMES.keys()))
        st.session_state.tts_voice = FUN_USERNAMES[st.session_state.username]
        asyncio.run(save_chat_entry("System 🌟", f"{st.session_state.username} has joined {START_ROOM}!"))
        save_username(st.session_state.username)
    st.title(f"{Site_Name} for {st.session_state.username}")
    update_marquee_settings_ui()
    display_marquee(f"🚀 Welcome to {START_ROOM} | 🤖 {st.session_state.username}", st.session_state['marquee_settings'], "welcome")
    # Speech Component at Top Level
    mycomponent = components.declare_component("mycomponent", path="mycomponent")
    val = mycomponent(my_input_value="Hello from MyComponent")
    if val:
        val_stripped = val.replace('\\n', ' ')
        edited_input = st.text_area("✏️ Edit Input:", value=val_stripped, height=100, key="speech_input")
        run_option = st.selectbox("Model:", ["Chat", "Arxiv"], key="model_select")
        col1, col2 = st.columns(2)
        with col1:
            st.checkbox("⚙ AutoRun", value=True, key="autorun")  # Let Streamlit manage autorun state
        with col2:
            full_audio = st.checkbox("📚 FullAudio", value=False, key="full_audio")
        input_changed = (val != st.session_state.old_val)
        if st.session_state.autorun and input_changed:
            st.session_state.old_val = val
            st.session_state.last_query = edited_input
            if run_option == "Chat":
                asyncio.run(save_chat_entry(st.session_state.username, edited_input, True))
            elif run_option == "Arxiv":
                asyncio.run(perform_ai_lookup(edited_input, useArxiv=True, useArxivAudio=full_audio))
        elif st.button("▶ Run", key="run_button"):
            st.session_state.old_val = val
            st.session_state.last_query = edited_input
            if run_option == "Chat":
                asyncio.run(save_chat_entry(st.session_state.username, edited_input, True))
            elif run_option == "Arxiv":
                asyncio.run(perform_ai_lookup(edited_input, useArxiv=True, useArxivAudio=full_audio))
    tab_main = st.radio("Action:", ["🎤 Chat & Voice", "📸 Media", "🔍 ArXiv", "📚 PDF to Audio"], horizontal=True, key="tab_main")
    useArxiv = st.checkbox("Search ArXiv", True, key="use_arxiv")
    useArxivAudio = st.checkbox("ArXiv Audio", False, key="use_arxiv_audio")
    # Use st.checkbox without reassigning to st.session_state
    st.checkbox("Autosend Chat", value=True, key="autosend")
    st.checkbox("Autosearch ArXiv", value=True, key="autosearch")
    # 🎤 Chat & Voice
    if tab_main == "🎤 Chat & Voice":
        st.subheader(f"{START_ROOM} Chat 💬")
        chat_content = asyncio.run(load_chat())
        chat_container = st.container()
        with chat_container:
            lines = chat_content.split('\n')
            for i, line in enumerate(lines):
                if line.strip():
                    col1, col2 = st.columns([5, 1])
                    with col1:
                        st.markdown(line)
                        for mp3_name, mp3_path in st.session_state['mp3_files'].items():
                            if st.session_state.username in mp3_name and any(word in mp3_name for word in line.split()):
                                st.audio(mp3_path)
                                break
                    with col2:
                        if st.button("👍", key=f"chat_vote_{i}"):
                            user_hash = generate_user_hash()
                            save_vote(QUOTE_VOTES_FILE, line, user_hash)
                            st.session_state.timer_start = time.time()
                            save_username(st.session_state.username)
                            st.rerun()
        message = st.text_input(f"Message as {st.session_state.username}", key="message_input")
        paste_result = paste_image_button("📋 Paste Image or Text", key="paste_button_msg")
        if paste_result.image_data is not None:
            if isinstance(paste_result.image_data, str):
                st.session_state.message_text = paste_result.image_data
                message = st.text_input(f"Message as {st.session_state.username}", key="message_input_paste", value=st.session_state.message_text)
            else:
                st.image(paste_result.image_data, caption="Pasted Image")
                filename = asyncio.run(save_pasted_image(paste_result.image_data, st.session_state.username))
                if filename:
                    st.session_state.pasted_image_data = filename
        if (message and message != st.session_state.last_message) or st.session_state.pasted_image_data:
            st.session_state.last_message = message
            if st.session_state.autosend or st.button("Send 🚀", key="send_button"):  # Use st.session_state.autosend directly
                if message.strip():
                    asyncio.run(save_chat_entry(st.session_state.username, message, True))
                if st.session_state.pasted_image_data:
                    asyncio.run(save_chat_entry(st.session_state.username, f"Pasted image: {st.session_state.pasted_image_data}"))
                    st.session_state.pasted_image_data = None
                st.session_state.timer_start = time.time()
                save_username(st.session_state.username)
                st.rerun()
    # 📸 Media
    elif tab_main == "📸 Media":
        st.header("📸 Media Gallery")
        all_files = sorted(glob.glob("*.md") + glob.glob("*.mp3") + glob.glob("*.png") + glob.glob("*.mp4"), key=os.path.getmtime, reverse=True)
        md_files = [f for f in all_files if f.endswith('.md')]
        mp3_files = [f for f in all_files if f.endswith('.mp3')]
        png_files = [f for f in all_files if f.endswith('.png')]
        mp4_files = [f for f in all_files if f.endswith('.mp4')]
        st.subheader("All Submitted Text")
        all_md_content = concatenate_markdown_files()
        st.markdown(all_md_content)
        st.subheader("🎵 Audio (MP3)")
        for mp3 in mp3_files:
            with st.expander(os.path.basename(mp3)):
                st.audio(mp3)
                st.markdown(get_download_link(mp3, "mp3"), unsafe_allow_html=True)
        st.subheader("🖼️ Images (PNG)")
        for png in png_files:
            with st.expander(os.path.basename(png)):
                st.image(png, use_container_width=True)
                st.markdown(get_download_link(png, "png"), unsafe_allow_html=True)
        st.subheader("🎥 Videos (MP4)")
        for mp4 in mp4_files:
            with st.expander(os.path.basename(mp4)):
                st.video(mp4)
                st.markdown(get_download_link(mp4, "mp4"), unsafe_allow_html=True)
        uploaded_file = st.file_uploader("Upload Media", type=['png', 'mp4', 'mp3'], key="media_upload")
        if uploaded_file:
            filename = f"{format_timestamp_prefix(st.session_state.username)}-{hashlib.md5(uploaded_file.getbuffer()).hexdigest()[:8]}.{uploaded_file.name.split('.')[-1]}"
            with open(filename, 'wb') as f:
                f.write(uploaded_file.getbuffer())
            asyncio.run(save_chat_entry(st.session_state.username, f"Uploaded: {filename}"))
            st.session_state.timer_start = time.time()
            save_username(st.session_state.username)
            st.rerun()
    # 🔍 ArXiv
    elif tab_main == "🔍 ArXiv":
        st.subheader("🔍 Query ArXiv")
        q = st.text_input("🔍 Query:", key="arxiv_query")
        if q and q != st.session_state.last_query:
            st.session_state.last_query = q
            if st.session_state.autosearch or st.button("🔍 Run", key="arxiv_run"):  # Use st.session_state.autosearch directly
                result, papers = asyncio.run(perform_ai_lookup(q, useArxiv=useArxiv, useArxivAudio=useArxivAudio))
                for i, p in enumerate(papers, 1):
                    with st.expander(f"{i}. 📄 {p['title']}"):
                        st.markdown(f"**{p['date']} | {p['title']}** — [Link]({p['url']})")
                        st.markdown(generate_5min_feature_markdown(p))
                        if p.get('full_audio'):
                            play_and_download_audio(p['full_audio'])
    # 📚 PDF to Audio
    elif tab_main == "📚 PDF to Audio":
        audio_processor = AudioProcessor()
        pdf_file = st.file_uploader("Choose PDF", "pdf", key="pdf_upload")
        max_pages = st.slider('Pages', 1, 100, 10, key="pdf_pages")
        if pdf_file:
            with st.spinner('Processing...'):
                texts, audios, total = process_pdf(pdf_file, max_pages, st.session_state['tts_voice'], audio_processor)
                for i, text in enumerate(texts):
                    with st.expander(f"Page {i+1}"):
                        st.markdown(text)
                        waited = 0.0
                        while i not in audios and waited < 30.0:  # bounded wait so a failed page can't hang the app
                            time.sleep(0.1)
                            waited += 0.1
                        if audios.get(i):
                            st.audio(audios[i])
                            st.markdown(get_download_link(audios[i], "mp3"), unsafe_allow_html=True)
                            asyncio.run(save_chat_entry(st.session_state.username, f"PDF Page {i+1} converted to audio: {audios[i]}"))
    # 🗂️ Sidebar with Dialog and Audio
    st.sidebar.subheader("Voice Settings")
    new_username = st.sidebar.selectbox("Change Name/Voice", list(FUN_USERNAMES.keys()), index=list(FUN_USERNAMES.keys()).index(st.session_state.username), key="username_select")
    if new_username != st.session_state.username:
        asyncio.run(save_chat_entry("System 🌟", f"{st.session_state.username} changed to {new_username}"))
        st.session_state.username, st.session_state.tts_voice = new_username, FUN_USERNAMES[new_username]
        st.session_state.timer_start = time.time()
        save_username(st.session_state.username)
        st.rerun()
    st.sidebar.markdown("### 💬 Chat Dialog & Media")
    chat_content = asyncio.run(load_chat())
    lines = chat_content.split('\n')
    all_files = sorted(glob.glob("*.md") + glob.glob("*.mp3") + glob.glob("*.png") + glob.glob("*.mp4"), key=os.path.getmtime, reverse=True)
    for line in lines[-10:]:
        if line.strip():
            st.sidebar.markdown(f"**{line}**")
            for f in all_files:
                f_name = os.path.basename(f)
                if st.session_state.username in f_name and any(word in f_name for word in line.split()):
                    if f.endswith('.mp3'):
                        st.sidebar.audio(f)
                        st.sidebar.markdown(get_download_link(f, "mp3"), unsafe_allow_html=True)
                    elif f.endswith('.png'):
                        st.sidebar.image(f, use_container_width=True)
                        st.sidebar.markdown(get_download_link(f, "png"), unsafe_allow_html=True)
                    elif f.endswith('.mp4'):
                        st.sidebar.video(f)
                        st.sidebar.markdown(get_download_link(f, "mp4"), unsafe_allow_html=True)
                    break
    st.sidebar.subheader("Vote Totals")
    chat_votes = load_votes(QUOTE_VOTES_FILE)
    image_votes = load_votes(IMAGE_VOTES_FILE)
    for item, count in chat_votes.items():
        st.sidebar.write(f"{item}: {count} votes")
    for image, count in image_votes.items():
        st.sidebar.write(f"{image}: {count} votes")
    md_files = [f for f in all_files if f.endswith('.md')]
    mp3_files = [f for f in all_files if f.endswith('.mp3')]
    png_files = [f for f in all_files if f.endswith('.png')]
    mp4_files = [f for f in all_files if f.endswith('.mp4')]
    st.sidebar.markdown("### 📂 File History")
    for f in all_files[:10]:
        st.sidebar.write(f"{FILE_EMOJIS.get(f.split('.')[-1], '📄')} {os.path.basename(f)}")
    if st.sidebar.button("⬇️ Zip All", key="zip_all"):
        zip_name = create_zip_of_files(md_files, mp3_files, png_files, mp4_files, "latest_query")
        if zip_name:
            st.sidebar.markdown(get_download_link(zip_name, "zip"), unsafe_allow_html=True)
    # Start WebSocket server in a separate thread
    if not st.session_state.server_running and not st.session_state.server_task:
        st.session_state.server_task = threading.Thread(target=start_websocket_server, daemon=True)
        st.session_state.server_task.start()

if __name__ == "__main__":
    main()