awacke1 committed on
Commit 4bc0956 · verified · 1 Parent(s): 73ea216

Create app.py

Files changed (1)
  1. app.py +897 -0
app.py ADDED
@@ -0,0 +1,897 @@
import streamlit as st
import asyncio
import websockets
import uuid
from datetime import datetime
import os
import random
import time
import hashlib
from PIL import Image
import glob
import base64
import io
import streamlit.components.v1 as components
import edge_tts
from audio_recorder_streamlit import audio_recorder
import nest_asyncio
import re
import pytz
import shutil
import anthropic
import openai
from PyPDF2 import PdfReader
import threading
import json
import zipfile
from gradio_client import Client
from dotenv import load_dotenv
from streamlit_marquee import streamlit_marquee
from collections import defaultdict, Counter
import pandas as pd

# 🛠️ Patch asyncio for nesting
nest_asyncio.apply()

# 🎨 Page Config
st.set_page_config(
    page_title="🚲TalkingAIResearcher🏆",
    page_icon="🚲🏆",
    layout="wide",
    initial_sidebar_state="auto"
)

# 🌟 Static Config
icons = '🤖🧠🔬📝'
Site_Name = '🤖🧠Chat & Quote Node📝🔬'
START_ROOM = "Sector 🌌"
FUN_USERNAMES = {
    "CosmicJester 🌌": "en-US-AriaNeural",
    "PixelPanda 🐼": "en-US-JennyNeural",
    "QuantumQuack 🦆": "en-GB-SoniaNeural",
    "StellarSquirrel 🐿️": "en-AU-NatashaNeural",
    "GizmoGuru ⚙️": "en-CA-ClaraNeural",
    "NebulaNinja 🌠": "en-US-GuyNeural",
    "ByteBuster 💾": "en-GB-RyanNeural",
    "GalacticGopher 🌍": "en-AU-WilliamNeural",
    "RocketRaccoon 🚀": "en-CA-LiamNeural",
    "EchoElf 🧝": "en-US-AnaNeural",
    "PhantomFox 🦊": "en-US-BrandonNeural",
    "WittyWizard 🧙": "en-GB-ThomasNeural",
    "LunarLlama 🌙": "en-AU-FreyaNeural",
    "SolarSloth ☀️": "en-CA-LindaNeural",
    "AstroAlpaca 🦙": "en-US-ChristopherNeural",
    "CyberCoyote 🐺": "en-GB-ElliotNeural",
    "MysticMoose 🦌": "en-AU-JamesNeural",
    "GlitchGnome 🧚": "en-CA-EthanNeural",
    "VortexViper 🐍": "en-US-AmberNeural",
    "ChronoChimp 🐒": "en-GB-LibbyNeural"
}
EDGE_TTS_VOICES = list(set(FUN_USERNAMES.values()))
FILE_EMOJIS = {"md": "📝", "mp3": "🎵", "png": "🖼️", "mp4": "🎥", "zip": "📦"}

# 📁 Directories
for d in ["chat_logs", "vote_logs", "audio_logs", "history_logs", "audio_cache", "paper_metadata"]:
    os.makedirs(d, exist_ok=True)

CHAT_DIR = "chat_logs"
VOTE_DIR = "vote_logs"
MEDIA_DIR = "."
AUDIO_CACHE_DIR = "audio_cache"
AUDIO_DIR = "audio_logs"
PAPER_DIR = "paper_metadata"
STATE_FILE = "user_state.txt"

CHAT_FILE = os.path.join(CHAT_DIR, "global_chat.md")
QUOTE_VOTES_FILE = os.path.join(VOTE_DIR, "quote_votes.md")
IMAGE_VOTES_FILE = os.path.join(VOTE_DIR, "image_votes.md")
HISTORY_FILE = os.path.join(VOTE_DIR, "vote_history.md")

# 🔑 API Keys
load_dotenv()
anthropic_key = os.getenv('ANTHROPIC_API_KEY', st.secrets.get('ANTHROPIC_API_KEY', ""))
openai_api_key = os.getenv('OPENAI_API_KEY', st.secrets.get('OPENAI_API_KEY', ""))
openai_client = openai.OpenAI(api_key=openai_api_key)

# 🕒 Timestamp Helper
def format_timestamp_prefix(username=""):
    central = pytz.timezone('US/Central')
    now = datetime.now(central)
    return f"{now.strftime('%Y%m%d_%H%M%S')}-by-{username}"

# 📈 Performance Timer
class PerformanceTimer:
    def __init__(self, name):
        self.name, self.start = name, None
    def __enter__(self):
        self.start = time.time()
        return self
    def __exit__(self, *args):
        duration = time.time() - self.start
        st.session_state['operation_timings'][self.name] = duration
        st.session_state['performance_metrics'][self.name].append(duration)

# 🎛️ Session State Init
def init_session_state():
    defaults = {
        'server_running': False, 'server_task': None, 'active_connections': {},
        'media_notifications': [], 'last_chat_update': 0, 'displayed_chat_lines': [],
        'message_text': "", 'audio_cache': {}, 'pasted_image_data': None,
        'quote_line': None, 'refresh_rate': 5, 'base64_cache': {},
        'transcript_history': [], 'last_transcript': "", 'image_hashes': set(),
        'tts_voice': "en-US-AriaNeural", 'chat_history': [], 'marquee_settings': {
            "background": "#1E1E1E", "color": "#FFFFFF", "font-size": "14px",
            "animationDuration": "20s", "width": "100%", "lineHeight": "35px"
        }, 'operation_timings': {}, 'performance_metrics': defaultdict(list),
        'enable_audio': True, 'download_link_cache': {}, 'username': None,
        'autosend': True, 'autosearch': True, 'last_message': "", 'last_query': "",
        'mp3_files': {}, 'timer_start': time.time(), 'quote_index': 0,
        'quote_source': "famous", 'last_sent_transcript': "", 'old_val': None,
        'last_refresh': time.time(), 'paper_metadata': {}, 'paste_image_base64': "",
        'use_arxiv': True, 'use_arxiv_audio': False, 'speech_processed': False
    }
    for k, v in defaults.items():
        if k not in st.session_state:
            st.session_state[k] = v

# 🖌️ Marquee Helpers
def update_marquee_settings_ui():
    st.sidebar.markdown("### 🎯 Marquee Settings")
    cols = st.sidebar.columns(2)
    with cols[0]:
        st.session_state['marquee_settings']['background'] = st.color_picker("🎨 Background", "#1E1E1E")
        st.session_state['marquee_settings']['color'] = st.color_picker("✍️ Text", "#FFFFFF")
    with cols[1]:
        st.session_state['marquee_settings']['font-size'] = f"{st.slider('📏 Size', 10, 24, 14)}px"
        st.session_state['marquee_settings']['animationDuration'] = f"{st.slider('⏱️ Speed', 1, 20, 20)}s"

def display_marquee(text, settings, key_suffix=""):
    truncated = text[:280] + "..." if len(text) > 280 else text
    streamlit_marquee(content=truncated, **settings, key=f"marquee_{key_suffix}")
    st.write("")

# 📝 Text & File Helpers
def clean_text_for_tts(text):
    return re.sub(r'[#*!\[\]]+', '', ' '.join(text.split()))[:200] or "No text"

def clean_text_for_filename(text):
    return '_'.join(re.sub(r'[^\w\s-]', '', text.lower()).split())[:50]

def get_high_info_terms(text, top_n=10):
    stop_words = {'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with'}
    words = re.findall(r'\b\w+(?:-\w+)*\b', text.lower())
    bi_grams = [' '.join(pair) for pair in zip(words, words[1:])]
    filtered = [t for t in words + bi_grams if t not in stop_words and len(t.split()) <= 2]
    return [t for t, _ in Counter(filtered).most_common(top_n)]

def generate_filename(prompt, username, file_type="md", title=None):
    timestamp = format_timestamp_prefix(username)
    if title:
        high_info = '-'.join(get_high_info_terms(title, 5))
        return f"{timestamp}-{clean_text_for_filename(prompt[:20])}-{high_info}.{file_type}"
    hash_val = hashlib.md5(prompt.encode()).hexdigest()[:8]
    return f"{timestamp}-{hash_val}.{file_type}"

def create_file(prompt, username, file_type="md", title=None):
    filename = generate_filename(prompt, username, file_type, title)
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(prompt)
    return filename

def get_download_link(file, file_type="mp3"):
    cache_key = f"dl_{file}"
    if cache_key not in st.session_state['download_link_cache']:
        with open(file, "rb") as f:
            b64 = base64.b64encode(f.read()).decode()
        mime_types = {"mp3": "audio/mpeg", "png": "image/png", "mp4": "video/mp4", "md": "text/markdown", "zip": "application/zip"}
        st.session_state['download_link_cache'][cache_key] = f'<a href="data:{mime_types.get(file_type, "application/octet-stream")};base64,{b64}" download="{os.path.basename(file)}">{FILE_EMOJIS.get(file_type, "Download")} Download {os.path.basename(file)}</a>'
    return st.session_state['download_link_cache'][cache_key]

def save_username(username):
    try:
        with open(STATE_FILE, 'w') as f:
            f.write(username)
    except Exception as e:
        print(f"Failed to save username: {e}")

def load_username():
    if os.path.exists(STATE_FILE):
        try:
            with open(STATE_FILE, 'r') as f:
                return f.read().strip()
        except Exception as e:
            print(f"Failed to load username: {e}")
    return None

def concatenate_markdown_files(exclude_files=["README.md"]):
    md_files = sorted([f for f in glob.glob("*.md") if os.path.basename(f) not in exclude_files], key=os.path.getmtime)
    all_md_content = ""
    for i, md_file in enumerate(md_files, 1):
        with open(md_file, 'r', encoding='utf-8') as f:
            content = f.read().strip()
        all_md_content += f"{i}. {content}\n"
    return all_md_content.rstrip()

# 🎶 Audio Processing
async def async_edge_tts_generate(text, voice, username, rate=0, pitch=0, file_format="mp3"):
    cache_key = f"{text[:100]}_{voice}_{rate}_{pitch}_{file_format}"
    if cache_key in st.session_state['audio_cache']:
        return st.session_state['audio_cache'][cache_key], 0
    start_time = time.time()
    text = clean_text_for_tts(text)
    if not text or text == "No text":
        print(f"Skipping audio generation for empty/invalid text: '{text}'")
        return None, 0
    filename = f"{format_timestamp_prefix(username)}-{hashlib.md5(text.encode()).hexdigest()[:8]}.{file_format}"
    try:
        communicate = edge_tts.Communicate(text, voice, rate=f"{rate:+d}%", pitch=f"{pitch:+d}Hz")
        await communicate.save(filename)
        st.session_state['audio_cache'][cache_key] = filename
        return filename, time.time() - start_time
    except edge_tts.exceptions.NoAudioReceived as e:
        print(f"No audio received for text: '{text}' with voice: {voice}. Error: {e}")
        return None, 0
    except Exception as e:
        print(f"Error generating audio for text: '{text}' with voice: {voice}. Error: {e}")
        return None, 0

def play_and_download_audio(file_path):
    if file_path and os.path.exists(file_path):
        st.audio(file_path)
        st.markdown(get_download_link(file_path), unsafe_allow_html=True)

def load_mp3_viewer():
    mp3_files = sorted(glob.glob("*.mp3"), key=os.path.getmtime)
    for i, mp3 in enumerate(mp3_files, 1):
        filename = os.path.basename(mp3)
        if filename not in st.session_state['mp3_files']:
            st.session_state['mp3_files'][filename] = (i, mp3)

async def save_chat_entry(username, message, voice, is_markdown=False):
    if not message.strip() or message == st.session_state.last_transcript:
        return None, None
    central = pytz.timezone('US/Central')
    timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
    entry = f"[{timestamp}] {username} ({voice}): {message}" if not is_markdown else f"[{timestamp}] {username} ({voice}):\n```markdown\n{message}\n```"
    md_file = create_file(entry, username, "md")
    with open(CHAT_FILE, 'a') as f:
        f.write(f"{entry}\n")
    audio_file, _ = await async_edge_tts_generate(message, voice, username)
    if audio_file:
        with open(HISTORY_FILE, 'a') as f:
            f.write(f"[{timestamp}] {username}: Audio - {audio_file}\n")
        st.session_state['mp3_files'][os.path.basename(audio_file)] = (len(st.session_state['chat_history']) + 1, audio_file)
    # Clear the text input if it matches the message and this was from speech
    if st.session_state.get('speech_processed', False) and st.session_state.get('message_input', '') == message:
        st.session_state['message_input'] = ""
        st.session_state['speech_processed'] = False
    await broadcast_message(f"{username}|{message}", "chat")
    st.session_state.last_chat_update = time.time()
    st.session_state.chat_history.append(entry)
    st.session_state.last_transcript = message
    return md_file, audio_file

async def load_chat():
    if not os.path.exists(CHAT_FILE):
        with open(CHAT_FILE, 'a') as f:
            f.write(f"# {START_ROOM} Chat\n\nWelcome to the cosmic hub! 🎤\n")
    with open(CHAT_FILE, 'r') as f:
        content = f.read().strip()
    lines = content.split('\n')
    unique_lines = list(dict.fromkeys(line for line in lines if line.strip()))
    return unique_lines

# Claude Search Function with Image Support
async def perform_claude_search(query, username, image=None):
    if not query.strip() or query == st.session_state.last_transcript:
        return None, None, None
    client = anthropic.Anthropic(api_key=anthropic_key)
    message_content = [{"type": "text", "text": query}]
    if image:
        buffered = io.BytesIO()
        image.save(buffered, format="PNG")
        img_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
        message_content.append({
            "type": "image",
            "source": {
                "type": "base64",
                "media_type": "image/png",
                "data": img_base64
            }
        })
    response = client.messages.create(
        model="claude-3-sonnet-20240229",
        max_tokens=1000,
        messages=[{"role": "user", "content": message_content}]
    )
    result = response.content[0].text
    st.markdown(f"### Claude's Reply 🧠\n{result}")

    voice = FUN_USERNAMES.get(username, "en-US-AriaNeural")
    md_file, audio_file = await save_chat_entry(username, f"Claude Search: {query}\nResponse: {result}", voice, True)
    return md_file, audio_file, result

# ArXiv Search Function
async def perform_arxiv_search(query, username, claude_result=None):
    if not query.strip() or query == st.session_state.last_transcript:
        return None, None
    if claude_result is None:
        client = anthropic.Anthropic(api_key=anthropic_key)
        claude_response = client.messages.create(
            model="claude-3-sonnet-20240229",
            max_tokens=1000,
            messages=[{"role": "user", "content": query}]
        )
        claude_result = claude_response.content[0].text
        st.markdown(f"### Claude's Reply 🧠\n{claude_result}")

    enhanced_query = f"{query}\n\n{claude_result}"
    gradio_client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    refs = gradio_client.predict(
        enhanced_query, 10, "Semantic Search", "mistralai/Mixtral-8x7B-Instruct-v0.1", api_name="/update_with_rag_md"
    )[0]
    result = f"🔎 {enhanced_query}\n\n{refs}"
    voice = FUN_USERNAMES.get(username, "en-US-AriaNeural")
    md_file, audio_file = await save_chat_entry(username, f"ArXiv Search: {query}\nClaude Response: {claude_result}\nArXiv Results: {refs}", voice, True)
    return md_file, audio_file

async def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True, full_audio=False, useArxiv=True, useArxivAudio=False):
    start = time.time()
    client = anthropic.Anthropic(api_key=anthropic_key)
    response = client.messages.create(
        model="claude-3-sonnet-20240229",
        max_tokens=1000,
        messages=[{"role": "user", "content": q}]
    )
    st.write("Claude's reply 🧠:")
    st.markdown(response.content[0].text)

    result = response.content[0].text
    md_file = create_file(result, "System", "md")
    audio_file, _ = await async_edge_tts_generate(result, st.session_state['tts_voice'], "System")
    st.subheader("📝 Main Response Audio")
    play_and_download_audio(audio_file)

    papers = []
    if useArxiv:
        q = q + result
        st.write('Running Arxiv RAG with Claude inputs.')
        gradio_client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
        refs = gradio_client.predict(
            q, 20, "Semantic Search", "mistralai/Mixtral-8x7B-Instruct-v0.1", api_name="/update_with_rag_md"
        )[0]
        papers = parse_arxiv_refs(refs, q)
        for paper in papers:
            filename = create_file(generate_5min_feature_markdown(paper), "System", "md", paper['title'])
            paper['md_file'] = filename
            st.session_state['paper_metadata'][paper['title']] = filename
    if papers and useArxivAudio:
        await create_paper_audio_files(papers, q)
    elapsed = time.time() - start
    st.write(f"**Total Elapsed:** {elapsed:.2f} s")
    return result, papers

# 🌐 WebSocket Handling
async def websocket_handler(websocket, path):
    client_id = str(uuid.uuid4())
    room_id = "chat"
    if room_id not in st.session_state.active_connections:
        st.session_state.active_connections[room_id] = {}
    st.session_state.active_connections[room_id][client_id] = websocket
    username = st.session_state.get('username', random.choice(list(FUN_USERNAMES.keys())))
    chat_content = await load_chat()
    if not any(f"Client-{client_id}" in line for line in chat_content):
        await save_chat_entry("System 🌟", f"{username} has joined {START_ROOM}!", "en-US-AriaNeural")
    try:
        async for message in websocket:
            if '|' in message:
                username, content = message.split('|', 1)
                voice = FUN_USERNAMES.get(username, "en-US-AriaNeural")
                await save_chat_entry(username, content, voice)
            else:
                await websocket.send("ERROR|Message format: username|content")
    except websockets.ConnectionClosed:
        await save_chat_entry("System 🌟", f"{username} has left {START_ROOM}!", "en-US-AriaNeural")
    finally:
        if room_id in st.session_state.active_connections and client_id in st.session_state.active_connections[room_id]:
            del st.session_state.active_connections[room_id][client_id]

async def broadcast_message(message, room_id):
    if room_id in st.session_state.active_connections:
        disconnected = []
        for client_id, ws in st.session_state.active_connections[room_id].items():
            try:
                await ws.send(message)
            except websockets.ConnectionClosed:
                disconnected.append(client_id)
        for client_id in disconnected:
            if client_id in st.session_state.active_connections[room_id]:
                del st.session_state.active_connections[room_id][client_id]

async def run_websocket_server():
    if not st.session_state.server_running:
        server = await websockets.serve(websocket_handler, '0.0.0.0', 8765)
        st.session_state.server_running = True
        await server.wait_closed()

def start_websocket_server():
    asyncio.run(run_websocket_server())

# 📚 PDF to Audio
class AudioProcessor:
    def __init__(self):
        self.cache_dir = AUDIO_CACHE_DIR
        os.makedirs(self.cache_dir, exist_ok=True)
        self.metadata = json.load(open(f"{self.cache_dir}/metadata.json")) if os.path.exists(f"{self.cache_dir}/metadata.json") else {}

    def _save_metadata(self):
        with open(f"{self.cache_dir}/metadata.json", 'w') as f:
            json.dump(self.metadata, f)

    async def create_audio(self, text, voice='en-US-AriaNeural'):
        cache_key = hashlib.md5(f"{text}:{voice}".encode()).hexdigest()
        cache_path = f"{self.cache_dir}/{cache_key}.mp3"
        if cache_key in self.metadata and os.path.exists(cache_path):
            return cache_path
        text = clean_text_for_tts(text)
        if not text:
            return None
        communicate = edge_tts.Communicate(text, voice)
        await communicate.save(cache_path)
        self.metadata[cache_key] = {'timestamp': datetime.now().isoformat(), 'text_length': len(text), 'voice': voice}
        self._save_metadata()
        return cache_path

def process_pdf(pdf_file, max_pages, voice, audio_processor):
    reader = PdfReader(pdf_file)
    total_pages = min(len(reader.pages), max_pages)
    texts, audios = [], {}
    async def process_page(i, text):
        audio_path = await audio_processor.create_audio(text, voice)
        if audio_path:
            audios[i] = audio_path
    for i in range(total_pages):
        text = reader.pages[i].extract_text()
        texts.append(text)
        # Bind i and text as defaults so each worker thread converts its own page
        # (a bare lambda would capture the loop variables late and only process the last page).
        threading.Thread(target=lambda i=i, text=text: asyncio.run(process_page(i, text))).start()
    return texts, audios, total_pages

# 🔍 ArXiv & AI Lookup
def parse_arxiv_refs(ref_text, query):
    if not ref_text:
        return []
    papers = []
    current = {}
    for line in ref_text.split('\n'):
        if line.count('|') == 2:
            if current:
                papers.append(current)
            date, title, *_ = line.strip('* ').split('|')
            url = re.search(r'(https://arxiv.org/\S+)', line).group(1) if re.search(r'(https://arxiv.org/\S+)', line) else f"paper_{len(papers)}"
            current = {'date': date, 'title': title, 'url': url, 'authors': '', 'summary': '', 'full_audio': None, 'download_base64': '', 'query': query}
        elif current:
            if not current['authors']:
                current['authors'] = line.strip('* ')
            else:
                current['summary'] += ' ' + line.strip() if current['summary'] else line.strip()
    if current:
        papers.append(current)
    return papers[:20]

def generate_5min_feature_markdown(paper):
    title, summary, authors, date, url = paper['title'], paper['summary'], paper['authors'], paper['date'], paper['url']
    pdf_url = url.replace("abs", "pdf") + (".pdf" if not url.endswith(".pdf") else "")
    wct, sw = len(title.split()), len(summary.split())
    terms = get_high_info_terms(summary, 15)
    rouge = round((len(terms) / max(sw, 1)) * 100, 2)
    # Chain consecutive high-information terms into a mermaid flowchart
    mermaid = "```mermaid\nflowchart TD\n" + "\n".join(f'    T{i+1}["{terms[i]}"] --> T{i+2}["{terms[i+1]}"]' for i in range(len(terms)-1)) + "\n```"
    return f"""
## 📄 {title}
**Authors:** {authors}
**Date:** {date}
**Words:** Title: {wct}, Summary: {sw}
**Links:** [Abstract]({url}) | [PDF]({pdf_url})
**Terms:** {', '.join(terms)}
**ROUGE:** {rouge}%
### 🎤 TTS Read Aloud
- **Title:** {title}
- **Terms:** {', '.join(terms)}
- **ROUGE:** {rouge}%
#### Concepts Graph
{mermaid}
---
"""

async def create_paper_audio_files(papers, query):
    for p in papers:
        audio_text = clean_text_for_tts(f"{p['title']} by {p['authors']}. {p['summary']}")
        p['full_audio'], _ = await async_edge_tts_generate(audio_text, st.session_state['tts_voice'], p['authors'])
        if p['full_audio']:
            p['download_base64'] = get_download_link(p['full_audio'])

def save_vote(file, item, user_hash):
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    entry = f"[{timestamp}] {user_hash} voted for {item}"
    try:
        with open(file, 'a') as f:
            f.write(f"{entry}\n")
        with open(HISTORY_FILE, 'a') as f:
            f.write(f"- {timestamp} - User {user_hash} voted for {item}\n")
        return True
    except Exception as e:
        print(f"Vote save flop: {e}")
        return False

def load_votes(file):
    if not os.path.exists(file):
        with open(file, 'w') as f:
            f.write("# Vote Tally\n\nNo votes yet - get clicking! 🖱️\n")
    try:
        with open(file, 'r') as f:
            lines = f.read().strip().split('\n')
        votes = {}
        for line in lines[2:]:
            if line.strip() and 'voted for' in line:
                item = line.split('voted for ')[1]
                votes[item] = votes.get(item, 0) + 1
        return votes
    except Exception as e:
        print(f"Vote load oopsie: {e}")
        return {}

def generate_user_hash():
    if 'user_hash' not in st.session_state:
        session_id = str(random.getrandbits(128))
        hash_object = hashlib.md5(session_id.encode())
        st.session_state['user_hash'] = hash_object.hexdigest()[:8]
    return st.session_state['user_hash']

async def save_pasted_image(image, username, prompt=""):
    img_hash = hashlib.md5(image.tobytes()).hexdigest()[:8]
    if img_hash in st.session_state.image_hashes:
        return None
    context = prompt if prompt else st.session_state.get('last_message', "pasted_image")
    timestamp = format_timestamp_prefix(username)
    filename = f"{timestamp}-{clean_text_for_filename(context)}-{img_hash}.png"
    filepath = filename
    image.save(filepath, "PNG")
    st.session_state.image_hashes.add(img_hash)
    await save_chat_entry(username, f"Pasted image: {filepath}", FUN_USERNAMES.get(username, "en-US-AriaNeural"))
    return filepath

# 📦 Zip Files
def create_zip_of_files(files, prefix="All", query="latest"):
    if not files:
        return None
    terms = get_high_info_terms(" ".join([open(f, 'r', encoding='utf-8').read() if f.endswith('.md') else os.path.splitext(os.path.basename(f))[0].replace('_', ' ') for f in files] + [query]), 5)
    zip_name = f"{prefix}_{format_timestamp_prefix()}_{'-'.join(terms)[:20]}.zip"
    with zipfile.ZipFile(zip_name, 'w') as z:
        [z.write(f) for f in files]
    return zip_name

# Custom Paste Image Component
def paste_image_component():
    with st.form(key="paste_form"):
        paste_input = st.text_input("Paste Base64 Image Here (hidden)", value="", key="paste_input", label_visibility="collapsed")
        st.markdown("""
        <script>
        function pasteClipboard() {
            navigator.clipboard.readText().then(text => {
                document.getElementById('paste_input').value = text;
                document.getElementById('paste_form').requestSubmit();
            }).catch(err => {
                console.error('Failed to read clipboard: ', err);
                document.getElementById('paste_input').value = 'ERROR: ' + err.message;
                document.getElementById('paste_form').requestSubmit();
            });
        }
        </script>
        """, unsafe_allow_html=True)
        paste_button = st.form_submit_button("Paste Image 📋", on_click=lambda: st.markdown("<script>pasteClipboard();</script>", unsafe_allow_html=True))

    if paste_button and paste_input:
        if paste_input.startswith('ERROR:'):
            st.warning(f"Paste failed: {paste_input}")
            return None
        if paste_input.startswith('data:image'):
            try:
                base64_str = paste_input.split(',')[1]
                img_bytes = base64.b64decode(base64_str)
                img = Image.open(io.BytesIO(img_bytes))
                return img
            except Exception as e:
                st.warning(f"Error decoding pasted image: {e}")
                return None
        else:
            st.warning("Clipboard does not contain a valid image (expected base64 data:image)")
            return None
    return None

# 🎮 Main Interface
def main():
    init_session_state()
    load_mp3_viewer()
    saved_username = load_username()
    if saved_username and saved_username in FUN_USERNAMES:
        st.session_state.username = saved_username
    if not st.session_state.username:
        available = [n for n in FUN_USERNAMES if not any(f"{n} has joined" in l for l in asyncio.run(load_chat()))]
        st.session_state.username = random.choice(available or list(FUN_USERNAMES.keys()))
        st.session_state.tts_voice = FUN_USERNAMES[st.session_state.username]
        asyncio.run(save_chat_entry("System 🌟", f"{st.session_state.username} has joined {START_ROOM}!", "en-US-AriaNeural"))
        save_username(st.session_state.username)

    st.title(f"{Site_Name} for {st.session_state.username}")
    update_marquee_settings_ui()
    display_marquee(f"🚀 Welcome to {START_ROOM} | 🤖 {st.session_state.username}", st.session_state['marquee_settings'], "welcome")

    mycomponent = components.declare_component("mycomponent", path="mycomponent")
    val = mycomponent(my_input_value="", key=f"speech_{st.session_state.get('speech_processed', False)}")
    if val and val != st.session_state.last_transcript:
        val_stripped = val.strip().replace('\n', ' ')
        if val_stripped:
            voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
            st.session_state['speech_processed'] = True
            md_file, audio_file = asyncio.run(save_chat_entry(st.session_state.username, val_stripped, voice))
            if audio_file:
                play_and_download_audio(audio_file)
            st.rerun()

    tab_main = st.radio("Action:", ["🎤 Chat & Voice", "🔍 ArXiv", "📚 PDF to Audio"], horizontal=True, key="tab_main")
    st.checkbox("Search ArXiv", key="use_arxiv")
    st.checkbox("ArXiv Audio", key="use_arxiv_audio")
    st.checkbox("Autosend Chat", key="autosend")
    st.checkbox("Autosearch ArXiv", key="autosearch")

    if tab_main == "🎤 Chat & Voice":
        st.subheader(f"{START_ROOM} Chat 💬")
        chat_content = asyncio.run(load_chat())
        chat_container = st.container()
        with chat_container:
            numbered_content = "\n".join(f"{i+1}. {line}" for i, line in enumerate(chat_content))
            st.code(numbered_content, language="python")

        message = st.text_input(f"Message as {st.session_state.username}", key="message_input")

        pasted_image = paste_image_component()
        if pasted_image is not None and st.session_state['paste_image_base64'] != base64.b64encode(pasted_image.tobytes()).decode('utf-8'):
            st.session_state['paste_image_base64'] = base64.b64encode(pasted_image.tobytes()).decode('utf-8')
            voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
            st.image(pasted_image, caption="Pasted Image")
            image_prompt = st.text_input("Add a prompt for Claude (e.g., 'OCR this image')", key="image_prompt")
            filename = asyncio.run(save_pasted_image(pasted_image, st.session_state.username, image_prompt))
            if filename:
                st.session_state.pasted_image_data = filename
                if image_prompt:
                    md_file_claude, audio_file_claude, claude_result = asyncio.run(
                        perform_claude_search(image_prompt, st.session_state.username, pasted_image)
                    )
                    if audio_file_claude:
                        play_and_download_audio(audio_file_claude)
                    md_file_arxiv, audio_file_arxiv = asyncio.run(
                        perform_arxiv_search(image_prompt, st.session_state.username, claude_result)
                    )
                    if audio_file_arxiv:
                        play_and_download_audio(audio_file_arxiv)
            st.session_state.pasted_image_data = None
            st.session_state['paste_image_base64'] = ""
            st.session_state.timer_start = time.time()
            save_username(st.session_state.username)
            st.rerun()

        if (message and message != st.session_state.last_message) or (st.session_state.pasted_image_data and not st.session_state['paste_image_base64']):
            st.session_state.last_message = message
            col_send, col_claude, col_arxiv = st.columns([1, 1, 1])

            with col_send:
                if st.session_state.autosend or st.button("Send 🚀", key="send_button"):
                    voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
                    if message.strip():
                        md_file, audio_file = asyncio.run(save_chat_entry(st.session_state.username, message, voice, True))
                        if audio_file:
                            play_and_download_audio(audio_file)
                    if st.session_state.pasted_image_data:
                        asyncio.run(save_chat_entry(st.session_state.username, f"Pasted image: {st.session_state.pasted_image_data}", voice))
                        st.session_state.pasted_image_data = None
                    st.session_state.timer_start = time.time()
                    save_username(st.session_state.username)
                    st.rerun()

            with col_claude:
                if st.button("🧠 Claude", key="claude_button"):
                    voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
                    if message.strip():
                        md_file, audio_file, _ = asyncio.run(perform_claude_search(message, st.session_state.username))
                        if audio_file:
                            play_and_download_audio(audio_file)
                    st.session_state.timer_start = time.time()
                    save_username(st.session_state.username)
                    st.rerun()

            with col_arxiv:
                if st.button("🔍 ArXiv", key="arxiv_button"):
                    voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
                    if message.strip():
                        md_file, audio_file = asyncio.run(perform_arxiv_search(message, st.session_state.username))
                        if audio_file:
                            play_and_download_audio(audio_file)
                    st.session_state.timer_start = time.time()
                    save_username(st.session_state.username)
                    st.rerun()

    elif tab_main == "🔍 ArXiv":
        st.subheader("🔍 Query ArXiv")
        q = st.text_input("🔍 Query:", key="arxiv_query")
        if q and q != st.session_state.last_query:
            st.session_state.last_query = q
            if st.session_state.autosearch or st.button("🔍 Run", key="arxiv_run"):
                result, papers = asyncio.run(perform_ai_lookup(q, useArxiv=st.session_state['use_arxiv'], useArxivAudio=st.session_state['use_arxiv_audio']))
                st.markdown(f"### Query: {q}")
                for i, p in enumerate(papers, 1):
                    expander_label = f"{p['title']} | [arXiv Link]({p['url']})"
                    with st.expander(expander_label):
                        with open(p['md_file'], 'r', encoding='utf-8') as f:
                            content = f.read()
                        numbered_content = "\n".join(f"{j+1}. {line}" for j, line in enumerate(content.split('\n')))
                        st.code(numbered_content, language="python")

    elif tab_main == "📚 PDF to Audio":
        audio_processor = AudioProcessor()
        pdf_file = st.file_uploader("Choose PDF", "pdf", key="pdf_upload")
        max_pages = st.slider('Pages', 1, 100, 10, key="pdf_pages")
        if pdf_file:
            with st.spinner('Processing...'):
                texts, audios, total = process_pdf(pdf_file, max_pages, st.session_state['tts_voice'], audio_processor)
                for i, text in enumerate(texts):
                    with st.expander(f"Page {i+1}"):
                        st.markdown(text)
                        while i not in audios:
                            time.sleep(0.1)
                        if audios.get(i):
                            st.audio(audios[i])
                            st.markdown(get_download_link(audios[i], "mp3"), unsafe_allow_html=True)
                            voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
                            asyncio.run(save_chat_entry(st.session_state.username, f"PDF Page {i+1} converted to audio: {audios[i]}", voice))

    # Always Visible Media Gallery
    st.header("📸 Media Gallery")
    all_files = sorted(glob.glob("*.md") + glob.glob("*.mp3") + glob.glob("*.png") + glob.glob("*.mp4"), key=os.path.getmtime)
    md_files = [f for f in all_files if f.endswith('.md') and os.path.basename(f) != "README.md"]
    mp3_files = [f for f in all_files if f.endswith('.mp3')]
    png_files = [f for f in all_files if f.endswith('.png')]
    mp4_files = [f for f in all_files if f.endswith('.mp4')]

    st.subheader("All Submitted Text")
    all_md_content = concatenate_markdown_files()
    with st.expander("View All Markdown Content"):
        st.markdown(all_md_content)

    st.subheader("🎵 Audio (MP3)")
    for filename, (num, mp3) in sorted(st.session_state['mp3_files'].items(), key=lambda x: x[1][0]):
        with st.expander(f"{num}. {os.path.basename(mp3)}"):
            st.audio(mp3)
            st.markdown(get_download_link(mp3, "mp3"), unsafe_allow_html=True)

    st.subheader("🖼️ Images (PNG)")
    for png in sorted(png_files, key=os.path.getmtime):
        with st.expander(os.path.basename(png)):
            st.image(png, use_container_width=True)
            st.markdown(get_download_link(png, "png"), unsafe_allow_html=True)

    st.subheader("🎥 Videos (MP4)")
    for mp4 in sorted(mp4_files, key=os.path.getmtime):
        with st.expander(os.path.basename(mp4)):
            st.video(mp4)
            st.markdown(get_download_link(mp4, "mp4"), unsafe_allow_html=True)

    # 🗂️ Sidebar with Dialog and Audio
    st.sidebar.subheader("Voice Settings")
    new_username = st.sidebar.selectbox("Change Name/Voice", list(FUN_USERNAMES.keys()), index=list(FUN_USERNAMES.keys()).index(st.session_state.username), key="username_select")
    if new_username != st.session_state.username:
        asyncio.run(save_chat_entry("System 🌟", f"{st.session_state.username} changed to {new_username}", "en-US-AriaNeural"))
        st.session_state.username, st.session_state.tts_voice = new_username, FUN_USERNAMES[new_username]
        st.session_state.timer_start = time.time()
        save_username(st.session_state.username)
        st.rerun()

    st.sidebar.markdown("### 💬 Chat Dialog")
    chat_content = asyncio.run(load_chat())
    with st.sidebar.expander("Chat History"):
        numbered_content = "\n".join(f"{i+1}. {line}" for i, line in enumerate(chat_content))
        st.code(numbered_content, language="python")

    st.sidebar.subheader("Vote Totals")
    chat_votes = load_votes(QUOTE_VOTES_FILE)
    image_votes = load_votes(IMAGE_VOTES_FILE)
    for item, count in chat_votes.items():
        st.sidebar.write(f"{item}: {count} votes")
    for image, count in image_votes.items():
        st.sidebar.write(f"{image}: {count} votes")

    st.sidebar.markdown("### 📂 File History")
    for f in all_files[:10]:
        st.sidebar.write(f"{FILE_EMOJIS.get(f.split('.')[-1], '📄')} {os.path.basename(f)}")

    st.sidebar.subheader("📦 Zip Downloads")
    if st.sidebar.button("⬇️ Zip All", key="zip_all"):
        zip_name = create_zip_of_files(all_files, "All")
        if zip_name:
            st.session_state['download_link_cache'] = {}
    if st.sidebar.button("⬇️ Zip All MD", key="zip_md"):
        zip_name = create_zip_of_files(md_files, "MD")
        if zip_name:
            st.session_state['download_link_cache'] = {}
    if st.sidebar.button("⬇️ Zip All MP3", key="zip_mp3"):
        zip_name = create_zip_of_files(mp3_files, "MP3")
        if zip_name:
            st.session_state['download_link_cache'] = {}
    if st.sidebar.button("⬇️ Zip All PNG", key="zip_png"):
        zip_name = create_zip_of_files(png_files, "PNG")
        if zip_name:
            st.session_state['download_link_cache'] = {}
    if st.sidebar.button("⬇️ Zip All MP4", key="zip_mp4"):
        zip_name = create_zip_of_files(mp4_files, "MP4")
        if zip_name:
            st.session_state['download_link_cache'] = {}

    zip_files = sorted(glob.glob("*.zip"), key=os.path.getmtime, reverse=True)
    for zip_file in zip_files:
        st.sidebar.markdown(get_download_link(zip_file, "zip"), unsafe_allow_html=True)

    # Refresh Timer in Sidebar
    st.sidebar.subheader("Set Refresh Rate ⏳")
    st.markdown("""
        <style>
        .timer {
            font-size: 24px;
            color: #ffcc00;
            text-align: center;
            animation: pulse 1s infinite;
        }
        @keyframes pulse {
            0% { transform: scale(1); }
            50% { transform: scale(1.1); }
            100% { transform: scale(1); }
        }
        </style>
    """, unsafe_allow_html=True)

    refresh_rate = st.sidebar.slider("Refresh Rate (seconds)", min_value=1, max_value=300, value=st.session_state.refresh_rate, step=1)
    if refresh_rate != st.session_state.refresh_rate:
        st.session_state.refresh_rate = refresh_rate
        st.session_state.timer_start = time.time()
        save_username(st.session_state.username)

    col1, col2, col3 = st.sidebar.columns(3)
    with col1:
        if st.button("🐇 Small (1s)"):
            st.session_state.refresh_rate = 1
            st.session_state.timer_start = time.time()
            save_username(st.session_state.username)
    with col2:
        if st.button("🐢 Medium (5s)"):
            st.session_state.refresh_rate = 5
            st.session_state.timer_start = time.time()
            save_username(st.session_state.username)
    with col3:
        if st.button("🐘 Large (5m)"):
            st.session_state.refresh_rate = 300
            st.session_state.timer_start = time.time()
            save_username(st.session_state.username)

    timer_placeholder = st.sidebar.empty()
    start_time = st.session_state.timer_start
    remaining_time = int(st.session_state.refresh_rate - (time.time() - start_time))
    if remaining_time <= 0:
        st.session_state.timer_start = time.time()
        st.session_state.last_refresh = time.time()
        st.rerun()
    else:
        timer_placeholder.markdown(f"<p class='timer'>⏳ Next refresh in: {remaining_time} seconds</p>", unsafe_allow_html=True)

    if not st.session_state.server_running and not st.session_state.server_task:
        st.session_state.server_task = threading.Thread(target=start_websocket_server, daemon=True)
        st.session_state.server_task.start()

if __name__ == "__main__":
    main()