awacke1 committed
Commit 4c8ec70 · verified · 1 Parent(s): 4f3b7e1

Create app.py

Files changed (1)
  1. app.py +817 -0
app.py ADDED
@@ -0,0 +1,817 @@
import streamlit as st
import asyncio
import websockets
import uuid
from datetime import datetime
import os
import random
import time
import hashlib
from PIL import Image
import glob
import base64
import io
import streamlit.components.v1 as components
import edge_tts
from audio_recorder_streamlit import audio_recorder
import nest_asyncio
import re
from streamlit_paste_button import paste_image_button
import pytz
import shutil
import anthropic
import openai
from PyPDF2 import PdfReader
import threading
import json
import zipfile
from gradio_client import Client
from dotenv import load_dotenv
from streamlit_marquee import streamlit_marquee
from collections import defaultdict, Counter
import pandas as pd

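# NOTE: Beyond the standard library, the imports above assume these pip packages are
# installed (names inferred from the import statements; exact versions are not pinned
# in this commit): streamlit, websockets, Pillow, edge-tts, audio-recorder-streamlit,
# nest-asyncio, streamlit-paste-button, pytz, anthropic, openai, PyPDF2, gradio-client,
# python-dotenv, streamlit-marquee, pandas.
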
# 🛠️ Patch asyncio for nesting
nest_asyncio.apply()

# 🎨 Page Config
st.set_page_config(
    page_title="🚲TalkingAIResearcher🏆",
    page_icon="🚲🏆",
    layout="wide",
    initial_sidebar_state="auto"
)

# 🌟 Static Config
icons = '🤖🧠🔬📝'
Site_Name = '🤖🧠Chat & Quote Node📝🔬'
START_ROOM = "Sector 🌌"
FUN_USERNAMES = {
    "CosmicJester 🌌": "en-US-AriaNeural",
    "PixelPanda 🐼": "en-US-JennyNeural",
    "QuantumQuack 🦆": "en-GB-SoniaNeural",
    "StellarSquirrel 🐿️": "en-AU-NatashaNeural",
    "GizmoGuru ⚙️": "en-CA-ClaraNeural",
    "NebulaNinja 🌠": "en-US-GuyNeural",
    "ByteBuster 💾": "en-GB-RyanNeural",
    "GalacticGopher 🌍": "en-AU-WilliamNeural",
    "RocketRaccoon 🚀": "en-CA-LiamNeural",
    "EchoElf 🧝": "en-US-AnaNeural",
    "PhantomFox 🦊": "en-US-BrandonNeural",
    "WittyWizard 🧙": "en-GB-ThomasNeural",
    "LunarLlama 🌙": "en-AU-FreyaNeural",
    "SolarSloth ☀️": "en-CA-LindaNeural",
    "AstroAlpaca 🦙": "en-US-ChristopherNeural",
    "CyberCoyote 🐺": "en-GB-ElliotNeural",
    "MysticMoose 🦌": "en-AU-JamesNeural",
    "GlitchGnome 🧚": "en-CA-EthanNeural",
    "VortexViper 🐍": "en-US-AmberNeural",
    "ChronoChimp 🐒": "en-GB-LibbyNeural"
}
EDGE_TTS_VOICES = list(set(FUN_USERNAMES.values()))
FILE_EMOJIS = {"md": "📝", "mp3": "🎵", "png": "🖼️", "mp4": "🎥"}

# 📁 Directories
for d in ["chat_logs", "vote_logs", "audio_logs", "history_logs", "audio_cache", "paper_metadata"]:
    os.makedirs(d, exist_ok=True)

CHAT_DIR = "chat_logs"
VOTE_DIR = "vote_logs"
MEDIA_DIR = "."
AUDIO_CACHE_DIR = "audio_cache"
AUDIO_DIR = "audio_logs"
PAPER_DIR = "paper_metadata"
STATE_FILE = "user_state.txt"

CHAT_FILE = os.path.join(CHAT_DIR, "global_chat.md")
QUOTE_VOTES_FILE = os.path.join(VOTE_DIR, "quote_votes.md")
IMAGE_VOTES_FILE = os.path.join(VOTE_DIR, "image_votes.md")
HISTORY_FILE = os.path.join(VOTE_DIR, "vote_history.md")

# 🔑 API Keys
load_dotenv()
anthropic_key = os.getenv('ANTHROPIC_API_KEY', st.secrets.get('ANTHROPIC_API_KEY', ""))
openai_api_key = os.getenv('OPENAI_API_KEY', st.secrets.get('OPENAI_API_KEY', ""))
openai_client = openai.OpenAI(api_key=openai_api_key)

# 🕒 Timestamp Helper
def format_timestamp_prefix(username=""):
    central = pytz.timezone('US/Central')
    now = datetime.now(central)
    return f"{now.strftime('%Y%m%d_%H%M%S')}-by-{username}"

# 📈 Performance Timer
class PerformanceTimer:
    def __init__(self, name):
        self.name, self.start = name, None
    def __enter__(self):
        self.start = time.time()
        return self
    def __exit__(self, *args):
        duration = time.time() - self.start
        st.session_state['operation_timings'][self.name] = duration
        st.session_state['performance_metrics'][self.name].append(duration)

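# PerformanceTimer is a context manager but is not invoked elsewhere in this file.
# A typical (hypothetical) usage sketch, assuming init_session_state() has already run:
#     with PerformanceTimer("tts_generation"):
#         ...timed work...
# which records the duration in st.session_state['operation_timings'] and appends it
# to st.session_state['performance_metrics'].
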
# 🎛️ Session State Init
def init_session_state():
    defaults = {
        'server_running': False, 'server_task': None, 'active_connections': {},
        'media_notifications': [], 'last_chat_update': 0, 'displayed_chat_lines': [],
        'message_text': "", 'audio_cache': {}, 'pasted_image_data': None,
        'quote_line': None, 'refresh_rate': 5, 'base64_cache': {},
        'transcript_history': [], 'last_transcript': "", 'image_hashes': set(),
        'tts_voice': "en-US-AriaNeural", 'chat_history': [], 'marquee_settings': {
            "background": "#1E1E1E", "color": "#FFFFFF", "font-size": "14px",
            "animationDuration": "20s", "width": "100%", "lineHeight": "35px"
        }, 'operation_timings': {}, 'performance_metrics': defaultdict(list),
        'enable_audio': True, 'download_link_cache': {}, 'username': None,
        'autosend': True, 'autosearch': True, 'last_message': "", 'last_query': "",
        'mp3_files': {}, 'timer_start': time.time(), 'quote_index': 0,
        'quote_source': "famous", 'last_sent_transcript': "", 'old_val': None,
        'last_refresh': time.time(), 'paper_metadata': {}
    }
    for k, v in defaults.items():
        if k not in st.session_state:
            st.session_state[k] = v

# 🖌️ Marquee Helpers
def update_marquee_settings_ui():
    st.sidebar.markdown("### 🎯 Marquee Settings")
    cols = st.sidebar.columns(2)
    with cols[0]:
        st.session_state['marquee_settings']['background'] = st.color_picker("🎨 Background", "#1E1E1E")
        st.session_state['marquee_settings']['color'] = st.color_picker("✍️ Text", "#FFFFFF")
    with cols[1]:
        st.session_state['marquee_settings']['font-size'] = f"{st.slider('📏 Size', 10, 24, 14)}px"
        st.session_state['marquee_settings']['animationDuration'] = f"{st.slider('⏱️ Speed', 1, 20, 20)}s"

def display_marquee(text, settings, key_suffix=""):
    truncated = text[:280] + "..." if len(text) > 280 else text
    streamlit_marquee(content=truncated, **settings, key=f"marquee_{key_suffix}")
    st.write("")

# 📝 Text & File Helpers
def clean_text_for_tts(text):
    return re.sub(r'[#*!\[\]]+', '', ' '.join(text.split()))[:200] or "No text"

def clean_text_for_filename(text):
    return '_'.join(re.sub(r'[^\w\s-]', '', text.lower()).split())[:200]

def get_high_info_terms(text, top_n=10):
    stop_words = {'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with'}
    words = re.findall(r'\b\w+(?:-\w+)*\b', text.lower())
    bi_grams = [' '.join(pair) for pair in zip(words, words[1:])]
    filtered = [t for t in words + bi_grams if t not in stop_words and len(t.split()) <= 2]
    return [t for t, _ in Counter(filtered).most_common(top_n)]

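# Illustrative example (not part of the app's control flow): with the helper above,
# get_high_info_terms("deep learning for deep learning", 3) returns roughly
# ['deep', 'learning', 'deep learning'] -- single words plus bigrams, minus stop words.
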
def generate_filename(prompt, username, file_type="md", title=None):
    timestamp = format_timestamp_prefix(username)
    if title:
        high_info = '-'.join(get_high_info_terms(title, 5))
        return f"{timestamp}-{clean_text_for_filename(prompt[:20])}-{high_info}.{file_type}"
    hash_val = hashlib.md5(prompt.encode()).hexdigest()[:8]
    return f"{timestamp}-{hash_val}.{file_type}"

def create_file(prompt, username, file_type="md", title=None):
    filename = generate_filename(prompt, username, file_type, title)
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(prompt)
    return filename

def get_download_link(file, file_type="mp3"):
    cache_key = f"dl_{file}"
    if cache_key not in st.session_state['download_link_cache']:
        with open(file, "rb") as f:
            b64 = base64.b64encode(f.read()).decode()
        mime_types = {"mp3": "audio/mpeg", "png": "image/png", "mp4": "video/mp4", "md": "text/markdown", "zip": "application/zip"}
        st.session_state['download_link_cache'][cache_key] = f'<a href="data:{mime_types.get(file_type, "application/octet-stream")};base64,{b64}" download="{os.path.basename(file)}">{FILE_EMOJIS.get(file_type, "Download")} Download {os.path.basename(file)}</a>'
    return st.session_state['download_link_cache'][cache_key]

def save_username(username):
    try:
        with open(STATE_FILE, 'w') as f:
            f.write(username)
    except Exception as e:
        print(f"Failed to save username: {e}")

def load_username():
    if os.path.exists(STATE_FILE):
        try:
            with open(STATE_FILE, 'r') as f:
                return f.read().strip()
        except Exception as e:
            print(f"Failed to load username: {e}")
    return None

def concatenate_markdown_files():
    md_files = sorted(glob.glob("*.md"), key=os.path.getmtime, reverse=True)
    all_md_content = ""
    for md_file in md_files:
        with open(md_file, 'r', encoding='utf-8') as f:
            all_md_content += f.read() + "\n\n---\n\n"
    return all_md_content.strip()

# 🎶 Audio Processing
async def async_edge_tts_generate(text, voice, username, rate=0, pitch=0, file_format="mp3"):
    cache_key = f"{text[:100]}_{voice}_{rate}_{pitch}_{file_format}"
    if cache_key in st.session_state['audio_cache']:
        return st.session_state['audio_cache'][cache_key], 0
    start_time = time.time()
    text = clean_text_for_tts(text)
    if not text or text == "No text":
        print(f"Skipping audio generation for empty/invalid text: '{text}'")
        return None, 0
    filename = f"{format_timestamp_prefix(username)}-{hashlib.md5(text.encode()).hexdigest()[:8]}.{file_format}"
    try:
        communicate = edge_tts.Communicate(text, voice, rate=f"{rate:+d}%", pitch=f"{pitch:+d}Hz")
        await communicate.save(filename)
        st.session_state['audio_cache'][cache_key] = filename
        return filename, time.time() - start_time
    except edge_tts.exceptions.NoAudioReceived as e:
        print(f"No audio received for text: '{text}' with voice: {voice}. Error: {e}")
        return None, 0
    except Exception as e:
        print(f"Error generating audio for text: '{text}' with voice: {voice}. Error: {e}")
        return None, 0

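# Note on caching: async_edge_tts_generate keys its in-session cache on the first 100
# characters of the raw text plus voice/rate/pitch/format, so long messages that share a
# prefix can collide onto the same MP3 within one session. Generated files are named with
# the Central-time timestamp prefix plus an MD5 fragment of the cleaned text.
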
def play_and_download_audio(file_path):
    if file_path and os.path.exists(file_path):
        st.audio(file_path)
        st.markdown(get_download_link(file_path), unsafe_allow_html=True)

def load_mp3_viewer():
    mp3_files = sorted(glob.glob("*.mp3"), key=os.path.getmtime, reverse=True)
    for mp3 in mp3_files:
        filename = os.path.basename(mp3)
        if filename not in st.session_state['mp3_files']:
            st.session_state['mp3_files'][filename] = mp3

async def save_chat_entry(username, message, voice, is_markdown=False):
    if not message.strip() or message == st.session_state.last_transcript:
        return None, None
    central = pytz.timezone('US/Central')
    timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
    entry = f"[{timestamp}] {username} ({voice}): {message}" if not is_markdown else f"[{timestamp}] {username} ({voice}):\n```markdown\n{message}\n```"
    md_file = create_file(entry, username, "md")
    with open(CHAT_FILE, 'a') as f:
        f.write(f"{entry}\n")
    audio_file, _ = await async_edge_tts_generate(message, voice, username)
    if audio_file:
        with open(HISTORY_FILE, 'a') as f:
            f.write(f"[{timestamp}] {username}: Audio - {audio_file}\n")
        st.session_state['mp3_files'][os.path.basename(audio_file)] = audio_file
    await broadcast_message(f"{username}|{message}", "chat")
    st.session_state.last_chat_update = time.time()
    st.session_state.chat_history.append(entry)
    st.session_state.last_transcript = message
    return md_file, audio_file

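# Each chat entry is appended to chat_logs/global_chat.md as
# "[YYYY-MM-DD HH:MM:SS] username (voice): message" (wrapped in a markdown fence when
# is_markdown is True), mirrored into its own timestamped .md file, narrated to MP3,
# and broadcast to connected WebSocket clients as "username|message".
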
async def load_chat():
    if not os.path.exists(CHAT_FILE):
        with open(CHAT_FILE, 'a') as f:
            f.write(f"# {START_ROOM} Chat\n\nWelcome to the cosmic hub! 🎤\n")
    with open(CHAT_FILE, 'r') as f:
        content = f.read().strip()
    lines = content.split('\n')
    unique_lines = list(dict.fromkeys(line for line in lines if line.strip()))
    return unique_lines

# Claude Search Function
async def perform_claude_search(query, username):
    if not query.strip() or query == st.session_state.last_transcript:
        return None, None, None
    client = anthropic.Anthropic(api_key=anthropic_key)
    response = client.messages.create(
        model="claude-3-sonnet-20240229",
        max_tokens=1000,
        messages=[{"role": "user", "content": query}]
    )
    result = response.content[0].text
    st.markdown(f"### Claude's Reply 🧠\n{result}")

    voice = FUN_USERNAMES.get(username, "en-US-AriaNeural")
    md_file, audio_file = await save_chat_entry(username, f"Claude Search: {query}\nResponse: {result}", voice, True)
    return md_file, audio_file, result

# ArXiv Search Function
async def perform_arxiv_search(query, username, claude_result=None):
    if not query.strip() or query == st.session_state.last_transcript:
        return None, None
    if claude_result is None:
        client = anthropic.Anthropic(api_key=anthropic_key)
        claude_response = client.messages.create(
            model="claude-3-sonnet-20240229",
            max_tokens=1000,
            messages=[{"role": "user", "content": query}]
        )
        claude_result = claude_response.content[0].text
        st.markdown(f"### Claude's Reply 🧠\n{claude_result}")

    enhanced_query = f"{query}\n\n{claude_result}"
    gradio_client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    refs = gradio_client.predict(
        enhanced_query, 10, "Semantic Search", "mistralai/Mixtral-8x7B-Instruct-v0.1", api_name="/update_with_rag_md"
    )[0]
    result = f"🔎 {enhanced_query}\n\n{refs}"
    voice = FUN_USERNAMES.get(username, "en-US-AriaNeural")
    md_file, audio_file = await save_chat_entry(username, f"ArXiv Search: {query}\nClaude Response: {claude_result}\nArXiv Results: {refs}", voice, True)
    return md_file, audio_file

async def perform_ai_lookup(q, vocal_summary=True, extended_refs=False, titles_summary=True, full_audio=False, useArxiv=True, useArxivAudio=False):
    start = time.time()
    client = anthropic.Anthropic(api_key=anthropic_key)
    response = client.messages.create(
        model="claude-3-sonnet-20240229",
        max_tokens=1000,
        messages=[{"role": "user", "content": q}]
    )
    st.write("Claude's reply 🧠:")
    st.markdown(response.content[0].text)

    result = response.content[0].text
    md_file = create_file(result, "System", "md")
    audio_file, _ = await async_edge_tts_generate(result, st.session_state['tts_voice'], "System")
    st.subheader("📝 Main Response Audio")
    play_and_download_audio(audio_file)

    papers = []
    if useArxiv:
        q = q + result
        st.write('Running Arxiv RAG with Claude inputs.')
        gradio_client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
        refs = gradio_client.predict(
            q, 20, "Semantic Search", "mistralai/Mixtral-8x7B-Instruct-v0.1", api_name="/update_with_rag_md"
        )[0]
        papers = parse_arxiv_refs(refs, q)
        for paper in papers:
            filename = create_file(generate_5min_feature_markdown(paper), "System", "md", paper['title'])
            paper['md_file'] = filename
            st.session_state['paper_metadata'][paper['title']] = filename
    if papers and useArxivAudio:
        await create_paper_audio_files(papers, q)
    elapsed = time.time() - start
    st.write(f"**Total Elapsed:** {elapsed:.2f} s")
    return result, papers

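# Pipeline note: perform_ai_lookup asks Claude first, saves and narrates that answer, then
# (optionally) feeds query + answer into the awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern
# Gradio Space, parses the returned references into per-paper markdown files, and can
# narrate each paper. The vocal_summary/extended_refs/titles_summary/full_audio flags are
# accepted but not acted on in this version.
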
# 🌐 WebSocket Handling
async def websocket_handler(websocket, path):
    client_id = str(uuid.uuid4())
    room_id = "chat"
    if room_id not in st.session_state.active_connections:
        st.session_state.active_connections[room_id] = {}
    st.session_state.active_connections[room_id][client_id] = websocket
    username = st.session_state.get('username', random.choice(list(FUN_USERNAMES.keys())))
    chat_content = await load_chat()
    if not any(f"Client-{client_id}" in line for line in chat_content):
        await save_chat_entry("System 🌟", f"{username} has joined {START_ROOM}!", "en-US-AriaNeural")
    try:
        async for message in websocket:
            if '|' in message:
                username, content = message.split('|', 1)
                voice = FUN_USERNAMES.get(username, "en-US-AriaNeural")
                await save_chat_entry(username, content, voice)
            else:
                await websocket.send("ERROR|Message format: username|content")
    except websockets.ConnectionClosed:
        await save_chat_entry("System 🌟", f"{username} has left {START_ROOM}!", "en-US-AriaNeural")
    finally:
        if room_id in st.session_state.active_connections and client_id in st.session_state.active_connections[room_id]:
            del st.session_state.active_connections[room_id][client_id]

async def broadcast_message(message, room_id):
    if room_id in st.session_state.active_connections:
        disconnected = []
        for client_id, ws in st.session_state.active_connections[room_id].items():
            try:
                await ws.send(message)
            except websockets.ConnectionClosed:
                disconnected.append(client_id)
        for client_id in disconnected:
            if client_id in st.session_state.active_connections[room_id]:
                del st.session_state.active_connections[room_id][client_id]

async def run_websocket_server():
    if not st.session_state.server_running:
        server = await websockets.serve(websocket_handler, '0.0.0.0', 8765)
        st.session_state.server_running = True
        await server.wait_closed()

def start_websocket_server():
    asyncio.run(run_websocket_server())

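# Wire protocol: the server listens on port 8765 and expects plain-text frames of the form
# "username|content"; anything else gets an "ERROR|..." frame back. A minimal client sketch
# (hypothetical, not part of this app) for manual testing:
#
#     import asyncio, websockets
#     async def send_one():
#         async with websockets.connect("ws://localhost:8765") as ws:
#             await ws.send("CosmicJester 🌌|hello from a test client")
#     asyncio.run(send_one())
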
# 📚 PDF to Audio
class AudioProcessor:
    def __init__(self):
        self.cache_dir = AUDIO_CACHE_DIR
        os.makedirs(self.cache_dir, exist_ok=True)
        self.metadata = json.load(open(f"{self.cache_dir}/metadata.json")) if os.path.exists(f"{self.cache_dir}/metadata.json") else {}

    def _save_metadata(self):
        with open(f"{self.cache_dir}/metadata.json", 'w') as f:
            json.dump(self.metadata, f)

    async def create_audio(self, text, voice='en-US-AriaNeural'):
        cache_key = hashlib.md5(f"{text}:{voice}".encode()).hexdigest()
        cache_path = f"{self.cache_dir}/{cache_key}.mp3"
        if cache_key in self.metadata and os.path.exists(cache_path):
            return cache_path
        text = clean_text_for_tts(text)
        if not text:
            return None
        communicate = edge_tts.Communicate(text, voice)
        await communicate.save(cache_path)
        self.metadata[cache_key] = {'timestamp': datetime.now().isoformat(), 'text_length': len(text), 'voice': voice}
        self._save_metadata()
        return cache_path

def process_pdf(pdf_file, max_pages, voice, audio_processor):
    reader = PdfReader(pdf_file)
    total_pages = min(len(reader.pages), max_pages)
    texts, audios = [], {}
    async def process_page(i, text):
        audio_path = await audio_processor.create_audio(text, voice)
        if audio_path:
            audios[i] = audio_path
    for i in range(total_pages):
        text = reader.pages[i].extract_text()
        texts.append(text)
        # Bind i and text as defaults so each thread keeps its own page instead of the
        # loop's final values (lambda closures bind late otherwise).
        threading.Thread(target=lambda i=i, text=text: asyncio.run(process_page(i, text))).start()
    return texts, audios, total_pages

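# process_pdf returns immediately: page text is extracted up front, while narration for each
# page runs in its own background thread and lands in the shared `audios` dict when ready.
# The UI below polls that dict, so a page whose narration fails (create_audio returning None)
# is never marked ready -- worth keeping in mind when debugging.
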
# 🔍 ArXiv & AI Lookup
def parse_arxiv_refs(ref_text, query):
    if not ref_text:
        return []
    papers = []
    current = {}
    for line in ref_text.split('\n'):
        if line.count('|') == 2:
            if current:
                papers.append(current)
            date, title, *_ = line.strip('* ').split('|')
            url = re.search(r'(https://arxiv.org/\S+)', line).group(1) if re.search(r'(https://arxiv.org/\S+)', line) else f"paper_{len(papers)}"
            current = {'date': date, 'title': title, 'url': url, 'authors': '', 'summary': '', 'full_audio': None, 'download_base64': '', 'query': query}
        elif current:
            if not current['authors']:
                current['authors'] = line.strip('* ')
            else:
                current['summary'] += ' ' + line.strip() if current['summary'] else line.strip()
    if current:
        papers.append(current)
    return papers[:20]

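# parse_arxiv_refs assumes the Gradio Space returns markdown where each paper header line
# contains exactly two '|' separators (date | title | link), followed by an authors line and
# then summary lines; only the first 20 parsed papers are kept. That upstream format is an
# assumption inferred from this parser, not something pinned by this commit.
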
def generate_5min_feature_markdown(paper):
    title, summary, authors, date, url = paper['title'], paper['summary'], paper['authors'], paper['date'], paper['url']
    pdf_url = url.replace("abs", "pdf") + (".pdf" if not url.endswith(".pdf") else "")
    wct, sw = len(title.split()), len(summary.split())
    terms = get_high_info_terms(summary, 15)
    rouge = round((len(terms) / max(sw, 1)) * 100, 2)
    mermaid = "```mermaid\nflowchart TD\n" + "\n".join(f'    T{i+1}["{terms[i]}"] --> T{i+2}["{terms[i+1]}"]' for i in range(len(terms)-1)) + "\n```"
    return f"""
## 📄 {title}
**Authors:** {authors}
**Date:** {date}
**Words:** Title: {wct}, Summary: {sw}
**Links:** [Abstract]({url}) | [PDF]({pdf_url})
**Terms:** {', '.join(terms)}
**ROUGE:** {rouge}%
### 🎤 TTS Read Aloud
- **Title:** {title}
- **Terms:** {', '.join(terms)}
- **ROUGE:** {rouge}%
#### Concepts Graph
{mermaid}
---
"""

async def create_paper_audio_files(papers, query):
    for p in papers:
        audio_text = clean_text_for_tts(f"{p['title']} by {p['authors']}. {p['summary']}")
        p['full_audio'], _ = await async_edge_tts_generate(audio_text, st.session_state['tts_voice'], p['authors'])
        if p['full_audio']:
            p['download_base64'] = get_download_link(p['full_audio'])

def save_vote(file, item, user_hash):
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    entry = f"[{timestamp}] {user_hash} voted for {item}"
    try:
        with open(file, 'a') as f:
            f.write(f"{entry}\n")
        with open(HISTORY_FILE, 'a') as f:
            f.write(f"- {timestamp} - User {user_hash} voted for {item}\n")
        return True
    except Exception as e:
        print(f"Vote save flop: {e}")
        return False

def load_votes(file):
    if not os.path.exists(file):
        with open(file, 'w') as f:
            f.write("# Vote Tally\n\nNo votes yet - get clicking! 🖱️\n")
    try:
        with open(file, 'r') as f:
            lines = f.read().strip().split('\n')
        votes = {}
        for line in lines[2:]:
            if line.strip() and 'voted for' in line:
                item = line.split('voted for ')[1]
                votes[item] = votes.get(item, 0) + 1
        return votes
    except Exception as e:
        print(f"Vote load oopsie: {e}")
        return {}

def generate_user_hash():
    if 'user_hash' not in st.session_state:
        session_id = str(random.getrandbits(128))
        hash_object = hashlib.md5(session_id.encode())
        st.session_state['user_hash'] = hash_object.hexdigest()[:8]
    return st.session_state['user_hash']

async def save_pasted_image(image, username):
    img_hash = hashlib.md5(image.tobytes()).hexdigest()[:8]
    if img_hash in st.session_state.image_hashes:
        return None
    timestamp = format_timestamp_prefix(username)
    filename = f"{timestamp}-{img_hash}.png"
    filepath = filename
    image.save(filepath, "PNG")
    st.session_state.image_hashes.add(img_hash)
    return filepath

# 📦 Zip Files
def create_zip_of_files(md_files, mp3_files, png_files, mp4_files, query):
    all_files = md_files + mp3_files + png_files + mp4_files
    if not all_files:
        return None
    terms = get_high_info_terms(" ".join([open(f, 'r', encoding='utf-8').read() if f.endswith('.md') else os.path.splitext(os.path.basename(f))[0].replace('_', ' ') for f in all_files] + [query]), 5)
    zip_name = f"{format_timestamp_prefix()}_{'-'.join(terms)[:20]}.zip"
    with zipfile.ZipFile(zip_name, 'w') as z:
        [z.write(f) for f in all_files]
    return zip_name

# 🎮 Main Interface
def main():
    init_session_state()
    load_mp3_viewer()
    saved_username = load_username()
    if saved_username and saved_username in FUN_USERNAMES:
        st.session_state.username = saved_username
    if not st.session_state.username:
        available = [n for n in FUN_USERNAMES if not any(f"{n} has joined" in l for l in asyncio.run(load_chat()))]
        st.session_state.username = random.choice(available or list(FUN_USERNAMES.keys()))
        st.session_state.tts_voice = FUN_USERNAMES[st.session_state.username]
        asyncio.run(save_chat_entry("System 🌟", f"{st.session_state.username} has joined {START_ROOM}!", "en-US-AriaNeural"))
    save_username(st.session_state.username)

    st.title(f"{Site_Name} for {st.session_state.username}")
    update_marquee_settings_ui()
    display_marquee(f"🚀 Welcome to {START_ROOM} | 🤖 {st.session_state.username}", st.session_state['marquee_settings'], "welcome")

    # Speech Component at Top Level
    mycomponent = components.declare_component("mycomponent", path="mycomponent")
    val = mycomponent(my_input_value="")
    if val and val != st.session_state.last_transcript:
        val_stripped = val.strip().replace('\n', ' ')
        if val_stripped:
            voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
            md_file, audio_file = asyncio.run(save_chat_entry(st.session_state.username, val_stripped, voice))
            if audio_file:
                play_and_download_audio(audio_file)
            st.rerun()

    tab_main = st.radio("Action:", ["🎤 Chat & Voice", "🔍 ArXiv", "📚 PDF to Audio"], horizontal=True, key="tab_main")
    useArxiv = st.checkbox("Search ArXiv", True, key="use_arxiv")
    useArxivAudio = st.checkbox("ArXiv Audio", False, key="use_arxiv_audio")
    st.checkbox("Autosend Chat", value=True, key="autosend")
    st.checkbox("Autosearch ArXiv", value=True, key="autosearch")

    # 🎤 Chat & Voice
    if tab_main == "🎤 Chat & Voice":
        st.subheader(f"{START_ROOM} Chat 💬")
        chat_content = asyncio.run(load_chat())
        chat_container = st.container()
        with chat_container:
            for i, line in enumerate(chat_content):
                with st.expander(f"Line {i+1}"):
                    st.markdown(line)

        message = st.text_input(f"Message as {st.session_state.username}", key="message_input")
        paste_result = paste_image_button("📋 Paste Image or Text", key="paste_button_msg")
        if paste_result.image_data is not None:
            voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
            if isinstance(paste_result.image_data, str):
                st.session_state.message_text = paste_result.image_data
                message = st.text_input(f"Message as {st.session_state.username}", key="message_input_paste", value=st.session_state.message_text)
            else:
                st.image(paste_result.image_data, caption="Pasted Image")
                filename = asyncio.run(save_pasted_image(paste_result.image_data, st.session_state.username))
                if filename:
                    st.session_state.pasted_image_data = filename
                    asr_text = f"User {st.session_state.username} requested analysis of an image uploaded at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
                    md_file_claude, audio_file_claude, claude_result = asyncio.run(perform_claude_search(asr_text, st.session_state.username))
                    if audio_file_claude:
                        play_and_download_audio(audio_file_claude)
                    md_file_arxiv, audio_file_arxiv = asyncio.run(perform_arxiv_search(asr_text, st.session_state.username, claude_result))
                    if audio_file_arxiv:
                        play_and_download_audio(audio_file_arxiv)
                    st.session_state.timer_start = time.time()
                    save_username(st.session_state.username)
                    st.rerun()

        if (message and message != st.session_state.last_message) or st.session_state.pasted_image_data:
            st.session_state.last_message = message
            col_send, col_claude, col_arxiv = st.columns([1, 1, 1])

            with col_send:
                if st.session_state.autosend or st.button("Send 🚀", key="send_button"):
                    voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
                    if message.strip():
                        md_file, audio_file = asyncio.run(save_chat_entry(st.session_state.username, message, voice, True))
                        if audio_file:
                            play_and_download_audio(audio_file)
                    if st.session_state.pasted_image_data:
                        asyncio.run(save_chat_entry(st.session_state.username, f"Pasted image: {st.session_state.pasted_image_data}", voice))
                        st.session_state.pasted_image_data = None
                    st.session_state.timer_start = time.time()
                    save_username(st.session_state.username)
                    st.rerun()

            with col_claude:
                if st.button("🧠 Claude", key="claude_button"):
                    voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
                    if message.strip():
                        md_file, audio_file, _ = asyncio.run(perform_claude_search(message, st.session_state.username))
                        if audio_file:
                            play_and_download_audio(audio_file)
                        st.session_state.timer_start = time.time()
                        save_username(st.session_state.username)
                        st.rerun()

            with col_arxiv:
                if st.button("🔍 ArXiv", key="arxiv_button"):
                    voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
                    if message.strip():
                        md_file, audio_file = asyncio.run(perform_arxiv_search(message, st.session_state.username))
                        if audio_file:
                            play_and_download_audio(audio_file)
                        st.session_state.timer_start = time.time()
                        save_username(st.session_state.username)
                        st.rerun()

    # 🔍 ArXiv
    elif tab_main == "🔍 ArXiv":
        st.subheader("🔍 Query ArXiv")
        q = st.text_input("🔍 Query:", key="arxiv_query")
        if q and q != st.session_state.last_query:
            st.session_state.last_query = q
            if st.session_state.autosearch or st.button("🔍 Run", key="arxiv_run"):
                result, papers = asyncio.run(perform_ai_lookup(q, useArxiv=useArxiv, useArxivAudio=useArxivAudio))
                st.markdown(f"### Query: {q}")
                for i, p in enumerate(papers, 1):
                    with st.expander(f"{i}. {p['title']}"):
                        st.markdown(f"[{p['date']}] [Abstract]({p['url']}) | [PDF]({p['url'].replace('abs', 'pdf')}.pdf)")
                        if st.button(f"Load Details {i}", key=f"load_{i}"):
                            with open(p['md_file'], 'r', encoding='utf-8') as f:
                                st.markdown(f.read())

    # 📚 PDF to Audio
    elif tab_main == "📚 PDF to Audio":
        audio_processor = AudioProcessor()
        pdf_file = st.file_uploader("Choose PDF", "pdf", key="pdf_upload")
        max_pages = st.slider('Pages', 1, 100, 10, key="pdf_pages")
        if pdf_file:
            with st.spinner('Processing...'):
                texts, audios, total = process_pdf(pdf_file, max_pages, st.session_state['tts_voice'], audio_processor)
                for i, text in enumerate(texts):
                    with st.expander(f"Page {i+1}"):
                        st.markdown(text)
                        while i not in audios:
                            time.sleep(0.1)
                        if audios.get(i):
                            st.audio(audios[i])
                            st.markdown(get_download_link(audios[i], "mp3"), unsafe_allow_html=True)
                            voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
                            asyncio.run(save_chat_entry(st.session_state.username, f"PDF Page {i+1} converted to audio: {audios[i]}", voice))

    # Always Visible Media Gallery
    st.header("📸 Media Gallery")
    all_files = sorted(glob.glob("*.md") + glob.glob("*.mp3") + glob.glob("*.png") + glob.glob("*.mp4"), key=os.path.getmtime, reverse=True)
    md_files = [f for f in all_files if f.endswith('.md')]
    mp3_files = [f for f in all_files if f.endswith('.mp3')]
    png_files = [f for f in all_files if f.endswith('.png')]
    mp4_files = [f for f in all_files if f.endswith('.mp4')]

    st.subheader("All Submitted Text")
    all_md_content = concatenate_markdown_files()
    with st.expander("View All Markdown Content"):
        st.markdown(all_md_content)

    st.subheader("🎵 Audio (MP3)")
    for mp3 in mp3_files:
        with st.expander(os.path.basename(mp3)):
            st.audio(mp3)
            st.markdown(get_download_link(mp3, "mp3"), unsafe_allow_html=True)

    st.subheader("🖼️ Images (PNG)")
    for png in png_files:
        with st.expander(os.path.basename(png)):
            st.image(png, use_container_width=True)
            st.markdown(get_download_link(png, "png"), unsafe_allow_html=True)

    st.subheader("🎥 Videos (MP4)")
    for mp4 in mp4_files:
        with st.expander(os.path.basename(mp4)):
            st.video(mp4)
            st.markdown(get_download_link(mp4, "mp4"), unsafe_allow_html=True)

    # 🗂️ Sidebar with Dialog and Audio
    st.sidebar.subheader("Voice Settings")
    new_username = st.sidebar.selectbox("Change Name/Voice", list(FUN_USERNAMES.keys()), index=list(FUN_USERNAMES.keys()).index(st.session_state.username), key="username_select")
    if new_username != st.session_state.username:
        asyncio.run(save_chat_entry("System 🌟", f"{st.session_state.username} changed to {new_username}", "en-US-AriaNeural"))
        st.session_state.username, st.session_state.tts_voice = new_username, FUN_USERNAMES[new_username]
        st.session_state.timer_start = time.time()
        save_username(st.session_state.username)
        st.rerun()

    st.sidebar.markdown("### 💬 Chat Dialog")
    chat_content = asyncio.run(load_chat())
    with st.sidebar.expander("Chat History"):
        for i, line in enumerate(chat_content):
            st.markdown(f"{i+1}. {line}")

    st.sidebar.subheader("Vote Totals")
    chat_votes = load_votes(QUOTE_VOTES_FILE)
    image_votes = load_votes(IMAGE_VOTES_FILE)
    for item, count in chat_votes.items():
        st.sidebar.write(f"{item}: {count} votes")
    for image, count in image_votes.items():
        st.sidebar.write(f"{image}: {count} votes")

    st.sidebar.markdown("### 📂 File History")
    for f in all_files[:10]:
        st.sidebar.write(f"{FILE_EMOJIS.get(f.split('.')[-1], '📄')} {os.path.basename(f)}")
    if st.sidebar.button("⬇️ Zip All", key="zip_all"):
        zip_name = create_zip_of_files(md_files, mp3_files, png_files, mp4_files, "latest_query")
        if zip_name:
            st.sidebar.markdown(get_download_link(zip_name, "zip"), unsafe_allow_html=True)

    # Refresh Timer in Sidebar
    st.sidebar.subheader("Set Refresh Rate ⏳")
    st.markdown("""
    <style>
    .timer {
        font-size: 24px;
        color: #ffcc00;
        text-align: center;
        animation: pulse 1s infinite;
    }
    @keyframes pulse {
        0% { transform: scale(1); }
        50% { transform: scale(1.1); }
        100% { transform: scale(1); }
    }
    </style>
    """, unsafe_allow_html=True)

    refresh_rate = st.sidebar.slider("Refresh Rate (seconds)", min_value=1, max_value=300, value=st.session_state.refresh_rate, step=1)
    if refresh_rate != st.session_state.refresh_rate:
        st.session_state.refresh_rate = refresh_rate
        st.session_state.timer_start = time.time()
        save_username(st.session_state.username)

    col1, col2, col3 = st.sidebar.columns(3)
    with col1:
        if st.button("🐇 Small (1s)"):
            st.session_state.refresh_rate = 1
            st.session_state.timer_start = time.time()
            save_username(st.session_state.username)
    with col2:
        if st.button("🐢 Medium (5s)"):
            st.session_state.refresh_rate = 5
            st.session_state.timer_start = time.time()
            save_username(st.session_state.username)
    with col3:
        if st.button("🐘 Large (5m)"):
            st.session_state.refresh_rate = 300
            st.session_state.timer_start = time.time()
            save_username(st.session_state.username)

    timer_placeholder = st.sidebar.empty()
    start_time = st.session_state.timer_start
    remaining_time = int(st.session_state.refresh_rate - (time.time() - start_time))
    if remaining_time <= 0:
        st.session_state.timer_start = time.time()
        st.session_state.last_refresh = time.time()
        st.rerun()
    else:
        timer_placeholder.markdown(f"<p class='timer'>⏳ Next refresh in: {remaining_time} seconds</p>", unsafe_allow_html=True)

    # Start WebSocket server in a separate thread
    if not st.session_state.server_running and not st.session_state.server_task:
        st.session_state.server_task = threading.Thread(target=start_websocket_server, daemon=True)
        st.session_state.server_task.start()

if __name__ == "__main__":
    main()