Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,4 +1,4 @@
|
|
| 1 |
-
# app.py (Merged Version -
|
| 2 |
import streamlit as st
|
| 3 |
import asyncio
|
| 4 |
import websockets
|
|
@@ -8,52 +8,48 @@ import os
|
|
| 8 |
import random
|
| 9 |
import time
|
| 10 |
import hashlib
|
| 11 |
-
# from PIL import Image
|
| 12 |
import glob
|
| 13 |
import base64
|
| 14 |
import io
|
| 15 |
import streamlit.components.v1 as components
|
| 16 |
import edge_tts
|
| 17 |
-
# from audio_recorder_streamlit import audio_recorder
|
| 18 |
import nest_asyncio
|
| 19 |
import re
|
| 20 |
import pytz
|
| 21 |
import shutil
|
| 22 |
-
# import anthropic
|
| 23 |
-
# import openai
|
| 24 |
from PyPDF2 import PdfReader
|
| 25 |
import threading
|
| 26 |
import json
|
| 27 |
import zipfile
|
| 28 |
-
# from gradio_client import Client
|
| 29 |
from dotenv import load_dotenv
|
| 30 |
from streamlit_marquee import streamlit_marquee
|
| 31 |
from collections import defaultdict, Counter
|
| 32 |
import pandas as pd
|
| 33 |
-
from streamlit_js_eval import streamlit_js_eval
|
| 34 |
from PIL import Image # Needed for paste_image_component
|
| 35 |
|
| 36 |
# 🛠️ Patch asyncio for nesting
|
| 37 |
nest_asyncio.apply()
|
| 38 |
|
| 39 |
-
# 🎨 Page Config
|
| 40 |
st.set_page_config(
|
| 41 |
page_title="🤖🏗️ Shared World Builder 🏆",
|
| 42 |
page_icon="🏗️",
|
| 43 |
layout="wide",
|
| 44 |
-
initial_sidebar_state="expanded"
|
| 45 |
)
|
| 46 |
|
| 47 |
-
# --- Constants ---
|
| 48 |
-
#
|
| 49 |
-
icons = '
|
| 50 |
Site_Name = '🤖🏗️ Shared World Builder 🗣️'
|
| 51 |
START_ROOM = "World Lobby 🌍"
|
| 52 |
-
|
| 53 |
-
STATE_FILE = "user_state.txt" # For remembering username
|
| 54 |
-
|
| 55 |
-
# User/Chat
|
| 56 |
-
FUN_USERNAMES = {
|
| 57 |
"BuilderBot 🤖": "en-US-AriaNeural", "WorldWeaver 🕸️": "en-US-JennyNeural",
|
| 58 |
"Terraformer 🌱": "en-GB-SoniaNeural", "SkyArchitect ☁️": "en-AU-NatashaNeural",
|
| 59 |
"PixelPainter 🎨": "en-CA-ClaraNeural", "VoxelVortex 🌪️": "en-US-GuyNeural",
|
|
@@ -61,94 +57,1208 @@ FUN_USERNAMES = {
|
|
| 61 |
"BlockBard 🧱": "en-CA-LiamNeural", "SoundSculptor 🔊": "en-US-AnaNeural",
|
| 62 |
}
|
| 63 |
EDGE_TTS_VOICES = list(set(FUN_USERNAMES.values()))
|
| 64 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 65 |
|
| 66 |
-
|
|
|
|
| 67 |
AUDIO_CACHE_DIR = "audio_cache"
|
| 68 |
AUDIO_DIR = "audio_logs"
|
|
|
|
|
|
|
|
|
|
|
|
|
| 69 |
|
| 70 |
-
#
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
#
|
| 75 |
|
| 76 |
-
#
|
| 77 |
-
FILE_EMOJIS = {"md": "📝", "mp3": "🎵", "png": "🖼️", "mp4": "🎥", "zip": "📦", "json": "📄"}
|
| 78 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 79 |
|
| 80 |
-
# ---
|
| 81 |
-
|
| 82 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 83 |
|
| 84 |
-
# --- API Keys (Placeholder) ---
|
| 85 |
-
load_dotenv()
|
| 86 |
|
| 87 |
-
# ---
|
| 88 |
-
|
| 89 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 90 |
connected_clients = set() # Holds client_id strings
|
| 91 |
|
| 92 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
|
| 94 |
-
|
| 95 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 96 |
try:
|
| 97 |
-
|
| 98 |
-
except
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 123 |
try:
|
| 124 |
-
|
| 125 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 126 |
except ValueError:
|
| 127 |
-
|
| 128 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 129 |
else:
|
| 130 |
-
|
| 131 |
-
return {"name": basename.replace('.md',''), "timestamp": "Unknown", "dt": None, "filename": filename}
|
| 132 |
|
| 133 |
-
# --- World State MD File Handling ---
|
| 134 |
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
print(f"Saving {len(world_objects)} objects to MD file: {save_path}...")
|
| 140 |
|
| 141 |
-
#
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
#
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
* **Objects:** {len(world_data_dict)}
|
| 152 |
-
|
| 153 |
-
```json
|
| 154 |
-
{json.dumps(world_data_dict, indent=2)}
|
|
|
|
| 1 |
+
# app.py (Merged Version - Fixed Chat Input Clearing)
|
| 2 |
import streamlit as st
|
| 3 |
import asyncio
|
| 4 |
import websockets
|
|
|
|
| 8 |
import random
|
| 9 |
import time
|
| 10 |
import hashlib
|
| 11 |
+
# from PIL import Image # Keep commented unless needed for image pasting->3D texture?
|
| 12 |
import glob
|
| 13 |
import base64
|
| 14 |
import io
|
| 15 |
import streamlit.components.v1 as components
|
| 16 |
import edge_tts
|
| 17 |
+
# from audio_recorder_streamlit import audio_recorder # Keep commented unless re-adding audio input
|
| 18 |
import nest_asyncio
|
| 19 |
import re
|
| 20 |
import pytz
|
| 21 |
import shutil
|
| 22 |
+
# import anthropic # Keep commented unless integrating Claude
|
| 23 |
+
# import openai # Keep commented unless integrating OpenAI
|
| 24 |
from PyPDF2 import PdfReader
|
| 25 |
import threading
|
| 26 |
import json
|
| 27 |
import zipfile
|
| 28 |
+
# from gradio_client import Client # Keep commented unless integrating ArXiv/Gradio
|
| 29 |
from dotenv import load_dotenv
|
| 30 |
from streamlit_marquee import streamlit_marquee
|
| 31 |
from collections import defaultdict, Counter
|
| 32 |
import pandas as pd
|
| 33 |
+
from streamlit_js_eval import streamlit_js_eval # Still needed for some UI interactions
|
| 34 |
from PIL import Image # Needed for paste_image_component
|
| 35 |
|
| 36 |
# 🛠️ Patch asyncio for nesting
|
| 37 |
nest_asyncio.apply()
|
| 38 |
|
| 39 |
+
# 🎨 Page Config (From New App)
|
| 40 |
st.set_page_config(
|
| 41 |
page_title="🤖🏗️ Shared World Builder 🏆",
|
| 42 |
page_icon="🏗️",
|
| 43 |
layout="wide",
|
| 44 |
+
initial_sidebar_state="expanded" # Keep sidebar open initially
|
| 45 |
)
|
| 46 |
|
| 47 |
+
# --- Constants (Combined & 3D Added) ---
|
| 48 |
+
# Chat/User Constants
|
| 49 |
+
icons = '🤖🏗️🗣️' # Updated icons
|
| 50 |
Site_Name = '🤖🏗️ Shared World Builder 🗣️'
|
| 51 |
START_ROOM = "World Lobby 🌍"
|
| 52 |
+
FUN_USERNAMES = { # Simplified for clarity, can expand later
|
|
|
|
|
|
|
|
|
|
|
|
|
| 53 |
"BuilderBot 🤖": "en-US-AriaNeural", "WorldWeaver 🕸️": "en-US-JennyNeural",
|
| 54 |
"Terraformer 🌱": "en-GB-SoniaNeural", "SkyArchitect ☁️": "en-AU-NatashaNeural",
|
| 55 |
"PixelPainter 🎨": "en-CA-ClaraNeural", "VoxelVortex 🌪️": "en-US-GuyNeural",
|
|
|
|
| 57 |
"BlockBard 🧱": "en-CA-LiamNeural", "SoundSculptor 🔊": "en-US-AnaNeural",
|
| 58 |
}
|
| 59 |
EDGE_TTS_VOICES = list(set(FUN_USERNAMES.values()))
|
| 60 |
+
FILE_EMOJIS = {"md": "📝", "mp3": "🎵", "png": "🖼️", "mp4": "🎥", "zip": "📦", "csv":"📄", "json": "📄"}
|
| 61 |
+
|
| 62 |
+
# 3D World Constants
|
| 63 |
+
SAVE_DIR = "saved_worlds"
|
| 64 |
+
PLOT_WIDTH = 50.0
|
| 65 |
+
PLOT_DEPTH = 50.0
|
| 66 |
+
CSV_COLUMNS = ['obj_id', 'type', 'pos_x', 'pos_y', 'pos_z', 'rot_x', 'rot_y', 'rot_z', 'rot_order']
|
| 67 |
+
WORLD_STATE_FILE = "world_state.json" # Using JSON for simpler in-memory<->disk state
|
| 68 |
+
|
| 69 |
+
# --- Directories (Combined) ---
|
| 70 |
+
for d in ["chat_logs", "audio_logs", "audio_cache", SAVE_DIR]: # Added SAVE_DIR
|
| 71 |
+
os.makedirs(d, exist_ok=True)
|
| 72 |
|
| 73 |
+
CHAT_DIR = "chat_logs"
|
| 74 |
+
MEDIA_DIR = "." # Where general files are saved/served from
|
| 75 |
AUDIO_CACHE_DIR = "audio_cache"
|
| 76 |
AUDIO_DIR = "audio_logs"
|
| 77 |
+
STATE_FILE = "user_state.txt" # For remembering username
|
| 78 |
+
|
| 79 |
+
CHAT_FILE = os.path.join(CHAT_DIR, "global_chat.md") # Used for initial load maybe?
|
| 80 |
+
# Removed vote files for simplicity
|
| 81 |
|
| 82 |
+
# --- API Keys (Keep placeholder logic) ---
|
| 83 |
+
load_dotenv()
|
| 84 |
+
# anthropic_key = os.getenv('ANTHROPIC_API_KEY', st.secrets.get('ANTHROPIC_API_KEY', ""))
|
| 85 |
+
# openai_api_key = os.getenv('OPENAI_API_KEY', st.secrets.get('OPENAI_API_KEY', ""))
|
| 86 |
+
# openai_client = openai.OpenAI(api_key=openai_api_key)
|
| 87 |
|
| 88 |
+
# --- Helper Functions (Combined & Adapted) ---
|
|
|
|
| 89 |
|
| 90 |
+
def format_timestamp_prefix(username=""):
    """Build a sortable, collision-resistant prefix for log/audio filenames.

    Format: ``YYYYMMDD_HHMMSS_<username>_<4-char random suffix>``.
    UTC is used so filenames sort consistently regardless of the user's
    timezone; the random suffix avoids collisions when several users save
    within the same second.
    """
    # Local import keeps this helper self-contained; stdlib timezone.utc is
    # equivalent to pytz.utc here and removes the third-party dependency.
    from datetime import datetime, timezone
    now = datetime.now(timezone.utc)
    rand_suffix = ''.join(random.choices('abcdefghijklmnopqrstuvwxyz0123456789', k=4))
    return f"{now.strftime('%Y%m%d_%H%M%S')}_{username}_{rand_suffix}"
|
| 97 |
|
| 98 |
+
# --- Performance Timer (Optional, Keep if desired) ---
|
| 99 |
+
class PerformanceTimer:
    """Context manager that records how long a named operation takes.

    On exit, the elapsed wall-clock time is written to
    ``st.session_state['operation_timings']`` (latest duration per name) and
    appended to ``st.session_state['performance_metrics']`` (full history
    per name). Both containers are created lazily if absent.
    """

    def __init__(self, name):
        self.name = name
        self.start = None

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        duration = time.time() - self.start
        state = st.session_state
        if 'operation_timings' not in state:
            state['operation_timings'] = {}
        if 'performance_metrics' not in state:
            state['performance_metrics'] = defaultdict(list)
        state['operation_timings'][self.name] = duration
        state['performance_metrics'][self.name].append(duration)
|
| 108 |
|
|
|
|
|
|
|
| 109 |
|
| 110 |
+
# --- 3D World State Management (Adapted from original + WebSocket focus) ---
|
| 111 |
+
|
| 112 |
+
# Global structure to hold the current state of the world IN MEMORY
|
| 113 |
+
# Use defaultdict for easier adding
|
| 114 |
+
# Needs thread safety if accessed by multiple websocket handlers simultaneously.
|
| 115 |
+
# For now, relying on Streamlit's single-thread-per-session execution
|
| 116 |
+
# and assuming broadcast updates are okay without strict locking for this scale.
|
| 117 |
+
# A lock would be needed for production robustness.
|
| 118 |
+
world_objects_lock = threading.Lock() # Use lock for modifying the global dict
|
| 119 |
+
world_objects = defaultdict(dict) # Holds {obj_id: object_data}
|
| 120 |
+
|
| 121 |
+
def load_world_state_from_disk():
    """Loads world state from the JSON file or fallback to CSVs.

    Primary source is WORLD_STATE_FILE (JSON); if it is missing or unreadable
    the state is rebuilt from the per-plot CSV files and immediately re-saved
    as JSON for the next startup. The global ``world_objects`` dict is
    replaced under ``world_objects_lock``. Returns the number of objects
    loaded (0 when nothing could be read).
    """
    global world_objects
    loaded_count = 0
    print(f"[{time.time():.2f}] Attempting to load world state...")
    # Use lock for reading/writing the global dict
    with world_objects_lock:
        if os.path.exists(WORLD_STATE_FILE):
            try:
                with open(WORLD_STATE_FILE, 'r') as f:
                    data = json.load(f)
                # Ensure keys are strings if they got saved as ints somehow
                world_objects = defaultdict(dict, {str(k): v for k, v in data.items()})
                loaded_count = len(world_objects)
                print(f"Loaded {loaded_count} objects from {WORLD_STATE_FILE}")
            except json.JSONDecodeError:
                print(f"Error reading {WORLD_STATE_FILE}. Falling back to CSVs.")
                world_objects = defaultdict(dict) # Reset before loading from CSV
            except Exception as e:
                print(f"Error loading from {WORLD_STATE_FILE}: {e}. Falling back to CSVs.")
                world_objects = defaultdict(dict) # Reset

        # Fallback or initial load from CSVs if JSON fails or doesn't exist
        # (an empty-but-valid JSON file also triggers this path, since the
        # dict is falsy when empty).
        if not world_objects:
            print("Loading world state from CSV files...")
            # Use the cached CSV loading logic, but populate the global dict
            loaded_from_csv = get_all_world_objects_from_csv() # Gets list
            for obj in loaded_from_csv:
                world_objects[obj['obj_id']] = obj
            loaded_count = len(world_objects)
            print(f"Loaded {loaded_count} objects from CSVs.")
            # Save immediately to JSON for next time
            save_world_state_to_disk_internal() # Call internal save that assumes lock is held

    return loaded_count
|
| 156 |
+
|
| 157 |
+
def save_world_state_to_disk():
    """Thread-safe entry point for persisting the in-memory world state.

    Acquires ``world_objects_lock``, then delegates to the internal save
    routine (which assumes the lock is already held). Returns whatever the
    internal save returns (True on success, False on failure).
    """
    world_objects_lock.acquire()
    try:
        return save_world_state_to_disk_internal()
    finally:
        world_objects_lock.release()
|
| 161 |
+
|
| 162 |
+
def save_world_state_to_disk_internal():
    """Write the in-memory ``world_objects`` dict to WORLD_STATE_FILE as JSON.

    Caller must already hold ``world_objects_lock``. Returns True on success
    and False on any failure; errors are printed rather than raised because
    this may run from a background (non-Streamlit) thread.
    """
    global world_objects
    print(f"Saving {len(world_objects)} objects to {WORLD_STATE_FILE}...")
    try:
        snapshot = dict(world_objects)  # plain dict serializes cleanly
        with open(WORLD_STATE_FILE, 'w') as fh:
            json.dump(snapshot, fh, indent=2)
    except Exception as e:
        print(f"Error saving world state to {WORLD_STATE_FILE}: {e}")
        return False
    print("World state saved successfully.")
    return True
|
| 176 |
+
|
| 177 |
+
# --- Functions to load from CSVs (kept for initial load/fallback) ---
|
| 178 |
+
@st.cache_data(ttl=3600)
def load_plot_metadata():
    """Scan SAVE_DIR for ``plot_X*_Z*.csv`` files and return their metadata.

    Each entry holds the grid coordinates parsed from the filename, a display
    name, and the world-space offsets (grid index * plot dimensions). Results
    are sorted by (grid_x, grid_z) and cached for an hour. Returns [] when
    the save directory is missing or unreadable.
    """
    print(f"[{time.time():.2f}] Loading plot metadata...")
    plot_files = []
    try:
        plot_files = [f for f in os.listdir(SAVE_DIR) if f.endswith(".csv") and f.startswith("plot_X")]
    except FileNotFoundError:
        print(f"Save directory '{SAVE_DIR}' not found during metadata load.")
        return []
    except Exception as e:
        print(f"Error listing save directory '{SAVE_DIR}': {e}")
        return []

    parsed_plots = []
    for filename in plot_files:
        try:
            file_path = os.path.join(SAVE_DIR, filename)
            # Skip missing or effectively-empty files before parsing the name.
            if not os.path.exists(file_path) or os.path.getsize(file_path) <= 2:
                continue

            # Filename convention: plot_X<gx>_Z<gz>[_<name words>].csv
            parts = filename[:-4].split('_')
            grid_x = int(parts[1][1:])
            grid_z = int(parts[2][1:])
            plot_name = " ".join(parts[3:]) if len(parts) > 3 else f"Plot ({grid_x},{grid_z})"
            parsed_plots.append({
                'id': filename[:-4], 'filename': filename,
                'grid_x': grid_x, 'grid_z': grid_z, 'name': plot_name,
                'x_offset': grid_x * PLOT_WIDTH, 'z_offset': grid_z * PLOT_DEPTH
            })
        except Exception as e:
            # BUGFIX: report the actual filename — the message previously
            # contained a literal placeholder instead of the interpolation.
            print(f"Warning: Error parsing metadata from filename '{filename}': {e}. Skipping.")
            continue
    parsed_plots.sort(key=lambda p: (p['grid_x'], p['grid_z']))
    print(f"Found {len(parsed_plots)} valid plot files.")
    return parsed_plots
|
| 214 |
+
|
| 215 |
+
def load_single_plot_objects_relative(filename):
    """Load objects from one plot CSV, keeping coordinates plot-relative.

    Cleans and normalizes the frame: fills missing obj_ids with fresh UUIDs,
    defaults missing rotation columns, coerces numeric columns, and drops
    rows whose positions are invalid. Returns a list of row dicts restricted
    to CSV_COLUMNS; returns [] for missing/empty/unreadable files.
    """
    file_path = os.path.join(SAVE_DIR, filename)
    objects = []
    try:
        if not os.path.exists(file_path) or os.path.getsize(file_path) == 0:
            return []

        df = pd.read_csv(file_path)
        if df.empty:
            return []

        # Data Cleaning & Defaulting
        if 'obj_id' not in df.columns or df['obj_id'].isnull().any():
            # BUGFIX: include the filename in the warning (previously a
            # literal placeholder), and handle the column-missing case —
            # df['obj_id'].fillna(...) raised KeyError when the column was
            # absent even though the guard above allows that case.
            print(f"Warning: Generating missing obj_ids for {filename}")
            fresh_ids = pd.Series([str(uuid.uuid4()) for _ in range(len(df))], index=df.index)
            if 'obj_id' not in df.columns:
                df['obj_id'] = fresh_ids
            else:
                df['obj_id'] = df['obj_id'].fillna(fresh_ids)
        df['obj_id'] = df['obj_id'].astype(str)

        for col in ['type', 'pos_x', 'pos_y', 'pos_z']:
            if col not in df.columns:
                # BUGFIX: report the actual filename in the warning.
                print(f"Warning: CSV '{filename}' missing essential column '{col}'. Skipping file.")
                return []

        for col, default in [('rot_x', 0.0), ('rot_y', 0.0), ('rot_z', 0.0), ('rot_order', 'XYZ')]:
            if col not in df.columns:
                df[col] = default
        df.fillna({'rot_x': 0.0, 'rot_y': 0.0, 'rot_z': 0.0, 'rot_order': 'XYZ'}, inplace=True)

        for col in ['pos_x', 'pos_y', 'pos_z', 'rot_x', 'rot_y', 'rot_z']:
            df[col] = pd.to_numeric(df[col], errors='coerce')
        df.dropna(subset=['pos_x', 'pos_y', 'pos_z'], inplace=True)  # Drop rows where essential position is invalid

        df['type'] = df['type'].astype(str).fillna('Unknown')

        # Convert valid rows to dicts, restricted to the canonical column set.
        objects = df[CSV_COLUMNS].to_dict('records')

    except pd.errors.EmptyDataError:
        pass  # Normal for empty files
    except FileNotFoundError:
        pass  # Normal if file doesn't exist yet
    except Exception as e:
        # BUGFIX: report the actual filename in the error message.
        print(f"Error loading objects from {filename}: {e}")
        # Optionally raise or return partial data? For now, return empty on error.
        return []
    return objects
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
@st.cache_data(show_spinner="Loading initial world objects from CSVs...")
def get_all_world_objects_from_csv():
    """Load every object from every known plot CSV, in world coordinates.

    Objects are collected into a dict keyed by obj_id while loading, so a
    duplicate id appearing in more than one plot file deduplicates
    automatically — the last plot file read wins. Returns a list of
    world-space object dicts.
    """
    print(f"[{time.time():.2f}] Reloading ALL world objects from CSV files...")
    merged = {}
    for plot in load_plot_metadata():
        for rec in load_single_plot_objects_relative(plot['filename']):
            oid = rec.get('obj_id')
            if not oid:
                continue  # skip rows that never got a usable id
            # Shift plot-relative coordinates into world space using the
            # plot's precomputed offsets.
            merged[oid] = {
                'obj_id': oid,
                'type': rec.get('type', 'Unknown'),
                'position': {
                    'x': rec.get('pos_x', 0.0) + plot['x_offset'],
                    'y': rec.get('pos_y', 0.0),
                    'z': rec.get('pos_z', 0.0) + plot['z_offset'],
                },
                'rotation': {
                    '_x': rec.get('rot_x', 0.0),
                    '_y': rec.get('rot_y', 0.0),
                    '_z': rec.get('rot_z', 0.0),
                    '_order': rec.get('rot_order', 'XYZ'),
                },
            }
    world_list = list(merged.values())
    print(f"Loaded {len(world_list)} total objects from CSVs.")
    return world_list
|
| 292 |
+
|
| 293 |
+
# --- Session State Init (Combined & Expanded) ---
|
| 294 |
+
def init_session_state():
    """Seed st.session_state with defaults for both chat and 3D-world features.

    Idempotent: only keys missing from session_state are set, so Streamlit
    reruns never clobber live values (e.g. active websocket connections).
    """
    defaults = {
        # From Chat App
        'server_running_flag': False, 'server_instance': None, 'server_task': None,
        'active_connections': defaultdict(dict), # Stores actual websocket objects by ID
        'last_chat_update': 0, 'message_input': "", 'audio_cache': {},
        'tts_voice': "en-US-AriaNeural", 'chat_history': [], 'marquee_settings': {
            "background": "#1E1E1E", "color": "#FFFFFF", "font-size": "14px",
            "animationDuration": "20s", "width": "100%", "lineHeight": "35px"
        },
        'enable_audio': True, 'download_link_cache': {}, 'username': None,
        'autosend': False, # Default autosend off for chat
        'last_message': "", 'timer_start': time.time(),
        'last_sent_transcript': "", 'last_refresh': time.time(),
        'auto_refresh': False, # Auto-refresh for chat display? Maybe not needed with WS
        'refresh_rate': 30,

        # From 3D World App (or adapted)
        'selected_object': 'None', # Current building tool
        'initial_world_state_loaded': False, # Flag to load state only once

        # Keep others if needed, removed some for clarity
        'operation_timings': {}, 'performance_metrics': defaultdict(list),
        'paste_image_base64': "", # For paste component state tracking
    }
    for k, v in defaults.items():
        if k not in st.session_state:
            st.session_state[k] = v
    # Ensure nested dicts are present
    # NOTE(review): these two checks are redundant with the loop above
    # (the keys are in `defaults`), but harmless — kept as belt-and-braces.
    if 'marquee_settings' not in st.session_state: st.session_state.marquee_settings = defaults['marquee_settings']
    if 'active_connections' not in st.session_state: st.session_state.active_connections = defaultdict(dict)
|
| 325 |
+
|
| 326 |
+
# --- Marquee Helpers (Keep from New App) ---
|
| 327 |
+
def update_marquee_settings_ui():
    """Placeholder for the marquee settings sidebar UI (not yet wired up)."""
    return None
|
| 329 |
+
def display_marquee(text, settings, key_suffix=""): # ... (keep function as is) ...
|
| 330 |
+
pass # Placeholder
|
| 331 |
+
|
| 332 |
+
# --- Text & File Helpers (Keep & Adapt from New App) ---
|
| 333 |
+
def clean_text_for_tts(text):
    """Prepare chat text for speech synthesis.

    Strips markdown links (keeping the link text), collapses whitespace,
    removes characters that trip up TTS, and caps the result at 250 chars.
    Returns "No text" when nothing speakable remains.
    """
    # Keep the visible text of [label](url) links, drop the URL part.
    without_links = re.sub(r'\[([^\]]+)\]\([^\)]+\)', r'\1', text)
    collapsed = ' '.join(without_links.split())
    stripped = re.sub(r'[#*!\[\]]+', '', collapsed)
    return stripped[:250] or "No text"
|
| 339 |
+
|
| 340 |
+
def generate_filename(prompt, username, file_type="md", title=None):
    """Build a unique filename: ``<timestamp>_<slug>_<hash>.<ext>``.

    The slug comes from *title* when given, otherwise the first 30 chars of
    *prompt*; a 6-char md5 of the full prompt keeps similar names distinct.
    """
    slug_source = title if title else prompt[:30]
    slug = clean_text_for_filename(slug_source)
    digest = hashlib.md5(prompt.encode()).hexdigest()[:6]
    return f"{format_timestamp_prefix(username)}_{slug}_{digest}.{file_type}"
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
def clean_text_for_filename(text):
    """Turn arbitrary text into a safe filename fragment (max 50 chars).

    Whitespace runs become single underscores; anything outside word
    characters, hyphen, and period is removed.
    """
    underscored = re.sub(r'\s+', '_', text)
    safe = re.sub(r'[^\w\-.]', '', underscored)
    return safe[:50]
|
| 355 |
+
|
| 356 |
+
def create_file(content, username, file_type="md", save_path=None):
    """Write text content to disk and return the path used (None on failure).

    When *save_path* is omitted, a generated filename under MEDIA_DIR is
    used. Parent directories are created as needed.
    """
    if not save_path:
        generated = generate_filename(content, username, file_type)
        save_path = os.path.join(MEDIA_DIR, generated)

    parent = os.path.dirname(save_path)
    if parent:
        os.makedirs(parent, exist_ok=True)

    try:
        with open(save_path, 'w', encoding='utf-8') as handle:
            handle.write(content)
    except Exception as e:
        print(f"Error creating file {save_path}: {e}")
        return None
    print(f"Created file: {save_path}")
    return save_path
|
| 374 |
+
|
| 375 |
+
def get_download_link(file, file_type="mp3"):
    """Return cached HTML for a base64 data-URI download link for *file*.

    Cache keys include the file's mtime, so a rewritten file regenerates
    its link instead of serving stale bytes.
    """
    if not file or not os.path.exists(file):
        return f"File not found: {file}"

    cache_key = f"dl_{file}_{os.path.getmtime(file)}"
    cache = st.session_state.get('download_link_cache', {})
    if cache_key not in cache:
        with open(file, "rb") as fh:
            b64 = base64.b64encode(fh.read()).decode()
        mime_types = {"mp3": "audio/mpeg", "png": "image/png", "mp4": "video/mp4", "md": "text/markdown", "zip": "application/zip", "csv": "text/csv", "json": "application/json"}
        link_html = f'<a href="data:{mime_types.get(file_type, "application/octet-stream")};base64,{b64}" download="{os.path.basename(file)}">{FILE_EMOJIS.get(file_type, "📄")} Download {os.path.basename(file)}</a>'
        # Lazily create the cache dict before storing.
        if 'download_link_cache' not in st.session_state:
            st.session_state.download_link_cache = {}
        st.session_state.download_link_cache[cache_key] = link_html
    return st.session_state.download_link_cache[cache_key]
|
| 387 |
+
|
| 388 |
+
def save_username(username):
    """Persist the chosen username to STATE_FILE (best-effort, never raises)."""
    try:
        with open(STATE_FILE, 'w') as fh:
            fh.write(username)
    except Exception as e:
        print(f"Failed to save username: {e}")
|
| 392 |
+
|
| 393 |
+
def load_username():
    """Read the remembered username from STATE_FILE, or None if unavailable."""
    if not os.path.exists(STATE_FILE):
        return None
    try:
        with open(STATE_FILE, 'r') as fh:
            return fh.read().strip()
    except Exception as e:
        print(f"Failed to load username: {e}")
    return None
|
| 399 |
+
|
| 400 |
+
# --- Audio Processing (Keep from New App) ---
|
| 401 |
+
async def async_edge_tts_generate(text, voice, username):  # Simplified args
    """Generate (or reuse a cached) Edge-TTS MP3 for *text*.

    Returns the mp3 path on success, None on empty input or any TTS failure.
    Cache key is md5 of the first 150 chars + voice, stored in
    st.session_state.audio_cache; *username* only affects the output filename.
    """
    if not text: return None
    cache_key = hashlib.md5(f"{text[:150]}_{voice}".encode()).hexdigest()  # Use hash for cache key
    # Ensure audio cache dict exists
    if 'audio_cache' not in st.session_state: st.session_state.audio_cache = {}
    cached_path = st.session_state.audio_cache.get(cache_key)
    if cached_path and os.path.exists(cached_path):
        # print(f"Using cached audio: {cached_path}")
        return cached_path

    text_cleaned = clean_text_for_tts(text)
    if not text_cleaned or text_cleaned == "No text":
        print("Skipping TTS for empty/cleaned text.")
        return None

    filename_base = generate_filename(text_cleaned, username, "mp3")
    save_path = os.path.join(AUDIO_DIR, filename_base)
    print(f"Generating TTS audio for '{text_cleaned[:30]}...' to {save_path}")
    try:
        communicate = edge_tts.Communicate(text_cleaned, voice)
        await communicate.save(save_path)
        # Only cache/return a non-empty file; edge-tts can write zero bytes on failure
        if os.path.exists(save_path) and os.path.getsize(save_path) > 0:
            st.session_state.audio_cache[cache_key] = save_path
            return save_path
        else:
            print(f"Audio file {save_path} failed generation or is empty.")
            return None
    except edge_tts.exceptions.NoAudioReceived:
        print(f"Edge TTS returned no audio for voice {voice}.")
        return None
    except Exception as e:
        print(f"Error during Edge TTS generation: {e}")
        return None
|
| 434 |
+
|
| 435 |
+
def play_and_download_audio(file_path):
    """Render an inline audio player plus a download link for *file_path*."""
    if not (file_path and os.path.exists(file_path)):
        st.warning(f"Audio file not found for playback: {file_path}")
        return
    try:
        st.audio(file_path)
        extension = file_path.split('.')[-1]
        st.markdown(get_download_link(file_path, extension), unsafe_allow_html=True)
    except Exception as e:
        st.error(f"Error displaying audio {file_path}: {e}")
|
| 445 |
+
|
| 446 |
+
|
| 447 |
+
# --- Chat Saving/Loading (Keep & Adapt from New App) ---
|
| 448 |
+
async def save_chat_entry(username, message, voice, is_markdown=False):
    """Saves chat entry to a file and potentially generates audio.

    Returns a (md_file_path, audio_file_path) tuple; either element may be
    None (empty message, file-save failure, or audio disabled/failed).
    """
    if not message.strip():
        return None, None
    print(f"Saving chat entry from {username}: {message[:50]}...")
    central = pytz.timezone('US/Central')  # Or use UTC
    timestamp = datetime.now(central).strftime("%Y-%m-%d %H:%M:%S")
    entry = f"[{timestamp}] {username} ({voice}): {message}" if not is_markdown else f"[{timestamp}] {username} ({voice}):\n```markdown\n{message}\n```"

    # Save to individual file in chat_logs
    md_filename_base = generate_filename(message, username, "md")
    md_file_path = os.path.join(CHAT_DIR, md_filename_base)
    md_file = create_file(entry, username, "md", save_path=md_file_path)

    # Append to session state history for immediate display
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []
    st.session_state.chat_history.append(entry)

    # Generate audio (only if enabled)
    audio_file = None
    if st.session_state.get('enable_audio', True):
        # BUGFIX: the old code read `message if not is_markdown else message`,
        # a no-op conditional. TTS receives the raw message text either way;
        # markdown is NOT stripped here — TODO: strip fences before TTS if desired.
        audio_file = await async_edge_tts_generate(message, voice, username)
        if audio_file:
            print(f"Generated audio: {audio_file}")
        else:
            print("Failed to generate audio for chat message.")

    return md_file, audio_file
|
| 478 |
+
|
| 479 |
+
async def load_chat_history():
    """Loads chat history from files in CHAT_DIR if session state is empty.

    Returns the (possibly freshly populated) st.session_state.chat_history list.
    """
    if 'chat_history' not in st.session_state: st.session_state.chat_history = []

    if not st.session_state.chat_history:
        print("Loading chat history from files...")
        # Sort by mtime so the transcript reads oldest-first
        chat_files = sorted(glob.glob(os.path.join(CHAT_DIR, "*.md")), key=os.path.getmtime)
        loaded_count = 0
        for f_path in chat_files:
            try:
                with open(f_path, 'r', encoding='utf-8') as file:
                    st.session_state.chat_history.append(file.read().strip())
                loaded_count += 1
            except Exception as e:
                print(f"Error reading chat file {f_path}: {e}")
        print(f"Loaded {loaded_count} chat entries from files.")
    return st.session_state.chat_history
|
| 496 |
+
|
| 497 |
+
|
| 498 |
+
# --- WebSocket Handling (Adapted for 3D State & Thread Safety) ---
|
| 499 |
+
# Global set to track connected client IDs for efficient broadcast checks
|
| 500 |
connected_clients = set()  # Holds client_id strings; shared across handler tasks for broadcast checks
|
| 501 |
|
| 502 |
+
async def register_client(websocket):
    """Adds client to tracking structures.

    Records the id in the module-level `connected_clients` set and keeps the
    live websocket object in st.session_state.active_connections for broadcasts.
    """
    client_id = str(websocket.id)
    connected_clients.add(client_id)
    st.session_state.active_connections[client_id] = websocket  # Store WS object itself
    print(f"Client registered: {client_id}. Total: {len(connected_clients)}")
|
| 508 |
+
|
| 509 |
+
async def unregister_client(websocket):
    """Removes client from tracking structures.

    Uses discard/pop so a double-unregister is harmless.
    """
    client_id = str(websocket.id)
    connected_clients.discard(client_id)
    st.session_state.active_connections.pop(client_id, None)
    print(f"Client unregistered: {client_id}. Remaining: {len(connected_clients)}")
|
| 515 |
+
|
| 516 |
+
|
| 517 |
+
async def websocket_handler(websocket, path):
    """Per-connection server loop: sync world state, then relay chat/build events.

    Flow: register -> send full world snapshot -> announce join -> relay
    messages until disconnect -> announce leave -> unregister.

    NOTE(review): `path` is unused; websockets >= 11 no longer passes it to
    handlers — confirm the pinned websockets version before upgrading.
    """
    await register_client(websocket)
    client_id = str(websocket.id)
    username = st.session_state.get('username', f"User_{client_id[:4]}")  # Get username associated with this session

    # Send initial world state to the new client
    try:
        with world_objects_lock:  # Read lock for initial state
            initial_state_payload = dict(world_objects)  # shallow snapshot so we can release the lock before I/O

        initial_state_msg = json.dumps({
            "type": "initial_state",
            "payload": initial_state_payload  # Send current world state
        })
        await websocket.send(initial_state_msg)
        print(f"Sent initial state ({len(initial_state_payload)} objects) to {client_id}")

        # Announce join to others
        await broadcast_message(json.dumps({
            "type": "user_join",
            "payload": {"username": username, "id": client_id}  # Send assigned username
        }), exclude_id=client_id)

    except Exception as e:
        print(f"Error during initial phase for {client_id}: {e}")

    # Main message loop
    try:
        async for message in websocket:
            try:
                data = json.loads(message)
                msg_type = data.get("type")
                payload = data.get("payload", {})  # Ensure payload is a dict

                # Get username from payload (client should send it), fallback to initial session username
                sender_username = payload.get("username", username)

                if msg_type == "chat_message":
                    chat_text = payload.get('message', '')
                    print(f"Received chat from {sender_username} ({client_id}): {chat_text[:50]}...")
                    voice = payload.get('voice', FUN_USERNAMES.get(sender_username, "en-US-AriaNeural"))
                    # Save chat locally (run in background task)
                    asyncio.create_task(save_chat_entry(sender_username, chat_text, voice))
                    # Broadcast chat message (including sender info) to others
                    await broadcast_message(message, exclude_id=client_id)  # Forward original msg

                elif msg_type == "place_object":
                    obj_data = payload.get("object_data")
                    if obj_data and 'obj_id' in obj_data and 'type' in obj_data:
                        print(f"Received place_object from {sender_username} ({client_id}): {obj_data.get('type')} ({obj_data['obj_id']})")
                        with world_objects_lock:  # Lock for write
                            world_objects[obj_data['obj_id']] = obj_data  # Add/update in memory
                        # Broadcast placement to others (include who placed it)
                        broadcast_payload = json.dumps({
                            "type": "object_placed",
                            "payload": {"object_data": obj_data, "username": sender_username}
                        })
                        await broadcast_message(broadcast_payload, exclude_id=client_id)
                        # Trigger periodic save maybe? Or add to a "dirty" queue
                    else:
                        print(f"Invalid place_object payload from {client_id}: {payload}")

                elif msg_type == "delete_object":
                    obj_id = payload.get("obj_id")
                    if obj_id:
                        print(f"Received delete_object from {sender_username} ({client_id}): {obj_id}")
                        removed = False
                        with world_objects_lock:  # Lock for write
                            if obj_id in world_objects:
                                del world_objects[obj_id]
                                removed = True
                        if removed:
                            # Broadcast deletion
                            broadcast_payload = json.dumps({
                                "type": "object_deleted",
                                "payload": {"obj_id": obj_id, "username": sender_username}
                            })
                            await broadcast_message(broadcast_payload, exclude_id=client_id)
                    else:
                        print(f"Invalid delete_object payload from {client_id}: {payload}")

                elif msg_type == "player_position":
                    # Basic position broadcasting (no server-side validation yet)
                    pos_data = payload.get("position")
                    if pos_data:
                        broadcast_payload = json.dumps({
                            "type": "player_moved",
                            "payload": {"username": sender_username, "id": client_id, "position": pos_data}
                        })
                        await broadcast_message(broadcast_payload, exclude_id=client_id)

                # Add handlers for other types (request_save, etc.)

            except json.JSONDecodeError:
                print(f"Received invalid JSON from {client_id}: {message[:100]}...")  # Log truncated message
            except Exception as e:
                print(f"Error processing message from {client_id}: {e}")
                # Optionally send error back to client?
                # await websocket.send(json.dumps({"type": "error", "payload": {"message": str(e)}}))

    except websockets.ConnectionClosedOK:
        print(f"Client disconnected normally: {client_id} ({username})")
    except websockets.ConnectionClosedError as e:
        print(f"Client connection closed with error: {client_id} ({username}) - {e}")
    except Exception as e:
        print(f"Unexpected error in handler for {client_id}: {e}")  # Catch broader errors
    finally:
        # Announce leave to others
        await broadcast_message(json.dumps({
            "type": "user_leave",
            "payload": {"username": username, "id": client_id}
        }), exclude_id=client_id)  # Exclude self just in case
        await unregister_client(websocket)  # Ensure cleanup
|
| 630 |
+
|
| 631 |
+
|
| 632 |
+
# Modified broadcast to use the global set and skip sender
|
| 633 |
+
async def broadcast_message(message, exclude_id=None):
    """Sends a message to all connected clients except the excluded one.

    Sends run concurrently via asyncio tasks; a failed send is logged
    per-client and never interrupts delivery to the remaining clients.
    Cleanup of dead connections is left to each client's handler `finally`.
    """
    if not connected_clients:
        return  # No one to send to

    # Snapshot the id set so joins/leaves during iteration can't break us.
    # BUGFIX: track (client_id, task) pairs instead of indexing back into the
    # raw id list — the old `current_client_ids[i]` mapping was wrong whenever
    # exclude_id (or a missing websocket) caused a skip.
    send_jobs = []
    for client_id in list(connected_clients):
        if client_id == exclude_id:
            continue
        websocket = st.session_state.active_connections.get(client_id)  # Get WS object from session state dict
        if websocket:
            send_jobs.append((client_id, asyncio.create_task(send_safely(websocket, message, client_id))))
        else:
            # If websocket object not found, rely on handler's finally block for cleanup
            print(f"Websocket object not found for client {client_id} during broadcast.")

    if send_jobs:
        results = await asyncio.gather(*(task for _, task in send_jobs), return_exceptions=True)
        for (failed_client_id, _), result in zip(send_jobs, results):
            if isinstance(result, Exception):
                print(f"Error sending message during broadcast to {failed_client_id}: {result}")
                # No cleanup here: the failed client's handler unregisters it.
|
| 675 |
+
|
| 676 |
+
|
| 677 |
+
async def send_safely(websocket, message, client_id):
    """Wrapper to send message and handle potential connection errors.

    Always re-raises after logging so asyncio.gather(return_exceptions=True)
    in broadcast_message can see which sends failed.
    """
    try:
        await websocket.send(message)
    except websockets.ConnectionClosed:
        print(f"Send failed: Connection closed for client {client_id}")
        # Don't unregister here, let the main handler loop do it
        raise  # Re-raise exception for gather to catch
    except RuntimeError as e:  # Handle loop closed errors
        print(f"Send failed: RuntimeError for client {client_id}: {e}")
        raise
    except Exception as e:
        print(f"Send failed: Unexpected error for client {client_id}: {e}")
        raise
|
| 691 |
+
|
| 692 |
+
|
| 693 |
+
async def run_websocket_server():
    """Run the websockets server on 0.0.0.0:8765 until the stop event fires.

    Guards against double-starts with a session-state flag; stores the server
    instance and stop event in session state so another task can shut it down.
    """
    # Check if already running - basic flag protection
    if st.session_state.get('server_running_flag', False):
        print("Server flag indicates already running or starting.")
        return
    st.session_state['server_running_flag'] = True
    print("Attempting to start WebSocket server on 0.0.0.0:8765...")
    stop_event = asyncio.Event()  # For potential graceful shutdown later
    st.session_state['websocket_stop_event'] = stop_event
    try:
        # Use 0.0.0.0 for broader access (requires firewall config)
        server = await websockets.serve(websocket_handler, "0.0.0.0", 8765)
        st.session_state['server_instance'] = server  # Store server instance
        print(f"WebSocket server started successfully on {server.sockets[0].getsockname()}.")
        await stop_event.wait()  # Keep server running until stop event is set
    except OSError as e:
        # Most common cause: port 8765 already bound by a previous run
        print(f"### FAILED TO START WEBSOCKET SERVER: {e}")
        st.error(f"Failed start WebSocket: {e}. Port 8765 busy?")
    except Exception as e:
        print(f"### UNEXPECTED ERROR IN WEBSOCKET SERVER: {e}")
        st.error(f"WebSocket server error: {e}")
    finally:
        print("WebSocket server task loop finished.")
        if 'server_instance' in st.session_state and st.session_state.server_instance:
            st.session_state.server_instance.close()
            await st.session_state.server_instance.wait_closed()
            print("WebSocket server closed.")
        # Reset flags so a later rerun can restart the server
        st.session_state['server_running_flag'] = False
        st.session_state['server_instance'] = None
        st.session_state['websocket_stop_event'] = None
|
| 723 |
+
|
| 724 |
+
|
| 725 |
+
def start_websocket_server_thread():
    """Starts the WebSocket server in a separate daemon thread.

    No-ops if the thread or the session-state running flag indicates the
    server is already up. The thread owns its own asyncio event loop.
    """
    if st.session_state.get('server_task') and st.session_state.server_task.is_alive():
        print("Server thread check: Already running.")
        return
    if st.session_state.get('server_running_flag', False):
        print("Server flag check: Already running.")
        return

    print("Creating and starting new server thread.")
    # Ensure a new loop is created and set for this thread
    def run_server_loop():
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            loop.run_until_complete(run_websocket_server())
        finally:
            loop.close()
            print("Server thread asyncio loop closed.")

    st.session_state.server_task = threading.Thread(target=run_server_loop, daemon=True)
    st.session_state.server_task.start()
    time.sleep(1)  # Give thread a moment to initialize
    print(f"Server thread started. Alive: {st.session_state.server_task.is_alive()}")
|
| 749 |
+
|
| 750 |
+
|
| 751 |
+
# --- PDF to Audio (Keep if desired, maybe in a separate tab?) ---
|
| 752 |
+
class AudioProcessor:
    """Cached Edge-TTS audio generation (used for PDF page narration).

    Audio files and a metadata.json index live in AUDIO_CACHE_DIR; entries
    are keyed by md5 of the first 150 chars of the text plus the voice name.
    """

    def __init__(self):
        self.cache_dir = AUDIO_CACHE_DIR
        os.makedirs(self.cache_dir, exist_ok=True)
        # BUGFIX: the old one-liner `json.load(open(...))` leaked the file
        # handle and crashed on corrupt JSON; load defensively instead.
        metadata_path = f"{self.cache_dir}/metadata.json"
        self.metadata = {}
        if os.path.exists(metadata_path):
            try:
                with open(metadata_path, 'r') as f:
                    self.metadata = json.load(f)
            except Exception as e:
                print(f"Failed metadata load: {e}")

    def _save_metadata(self):
        """Best-effort write of the cache metadata index."""
        try:
            with open(f"{self.cache_dir}/metadata.json", 'w') as f:
                json.dump(self.metadata, f, indent=2)
        except Exception as e:
            print(f"Failed metadata save: {e}")

    async def create_audio(self, text, voice='en-US-AriaNeural'):
        """Generate (or fetch cached) MP3 for *text*; return its path or None."""
        cache_key = hashlib.md5(f"{text[:150]}:{voice}".encode()).hexdigest()
        cache_path = f"{self.cache_dir}/{cache_key}.mp3"
        if cache_key in self.metadata and os.path.exists(cache_path):
            return cache_path
        text_cleaned = clean_text_for_tts(text)
        if not text_cleaned:
            return None
        # Ensure dir exists before saving
        os.makedirs(os.path.dirname(cache_path), exist_ok=True)
        try:
            communicate = edge_tts.Communicate(text_cleaned, voice)
            await communicate.save(cache_path)
            self.metadata[cache_key] = {'timestamp': datetime.now().isoformat(), 'text_length': len(text_cleaned), 'voice': voice}
            self._save_metadata()
            return cache_path
        except Exception as e:
            print(f"TTS Create Audio Error: {e}")
            return None
|
| 772 |
+
|
| 773 |
+
|
| 774 |
+
def process_pdf(pdf_file, max_pages, voice, audio_processor):
    """Extract text from up to *max_pages* pages and generate per-page TTS audio.

    Spawns one thread per page with text; each thread runs create_audio in its
    own asyncio event loop. Returns (texts, audios, total_pages), where both
    dicts map page index -> text / mp3 path. On PDF read failure returns
    ({}, {}, 0) after surfacing the error via st.error.
    """
    try:
        reader=PdfReader(pdf_file); total_pages=min(len(reader.pages),max_pages); texts,audios={}, {}
        page_threads = []
        results_lock = threading.Lock()  # Lock for updating shared audios dict

        def process_page_sync(page_num, page_text):
            # Run the async function in a new event loop for this thread
            async def run_async_audio():
                return await audio_processor.create_audio(page_text, voice)
            try:
                audio_path = asyncio.run(run_async_audio())
                if audio_path:
                    with results_lock:
                        audios[page_num] = audio_path
            except Exception as page_e:
                print(f"Error processing page {page_num+1} audio: {page_e}")

        for i in range(total_pages):
            text=reader.pages[i].extract_text()
            if text:  # Only process pages with text
                texts[i]=text
                thread = threading.Thread(target=process_page_sync, args=(i, text))
                page_threads.append(thread)
                thread.start()
            else: texts[i] = "[No text extracted]"

        # Wait for all threads to complete
        for thread in page_threads:
            thread.join()

        return texts, audios, total_pages
    except Exception as pdf_e:
        st.error(f"Error reading PDF: {pdf_e}")
        return {}, {}, 0
|
| 809 |
+
|
| 810 |
+
|
| 811 |
+
# --- ArXiv/AI Lookup (Commented out for focus) ---
|
| 812 |
+
# def parse_arxiv_refs(...): pass
|
| 813 |
+
# def generate_5min_feature_markdown(...): pass
|
| 814 |
+
# async def create_paper_audio_files(...): pass
|
| 815 |
+
# async def perform_ai_lookup(...): pass
|
| 816 |
+
# async def perform_claude_search(...): pass
|
| 817 |
+
# async def perform_arxiv_search(...): pass
|
| 818 |
+
|
| 819 |
+
# --- Image Handling (Keep basic save, comment out Claude processing) ---
|
| 820 |
+
async def save_pasted_image(image, username):  # Simplified
    """Save a pasted PIL image to MEDIA_DIR as PNG; return the path or None.

    NOTE(review): declared async but performs no await — callers could use a
    plain function; kept async for call-site compatibility.
    """
    # Short content hash so identical pastes get the same filename suffix
    img_hash = hashlib.md5(image.tobytes()).hexdigest()[:8]
    # Add check against existing hashes if needed: if img_hash in st.session_state.image_hashes: return None
    timestamp = format_timestamp_prefix(username)
    filename = f"{timestamp}_pasted_{img_hash}.png"
    filepath = os.path.join(MEDIA_DIR, filename)  # Save in base dir
    try:
        image.save(filepath, "PNG")
        print(f"Pasted image saved: {filepath}")
        # Optionally announce image paste via chat?
        # await save_chat_entry(username, f"Pasted an image: (unknown)", FUN_USERNAMES.get(username, "en-US-AriaNeural"))
        return filepath
    except Exception as e:
        print(f"Failed image save: {e}")
        return None
|
| 835 |
+
|
| 836 |
+
# --- Zip and Delete Files (Keep from New App) ---
|
| 837 |
+
def create_zip_of_files(files, prefix="Archive", query=""):  # Simplified args
    """Zip *files* (stored under their basenames) into a timestamped archive.

    Returns the archive name on success, None when no files were given or
    zipping failed. Missing files are skipped with a log line.
    """
    if not files:
        st.warning("No files selected to zip.")
        return None
    timestamp = format_timestamp_prefix("Zip")  # Generic timestamp
    zip_name = f"{prefix}_{timestamp}.zip"
    try:
        print(f"Creating zip: {zip_name} with {len(files)} files...")
        with zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED) as archive:
            for path in files:
                if not os.path.exists(path):
                    print(f"Skipping non-existent file for zipping: {path}")
                    continue
                archive.write(path, os.path.basename(path))  # Use basename in archive
        print("Zip creation successful.")
        st.success(f"Created {zip_name}")
        return zip_name
    except Exception as e:
        print(f"Zip creation failed: {e}")
        st.error(f"Zip creation failed: {e}")
        return None
|
| 858 |
+
|
| 859 |
+
def delete_files(file_patterns, exclude_files=None):  # Takes list of patterns
    """Delete files matching the given glob patterns, skipping protected names.

    Patterns are expanded relative to MEDIA_DIR. Directories are never
    removed. Reports a summary via st.success/st.warning and clears the
    download-link and audio caches afterwards.
    """
    # Define core protected files
    protected = [STATE_FILE, WORLD_STATE_FILE, "app.py", "index.html", "requirements.txt", "README.md"]
    # Add user-provided exclusions
    if exclude_files:
        protected.extend(exclude_files)

    deleted_count = 0
    errors = 0
    for pattern in file_patterns:
        # Expand pattern relative to current directory
        pattern_path = os.path.join(MEDIA_DIR, pattern)  # Assume MEDIA_DIR is current dir '.'
        print(f"Attempting to delete files matching: {pattern_path}")
        try:
            files_to_delete = glob.glob(pattern_path)
            if not files_to_delete:
                print(f"No files found for pattern: {pattern}")
                continue

            for f_path in files_to_delete:
                basename = os.path.basename(f_path)
                if basename not in protected and os.path.isfile(f_path):  # Ensure it's a file and not protected
                    try:
                        os.remove(f_path)
                        print(f"Deleted: {f_path}")
                        deleted_count += 1
                    except Exception as e:
                        print(f"Failed delete {f_path}: {e}")
                        errors += 1
                elif os.path.isdir(f_path):
                    print(f"Skipping directory: {f_path}")
                #else:
                #    print(f"Skipping protected/non-file: {f_path}")
        except Exception as glob_e:
            print(f"Error matching pattern {pattern}: {glob_e}")
            errors += 1

    msg = f"Deleted {deleted_count} files."
    if errors > 0:
        msg += f" Encountered {errors} errors."
        st.warning(msg)
    else:
        st.success(msg)

    # Clear relevant caches
    st.session_state['download_link_cache'] = {}
    st.session_state['audio_cache'] = {}  # Clear audio cache if MP3s deleted
|
| 906 |
+
|
| 907 |
+
|
| 908 |
+
# --- Custom Paste Component (Keep from New App) ---
|
| 909 |
+
def paste_image_component():  # Returns Image object, type string
    """Form that decodes a pasted data-URI image.

    Returns (PIL.Image or None, image type string like "png" or None).
    The raw base64 payload is stashed in st.session_state.paste_image_base64
    to avoid reprocessing across reruns.
    """
    # If PIL.Image not imported, this will fail. Ensure it is.
    pasted_img = None
    img_type = None
    with st.form(key="paste_form"):
        paste_input = st.text_area("Paste Image Data Here (Ctrl+V)", key="paste_input_area", height=50)
        submit_button = st.form_submit_button("Paste Image 📋")
        if submit_button and paste_input and paste_input.startswith('data:image'):
            try:
                # data URI shape: data:image/<type>;base64,<payload>
                mime_type = paste_input.split(';')[0].split(':')[1]
                base64_str = paste_input.split(',')[1]
                img_bytes = base64.b64decode(base64_str)
                pasted_img = Image.open(io.BytesIO(img_bytes))
                img_type = mime_type.split('/')[1]  # e.g., png, jpeg
                # Show preview immediately
                st.image(pasted_img, caption=f"Pasted Image ({img_type.upper()})", width=150)
                # Store base64 temporarily to avoid reprocessing on rerun if only text changed
                st.session_state.paste_image_base64 = base64_str
            except ImportError:
                st.error("Pillow library not installed. Cannot process pasted images.")
            except Exception as e:
                st.error(f"Image decode error: {e}")
                st.session_state.paste_image_base64 = ""  # Clear on error
        elif submit_button:
            st.warning("No valid image data pasted.")
            st.session_state.paste_image_base64 = ""  # Clear if invalid submit

    return pasted_img, img_type
|
| 937 |
+
|
| 938 |
+
|
| 939 |
+
# --- Mapping Emojis to Primitive Types ---
|
| 940 |
+
# Ensure these types match the createPrimitiveMesh function keys in index.html
|
| 941 |
+
# Maps toolbar emoji -> primitive type name.
# Ensure these types match the createPrimitiveMesh function keys in index.html.
PRIMITIVE_MAP = {
    "🌳": "Tree", "🗿": "Rock", "🏛️": "Simple House", "🌲": "Pine Tree", "🧱": "Brick Wall",
    "🔵": "Sphere", "📦": "Cube", "🧴": "Cylinder", "🍦": "Cone", "🍩": "Torus",  # cylinder emoji changed
    "🍄": "Mushroom", "🌵": "Cactus", "🔥": "Campfire", "⭐": "Star", "💎": "Gem",
    "🗼": "Tower", "🚧": "Barrier", "⛲": "Fountain", "🏮": "Lantern", "팻": "Sign Post"  # sign post emoji changed (Korean char used deliberately)
    # Add more pairs up to ~20
}
|
| 948 |
+
|
| 949 |
+
# --- Main Streamlit Interface ---
|
| 950 |
+
def main_interface():
|
| 951 |
+
# init_session_state() # Called before main_interface
|
| 952 |
+
|
| 953 |
+
# --- Load initial world state ONCE per session ---
|
| 954 |
+
if not st.session_state.get('initial_world_state_loaded', False):
|
| 955 |
+
with st.spinner("Loading initial world state..."):
|
| 956 |
+
load_world_state_from_disk()
|
| 957 |
+
st.session_state.initial_world_state_loaded = True
|
| 958 |
+
|
| 959 |
+
# --- Username Setup ---
|
| 960 |
+
saved_username = load_username()
|
| 961 |
+
# Check if saved username is valid, otherwise pick random
|
| 962 |
+
if saved_username and saved_username in FUN_USERNAMES:
|
| 963 |
+
st.session_state.username = saved_username
|
| 964 |
+
st.session_state.tts_voice = FUN_USERNAMES[saved_username] # Set voice too
|
| 965 |
+
if not st.session_state.username:
|
| 966 |
+
# Pick a random available name if possible
|
| 967 |
+
# This check might be complex if server restarts often, rely on WS join/leave?
|
| 968 |
+
# For simplicity, just pick random if none saved/valid
|
| 969 |
+
st.session_state.username = random.choice(list(FUN_USERNAMES.keys()))
|
| 970 |
+
st.session_state.tts_voice = FUN_USERNAMES[st.session_state.username]
|
| 971 |
+
save_username(st.session_state.username)
|
| 972 |
+
# Announce join happens via WebSocket handler when client connects
|
| 973 |
+
|
| 974 |
+
st.title(f"{Site_Name} - User: {st.session_state.username}")
|
| 975 |
+
|
| 976 |
+
# --- Main Content Area ---
|
| 977 |
+
tab_world, tab_chat, tab_files = st.tabs(["🏗️ World Builder", "🗣️ Chat", "📂 Files & Settings"])
|
| 978 |
+
|
| 979 |
+
with tab_world:
|
| 980 |
+
st.header("Shared 3D World")
|
| 981 |
+
st.caption("Place objects using the sidebar tools. Changes are shared live!")
|
| 982 |
+
|
| 983 |
+
# --- Embed HTML Component for Three.js ---
|
| 984 |
+
html_file_path = 'index.html'
|
| 985 |
try:
|
| 986 |
+
with open(html_file_path, 'r', encoding='utf-8') as f:
|
| 987 |
+
html_template = f.read()
|
| 988 |
+
|
| 989 |
+
# Determine WebSocket URL based on Streamlit server address if possible
|
| 990 |
+
# Fallback to localhost for local dev
|
| 991 |
+
# This part is tricky and might need manual configuration depending on deployment
|
| 992 |
+
try:
|
| 993 |
+
# Attempt to get server address (might not work reliably in all deployments)
|
| 994 |
+
from streamlit.web.server.server import Server
|
| 995 |
+
session_info = Server.get_current().get_session_info(st.runtime.scriptrunner.get_script_run_ctx().session_id)
|
| 996 |
+
server_host = session_info.ws.stream.request.host.split(':')[0] # Get host without port
|
| 997 |
+
ws_url = f"ws://{server_host}:8765"
|
| 998 |
+
print(f"Determined WS URL: {ws_url}")
|
| 999 |
+
except Exception as e:
|
| 1000 |
+
print(f"Could not determine server host ({e}), defaulting WS URL to localhost.")
|
| 1001 |
+
ws_url = "ws://localhost:8765"
|
| 1002 |
+
|
| 1003 |
+
|
| 1004 |
+
js_injection_script = f"""
|
| 1005 |
+
<script>
|
| 1006 |
+
window.USERNAME = {json.dumps(st.session_state.username)};
|
| 1007 |
+
window.WEBSOCKET_URL = {json.dumps(ws_url)};
|
| 1008 |
+
window.SELECTED_OBJECT_TYPE = {json.dumps(st.session_state.selected_object)}; // Send current tool
|
| 1009 |
+
window.PLOT_WIDTH = {json.dumps(PLOT_WIDTH)}; // Send constants needed by JS
|
| 1010 |
+
window.PLOT_DEPTH = {json.dumps(PLOT_DEPTH)};
|
| 1011 |
+
|
| 1012 |
+
console.log("Streamlit State Injected:", {{
|
| 1013 |
+
username: window.USERNAME,
|
| 1014 |
+
websocketUrl: window.WEBSOCKET_URL,
|
| 1015 |
+
selectedObject: window.SELECTED_OBJECT_TYPE
|
| 1016 |
+
}});
|
| 1017 |
+
</script>
|
| 1018 |
+
"""
|
| 1019 |
+
html_content_with_state = html_template.replace('</head>', js_injection_script + '\n</head>', 1)
|
| 1020 |
+
|
| 1021 |
+
components.html(html_content_with_state, height=700, scrolling=False)
|
| 1022 |
+
|
| 1023 |
+
except FileNotFoundError:
|
| 1024 |
+
st.error(f"CRITICAL ERROR: Could not find '{html_file_path}'. Ensure it's in the same directory.")
|
| 1025 |
+
except Exception as e:
|
| 1026 |
+
st.error(f"Error loading 3D component: {e}")
|
| 1027 |
+
st.exception(e) # Show traceback
|
| 1028 |
+
|
| 1029 |
+
with tab_chat:
|
| 1030 |
+
st.header(f"{START_ROOM} Chat")
|
| 1031 |
+
chat_history = asyncio.run(load_chat_history()) # Load history at start of tab render
|
| 1032 |
+
chat_container = st.container(height=500) # Scrollable chat area
|
| 1033 |
+
with chat_container:
|
| 1034 |
+
# Display chat history (most recent at bottom)
|
| 1035 |
+
st.markdown("----\n".join(reversed(chat_history[-50:]))) # Show last 50, use markdown, reversed
|
| 1036 |
+
|
| 1037 |
+
# Chat Input Area
|
| 1038 |
+
message_value = st.text_input(
|
| 1039 |
+
"Your Message:",
|
| 1040 |
+
key="message_input", # Key links to st.session_state.message_input
|
| 1041 |
+
label_visibility="collapsed"
|
| 1042 |
+
)
|
| 1043 |
+
send_button_clicked = st.button("Send Chat 💬", key="send_chat_button")
|
| 1044 |
+
should_autosend = st.session_state.get('autosend', False) and message_value # Check flag and value
|
| 1045 |
+
|
| 1046 |
+
# Process if button clicked OR autosend triggered with a valid message
|
| 1047 |
+
if send_button_clicked or should_autosend:
|
| 1048 |
+
message_to_send = message_value # Capture the value from this run
|
| 1049 |
+
|
| 1050 |
+
if message_to_send.strip() and message_to_send != st.session_state.get('last_message', ''):
|
| 1051 |
+
# Update last message tracker *before* sending/clearing
|
| 1052 |
+
st.session_state.last_message = message_to_send
|
| 1053 |
+
voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural")
|
| 1054 |
+
|
| 1055 |
+
# Send via WebSocket
|
| 1056 |
+
ws_message = json.dumps({
|
| 1057 |
+
"type": "chat_message",
|
| 1058 |
+
"payload": {"username": st.session_state.username, "message": message_to_send, "voice": voice}
|
| 1059 |
+
})
|
| 1060 |
+
# Use asyncio.run correctly for async functions called from sync context
|
| 1061 |
+
try:
|
| 1062 |
+
# Ensure loop is available - get current or run in new one if needed
|
| 1063 |
+
loop = asyncio.get_running_loop()
|
| 1064 |
+
loop.create_task(broadcast_message(ws_message)) # Schedule broadcast
|
| 1065 |
+
except RuntimeError: # No running loop
|
| 1066 |
+
asyncio.run(broadcast_message(ws_message)) # Run in new loop (less efficient)
|
| 1067 |
+
except Exception as e:
|
| 1068 |
+
st.error(f"WebSocket broadcast error: {e}")
|
| 1069 |
+
|
| 1070 |
+
|
| 1071 |
+
# Save locally (run in background task to avoid blocking UI much)
|
| 1072 |
+
try:
|
| 1073 |
+
loop = asyncio.get_running_loop()
|
| 1074 |
+
loop.create_task(save_chat_entry(st.session_state.username, message_to_send, voice))
|
| 1075 |
+
except RuntimeError:
|
| 1076 |
+
asyncio.run(save_chat_entry(st.session_state.username, message_to_send, voice))
|
| 1077 |
+
except Exception as e:
|
| 1078 |
+
st.error(f"Chat save error: {e}")
|
| 1079 |
+
|
| 1080 |
+
|
| 1081 |
+
# --- CORRECT WAY TO CLEAR ---
|
| 1082 |
+
st.session_state.message_input = ""
|
| 1083 |
+
|
| 1084 |
+
# Rerun to clear the input field visually and update the chat display
|
| 1085 |
+
# Short delay might help ensure background tasks started? Unlikely needed.
|
| 1086 |
+
# time.sleep(0.05)
|
| 1087 |
+
st.rerun()
|
| 1088 |
+
|
| 1089 |
+
# Handle cases where button was clicked but message was empty/repeated
|
| 1090 |
+
elif send_button_clicked and (not message_to_send.strip() or message_to_send == st.session_state.get('last_message', '')):
|
| 1091 |
+
st.toast("Message empty or same as last.") # Give feedback
|
| 1092 |
+
|
| 1093 |
+
with tab_files:
|
| 1094 |
+
st.header("File Management & Settings")
|
| 1095 |
+
|
| 1096 |
+
st.subheader("Server & World State")
|
| 1097 |
+
col_ws, col_save = st.columns(2)
|
| 1098 |
+
with col_ws:
|
| 1099 |
+
# Check thread status if task exists
|
| 1100 |
+
server_alive = st.session_state.get('server_task') and st.session_state.server_task.is_alive()
|
| 1101 |
+
ws_status = "Running" if server_alive else "Stopped"
|
| 1102 |
+
st.metric("WebSocket Server", ws_status)
|
| 1103 |
+
st.metric("Connected Clients", len(connected_clients)) # Use global set length
|
| 1104 |
+
if not server_alive and st.button("Restart Server Thread", key="restart_ws"):
|
| 1105 |
+
start_websocket_server_thread()
|
| 1106 |
+
st.rerun()
|
| 1107 |
+
|
| 1108 |
+
with col_save:
|
| 1109 |
+
if st.button("💾 Save World State to Disk", key="save_world_disk", help="Saves the current live world state to world_state.json"):
|
| 1110 |
+
with st.spinner("Saving..."):
|
| 1111 |
+
if save_world_state_to_disk():
|
| 1112 |
+
st.success("World state saved!")
|
| 1113 |
+
else:
|
| 1114 |
+
st.error("Failed to save world state.")
|
| 1115 |
+
st.markdown(get_download_link(WORLD_STATE_FILE, "json"), unsafe_allow_html=True)
|
| 1116 |
+
|
| 1117 |
+
|
| 1118 |
+
# File deletion buttons
|
| 1119 |
+
st.subheader("Delete Files")
|
| 1120 |
+
st.warning("Deletion is permanent!", icon="⚠️")
|
| 1121 |
+
col_del1, col_del2, col_del3, col_del4 = st.columns(4)
|
| 1122 |
+
with col_del1:
|
| 1123 |
+
if st.button("🗑️ Chats (.md)", key="del_chat_md"):
|
| 1124 |
+
delete_files([os.path.join(CHAT_DIR, "*.md")])
|
| 1125 |
+
st.session_state.chat_history = [] # Clear session history too
|
| 1126 |
+
st.rerun()
|
| 1127 |
+
with col_del2:
|
| 1128 |
+
if st.button("🗑️ Audio (.mp3)", key="del_audio_mp3"):
|
| 1129 |
+
delete_files([os.path.join(AUDIO_DIR, "*.mp3"), os.path.join(AUDIO_CACHE_DIR, "*.mp3")])
|
| 1130 |
+
st.session_state.audio_cache = {}
|
| 1131 |
+
st.rerun()
|
| 1132 |
+
with col_del3:
|
| 1133 |
+
if st.button("🗑️ Zips (.zip)", key="del_zips"):
|
| 1134 |
+
delete_files(["*.zip"])
|
| 1135 |
+
st.rerun()
|
| 1136 |
+
with col_del4:
|
| 1137 |
+
if st.button("🗑️ All Generated", key="del_all_gen", help="Deletes Chats, Audio, Zips"):
|
| 1138 |
+
delete_files([os.path.join(CHAT_DIR, "*.md"),
|
| 1139 |
+
os.path.join(AUDIO_DIR, "*.mp3"),
|
| 1140 |
+
os.path.join(AUDIO_CACHE_DIR, "*.mp3"),
|
| 1141 |
+
"*.zip"])
|
| 1142 |
+
st.session_state.chat_history = []
|
| 1143 |
+
st.session_state.audio_cache = {}
|
| 1144 |
+
st.rerun()
|
| 1145 |
+
|
| 1146 |
+
# Display Zips
|
| 1147 |
+
st.subheader("📦 Download Archives")
|
| 1148 |
+
zip_files = sorted(glob.glob("*.zip"), key=os.path.getmtime, reverse=True)
|
| 1149 |
+
for zip_file in zip_files:
|
| 1150 |
+
st.markdown(get_download_link(zip_file, "zip"), unsafe_allow_html=True)
|
| 1151 |
+
|
| 1152 |
+
|
| 1153 |
+
# --- Sidebar Controls ---
|
| 1154 |
+
with st.sidebar:
|
| 1155 |
+
st.header("🏗️ Build Tools")
|
| 1156 |
+
st.caption("Select an object to place.")
|
| 1157 |
+
|
| 1158 |
+
# --- Emoji Buttons for Primitives ---
|
| 1159 |
+
cols = st.columns(5) # 5 columns for buttons
|
| 1160 |
+
col_idx = 0
|
| 1161 |
+
current_tool = st.session_state.get('selected_object', 'None')
|
| 1162 |
+
|
| 1163 |
+
for emoji, name in PRIMITIVE_MAP.items():
|
| 1164 |
+
button_key = f"primitive_{name}"
|
| 1165 |
+
# Use primary styling for the selected button
|
| 1166 |
+
button_type = "primary" if current_tool == name else "secondary"
|
| 1167 |
+
if cols[col_idx % 5].button(emoji, key=button_key, help=name, type=button_type, use_container_width=True):
|
| 1168 |
+
st.session_state.selected_object = name
|
| 1169 |
+
# Update JS immediately without full rerun if possible
|
| 1170 |
+
try:
|
| 1171 |
+
js_update_selection = f"updateSelectedObjectType({json.dumps(name)});"
|
| 1172 |
+
streamlit_js_eval(js_code=js_update_selection, key=f"update_tool_js_{name}") # Unique key per button might help
|
| 1173 |
+
except Exception as e:
|
| 1174 |
+
print(f"Could not push tool update to JS: {e}")
|
| 1175 |
+
# Force a rerun to update button styles immediately if JS update fails or isn't enough
|
| 1176 |
+
st.rerun()
|
| 1177 |
+
col_idx += 1
|
| 1178 |
+
|
| 1179 |
+
# Button to clear selection
|
| 1180 |
+
st.markdown("---") # Separator
|
| 1181 |
+
if st.button("🚫 Clear Tool", key="clear_tool", use_container_width=True):
|
| 1182 |
+
if st.session_state.selected_object != 'None':
|
| 1183 |
+
st.session_state.selected_object = 'None'
|
| 1184 |
+
try: # Update JS too
|
| 1185 |
+
streamlit_js_eval(js_code=f"updateSelectedObjectType('None');", key="update_tool_js_none")
|
| 1186 |
+
except Exception: pass
|
| 1187 |
+
st.rerun() # Rerun to update UI
|
| 1188 |
+
|
| 1189 |
+
st.markdown("---")
|
| 1190 |
+
st.header("🗣️ Voice & User")
|
| 1191 |
+
# Username/Voice Selection
|
| 1192 |
+
# Use format_func to display only the name part
|
| 1193 |
+
current_username = st.session_state.get('username', list(FUN_USERNAMES.keys())[0])
|
| 1194 |
+
username_options = list(FUN_USERNAMES.keys())
|
| 1195 |
+
try:
|
| 1196 |
+
current_index = username_options.index(current_username)
|
| 1197 |
except ValueError:
|
| 1198 |
+
current_index = 0 # Default to first if saved name invalid
|
| 1199 |
+
|
| 1200 |
+
new_username = st.selectbox(
|
| 1201 |
+
"Change Name/Voice",
|
| 1202 |
+
options=username_options,
|
| 1203 |
+
index=current_index,
|
| 1204 |
+
key="username_select",
|
| 1205 |
+
format_func=lambda x: x.split(" ")[0] # Show only name before emoji
|
| 1206 |
+
)
|
| 1207 |
+
|
| 1208 |
+
if new_username != st.session_state.username:
|
| 1209 |
+
old_username = st.session_state.username
|
| 1210 |
+
# Announce name change via WebSocket
|
| 1211 |
+
change_msg = json.dumps({
|
| 1212 |
+
"type":"user_rename",
|
| 1213 |
+
"payload": {"old_username": old_username, "new_username": new_username}
|
| 1214 |
+
})
|
| 1215 |
+
try:
|
| 1216 |
+
loop = asyncio.get_running_loop()
|
| 1217 |
+
loop.create_task(broadcast_message(change_msg))
|
| 1218 |
+
except RuntimeError: asyncio.run(broadcast_message(change_msg))
|
| 1219 |
+
except Exception as e: st.error(f"Rename broadcast error: {e}")
|
| 1220 |
+
|
| 1221 |
+
st.session_state.username = new_username
|
| 1222 |
+
st.session_state.tts_voice = FUN_USERNAMES[new_username]
|
| 1223 |
+
save_username(st.session_state.username) # Save new username
|
| 1224 |
+
st.rerun()
|
| 1225 |
+
|
| 1226 |
+
# Enable/Disable Audio Toggle
|
| 1227 |
+
st.session_state['enable_audio'] = st.toggle("Enable TTS Audio", value=st.session_state.get('enable_audio', True))
|
| 1228 |
+
|
| 1229 |
+
st.markdown("---")
|
| 1230 |
+
st.info("Chat and File management in main tabs.")
|
| 1231 |
+
|
| 1232 |
+
|
| 1233 |
+
# --- Main Execution ---
if __name__ == "__main__":
    # Initialize all session-state keys before any widget/UI code touches them.
    init_session_state()

    # Start the WebSocket server thread if it is not already running.
    # NOTE: use .get() so a missing *or None* 'server_task' entry cannot raise
    # AttributeError — this matches the safe pattern already used in the
    # Files tab status check (st.session_state.get('server_task') and ...).
    server_task = st.session_state.get('server_task')
    if server_task is None or not server_task.is_alive():
        print("Main thread: Starting WebSocket server thread...")
        start_websocket_server_thread()
        # Brief pause so the server thread can bind its socket before the
        # browser component tries to connect (mitigates a first-load race;
        # not strictly guaranteed, but helps in practice).
        time.sleep(1.5)
    else:
        print("Main thread: Server thread already exists.")

    # Load the persisted world state from disk exactly once per session.
    if not st.session_state.get('initial_world_state_loaded', False):
        load_world_state_from_disk()
        st.session_state.initial_world_state_loaded = True

    # Run the main UI rendering function.
    main_interface()

    # Optional: periodic world-state save (currently disabled).
    # last_save_time = st.session_state.get('last_world_save_time', 0)
    # if time.time() - last_save_time > 300:  # 300 seconds = 5 minutes
    #     print("Periodic save triggered...")
    #     if save_world_state_to_disk():
    #         st.session_state.last_world_save_time = time.time()
|
|
|
|
|
|
|
|
|
|
|
|