Update app.py
app.py
CHANGED
@@ -650,6 +650,8 @@ HTML_CONTENT = """<!DOCTYPE html>
                 userMemories = {};
                 memoryList.innerHTML = '';

+                console.log('[LoadMemories] Loaded memories from DB:', memories);
+
                 memories.forEach(memory => {
                     if (!userMemories[memory.category]) {
                         userMemories[memory.category] = [];
@@ -665,7 +667,10 @@ HTML_CONTENT = """<!DOCTYPE html>
                     memoryList.appendChild(item);
                 });

-                console.log('
+                console.log('[LoadMemories] Formatted memories:', userMemories);
+                console.log('[LoadMemories] Total categories:', Object.keys(userMemories).length);
+                console.log('[LoadMemories] Total items:', Object.values(userMemories).flat().length);
+
             } catch (error) {
                 console.error('Failed to load memories:', error);
             }
@@ -911,6 +916,12 @@ HTML_CONTENT = """<!DOCTYPE html>
        }

        async function setupWebRTC() {
+            // If memories have not been loaded yet, load them first
+            if (Object.keys(userMemories).length === 0) {
+                console.log('[WebRTC] No memories loaded, loading now...');
+                await loadMemories();
+            }
+
            const config = __RTC_CONFIGURATION__;
            peerConnection = new RTCPeerConnection(config);
            const timeoutId = setTimeout(() => {
@@ -972,6 +983,9 @@ HTML_CONTENT = """<!DOCTYPE html>

            webrtc_id = Math.random().toString(36).substring(7);

+            console.log('[WebRTC] Sending offer with memories:', userMemories);
+            console.log('[WebRTC] Total memory items:', Object.values(userMemories).flat().length);
+
            const response = await fetch('/webrtc/offer', {
                method: 'POST',
                headers: { 'Content-Type': 'application/json' },
@@ -1123,9 +1137,16 @@ HTML_CONTENT = """<!DOCTYPE html>
            }
        }

-        startButton.addEventListener('click', () => {
+        startButton.addEventListener('click', async () => {
            console.log('clicked');
            console.log(peerConnection, peerConnection?.connectionState);
+
+            // If memories have not been loaded yet, load them first
+            if (Object.keys(userMemories).length === 0) {
+                console.log('[StartButton] Loading memories before starting...');
+                await loadMemories();
+            }
+
            if (!peerConnection || peerConnection.connectionState !== 'connected') {
                setupWebRTC();
            } else {
@@ -1488,22 +1509,27 @@ def update_chatbot(chatbot: list[dict], response: ResponseAudioTranscriptDoneEvent) -> list[dict]:
    chatbot.append({"role": "assistant", "content": response.transcript})
    return chatbot

-
def format_memories_for_prompt(memories: Dict[str, List[str]]) -> str:
    """Format memories for inclusion in system prompt"""
    if not memories:
        return ""

    memory_text = "\n\n=== 기억된 정보 ===\n"
+    memory_count = 0
+
    for category, items in memories.items():
        if items and isinstance(items, list):
-
-
-
+            valid_items = [item for item in items if item]  # Skip None and empty strings
+            if valid_items:
+                memory_text += f"\n[{category}]\n"
+                for item in valid_items:
                    memory_text += f"- {item}\n"
+                    memory_count += 1

-
+    print(f"[FORMAT_MEMORIES] Formatted {memory_count} memory items")
+    return memory_text if memory_count > 0 else ""

+

async def process_text_chat(message: str, web_search_enabled: bool, session_id: str,
                            user_name: str = "", memories: Dict = None) -> Dict[str, str]:
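Note: a minimal sketch of how the reworked format_memories_for_prompt behaves; the categories and items below are illustrative placeholders, not values from the app's database:

    # Hypothetical input: memories grouped by category, as the handler stores them.
    sample = {
        "personal_info": ["Name: Kim", ""],   # empty strings are dropped by the valid_items filter
        "preferences": ["Likes green tea"],
        "hobbies": [],                        # empty categories are skipped entirely
    }
    text = format_memories_for_prompt(sample)
    # text begins with "\n\n=== 기억된 정보 ===\n" and then, per non-empty category,
    # a "[category]" header followed by "- item" lines; if nothing survives the
    # filtering, the function now returns "" instead of the bare header.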
@@ -1610,12 +1636,20 @@ class OpenAIHandler(AsyncStreamHandler):
        self.session_id = session_id
        self.user_name = user_name
        self.memories = memories or {}
-        self.is_responding = False
-        self.should_stop = False
+        self.is_responding = False
+        self.should_stop = False

-
+        # Log memory info
+        memory_count = sum(len(items) for items in self.memories.values() if isinstance(items, list))
+        print(f"[INIT] Handler created with:")
+        print(f"  - web_search={web_search_enabled}")
+        print(f"  - session_id={session_id}")
+        print(f"  - user={user_name}")
+        print(f"  - memory categories={list(self.memories.keys())}")
+        print(f"  - total memory items={memory_count}")

    def copy(self):
+        # Grab the most recent connection settings
        if connection_settings:
            recent_ids = sorted(connection_settings.keys(),
                                key=lambda k: connection_settings[k].get('timestamp', 0),
@@ -1625,17 +1659,64 @@ class OpenAIHandler(AsyncStreamHandler):
                settings = connection_settings[recent_id]

                print(f"[COPY] Copying settings from {recent_id}:")
+                print(f"[COPY] - web_search: {settings.get('web_search_enabled', False)}")
+                print(f"[COPY] - session_id: {settings.get('session_id')}")
+                print(f"[COPY] - user_name: {settings.get('user_name', '')}")
+
+                memories = settings.get('memories', {})
+
+                # If the settings carry no memories, load them directly from the DB (synchronously)
+                if not memories:
+                    print(f"[COPY] No memories in settings, loading from DB...")
+                    import asyncio
+                    try:
+                        # Check whether an event loop already exists
+                        loop = asyncio.get_event_loop()
+                        if loop.is_running():
+                            # A loop is already running, so run the load in a separate thread
+                            import concurrent.futures
+                            with concurrent.futures.ThreadPoolExecutor() as executor:
+                                future = executor.submit(self._load_memories_sync)
+                                memories_list = future.result()
+                        else:
+                            # No running loop, so drive the coroutine on this loop
+                            memories_list = loop.run_until_complete(PersonalAssistantDB.get_all_memories())
+                    except:
+                        # Fall back to a brand-new event loop
+                        new_loop = asyncio.new_event_loop()
+                        asyncio.set_event_loop(new_loop)
+                        memories_list = new_loop.run_until_complete(PersonalAssistantDB.get_all_memories())
+                        new_loop.close()
+
+                    # Group memories by category
+                    for memory in memories_list:
+                        category = memory['category']
+                        if category not in memories:
+                            memories[category] = []
+                        memories[category].append(memory['content'])
+
+                    print(f"[COPY] Loaded {len(memories_list)} memories from DB")
+
+                print(f"[COPY] - memories count: {sum(len(items) for items in memories.values() if isinstance(items, list))}")

                return OpenAIHandler(
                    web_search_enabled=settings.get('web_search_enabled', False),
                    webrtc_id=recent_id,
                    session_id=settings.get('session_id'),
                    user_name=settings.get('user_name', ''),
-                    memories=
+                    memories=memories
                )

        print(f"[COPY] No settings found, creating default handler")
        return OpenAIHandler(web_search_enabled=False)
+
+    def _load_memories_sync(self):
+        """Load memories synchronously (for running in a worker thread)"""
+        loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(loop)
+        result = loop.run_until_complete(PersonalAssistantDB.get_all_memories())
+        loop.close()
+        return result

    async def search_web(self, query: str) -> str:
        """Perform web search and return formatted results"""
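Note: copy() runs synchronously but needs the async PersonalAssistantDB.get_all_memories(), hence the event-loop juggling above. A self-contained sketch of the same sync-to-async bridge, using illustrative names rather than this app's classes:

    import asyncio
    import concurrent.futures

    async def fetch_rows():
        # Stand-in for an async DB call such as get_all_memories()
        await asyncio.sleep(0)
        return [{"category": "demo", "content": "example"}]

    def fetch_rows_sync():
        """Run the coroutine from sync code, whether or not a loop is already running."""
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            # No running loop: drive the coroutine directly
            return asyncio.run(fetch_rows())
        # A loop is running (e.g. inside the server): use a worker thread with its own loop
        with concurrent.futures.ThreadPoolExecutor() as pool:
            return pool.submit(lambda: asyncio.run(fetch_rows())).result()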
@@ -1681,6 +1762,23 @@ class OpenAIHandler(AsyncStreamHandler):

            print(f"[START_UP] Updated settings from storage for {self.webrtc_id}")

+        # If memories are empty, load them from the DB
+        if not self.memories:
+            print(f"[START_UP] No memories found, loading from DB...")
+            memories_list = await PersonalAssistantDB.get_all_memories()
+
+            # Group memories by category
+            self.memories = {}
+            for memory in memories_list:
+                category = memory['category']
+                if category not in self.memories:
+                    self.memories[category] = []
+                self.memories[category].append(memory['content'])
+
+            print(f"[START_UP] Loaded {len(memories_list)} memories from DB")
+
+        print(f"[START_UP] Final memory count: {sum(len(items) for items in self.memories.values() if isinstance(items, list))}")
+
        self.client = openai.AsyncOpenAI()

        print(f"[REALTIME API] Connecting...")
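Note: the start_up() addition turns the flat list returned by get_all_memories() into a {category: [contents]} dict. A minimal sketch of that grouping step, assuming each row carries 'category' and 'content' keys as the diff indicates:

    from collections import defaultdict
    from typing import Dict, List

    def group_by_category(rows: List[dict]) -> Dict[str, List[str]]:
        # rows look like [{"category": "...", "content": "..."}, ...]
        grouped: Dict[str, List[str]] = defaultdict(list)
        for row in rows:
            grouped[row["category"]].append(row["content"])
        return dict(grouped)

    # group_by_category([{"category": "preferences", "content": "Likes green tea"}])
    # -> {"preferences": ["Likes green tea"]}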
@@ -1696,6 +1794,9 @@ IMPORTANT: Give only ONE response per user input. Do not repeat yourself or give
        if self.memories:
            memory_text = format_memories_for_prompt(self.memories)
            base_instructions += memory_text
+            print(f"[START_UP] Added memories to system prompt: {len(memory_text)} characters")
+        else:
+            print(f"[START_UP] No memories to add to system prompt")

        # Define the web search function
        tools = []
@@ -1941,15 +2042,51 @@ IMPORTANT: Give only ONE response per user input. Do not repeat yourself or give
        print("[REALTIME API] Connection closed")


-#
-
+# Factory function to create new handler with latest settings
+def create_handler():
+    """Factory function to create new handler with latest settings"""
+    # Grab the most recent settings
+    if connection_settings:
+        recent_ids = sorted(connection_settings.keys(),
+                            key=lambda k: connection_settings[k].get('timestamp', 0),
+                            reverse=True)
+        if recent_ids:
+            recent_id = recent_ids[0]
+            settings = connection_settings[recent_id]
+
+            memories = settings.get('memories', {})
+
+            # If there are no memories, load them directly from the DB (synchronously)
+            if not memories:
+                import asyncio
+                loop = asyncio.new_event_loop()
+                asyncio.set_event_loop(loop)
+                memories_list = loop.run_until_complete(PersonalAssistantDB.get_all_memories())
+                loop.close()
+
+                # Group memories by category
+                for memory in memories_list:
+                    category = memory['category']
+                    if category not in memories:
+                        memories[category] = []
+                    memories[category].append(memory['content'])
+
+            return OpenAIHandler(
+                web_search_enabled=settings.get('web_search_enabled', False),
+                webrtc_id=recent_id,
+                session_id=settings.get('session_id'),
+                user_name=settings.get('user_name', ''),
+                memories=memories
+            )
+
+    return OpenAIHandler(web_search_enabled=False)

# Create components
chatbot = gr.Chatbot(type="messages")

-# Create stream with
+# Create stream with factory function
stream = Stream(
-    handler
+    create_handler,  # Pass the factory function instead of a handler instance
    mode="send-receive",
    modality="audio",
    additional_inputs=[chatbot],
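Note: Stream() is now given create_handler, a zero-argument callable, instead of a pre-built handler, so settings and memories stored at offer time are read when a connection is actually created. A generic sketch of that factory idea in plain Python (names are illustrative and independent of fastrtc):

    from typing import Callable

    class Handler:
        def __init__(self, memories: dict | None = None):
            self.memories = memories or {}

    def make_handler_factory(get_latest_settings: Callable[[], dict]) -> Callable[[], Handler]:
        def factory() -> Handler:
            settings = get_latest_settings()  # re-read settings at creation time
            return Handler(memories=settings.get("memories", {}))
        return factory

    # Each factory() call builds a fresh Handler from whatever the settings are at that
    # moment, which is how late-loaded memories can reach a brand-new connection.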
@@ -1960,6 +2097,8 @@ stream = Stream(
    time_limit=300 if get_space() else None,
)

+
+
app = FastAPI()

# Mount stream
@@ -2005,6 +2144,23 @@ async def custom_offer(request: Request):
    print(f"[OFFER] web_search_enabled: {web_search_enabled}")
    print(f"[OFFER] session_id: {session_id}")
    print(f"[OFFER] user_name: {user_name}")
+    print(f"[OFFER] memories categories: {list(memories.keys())}")
+    print(f"[OFFER] memories total items: {sum(len(items) for items in memories.values() if isinstance(items, list))}")
+
+    # If no memories were sent, load them from the DB
+    if not memories and session_id:
+        print(f"[OFFER] No memories received, loading from DB...")
+        memories_list = await PersonalAssistantDB.get_all_memories()
+
+        # Group memories by category
+        memories = {}
+        for memory in memories_list:
+            category = memory['category']
+            if category not in memories:
+                memories[category] = []
+            memories[category].append(memory['content'])
+
+        print(f"[OFFER] Loaded {len(memories_list)} memories from DB")

    # Store settings with timestamp
    if webrtc_id:
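Note: custom_offer() stores the resolved memories in connection_settings under the webrtc_id, and copy()/create_handler later pick the entry with the newest timestamp. A minimal sketch of that store-then-pick-latest pattern, with illustrative values:

    import time

    connection_store: dict[str, dict] = {}

    def store_settings(webrtc_id: str, **settings) -> None:
        connection_store[webrtc_id] = {**settings, "timestamp": time.monotonic()}

    def latest_settings() -> dict | None:
        if not connection_store:
            return None
        newest = max(connection_store, key=lambda k: connection_store[k]["timestamp"])
        return connection_store[newest]

    # store_settings("abc123", user_name="Kim", memories={"preferences": ["green tea"]})
    # latest_settings() -> the entry stored most recently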
@@ -2012,11 +2168,11 @@ async def custom_offer(request: Request):
            'web_search_enabled': web_search_enabled,
            'session_id': session_id,
            'user_name': user_name,
-            'memories': memories,
+            'memories': memories,  # Store the memories loaded from the DB
            'timestamp': asyncio.get_event_loop().time()
        }

-        print(f"[OFFER] Stored settings for {webrtc_id}")
+        print(f"[OFFER] Stored settings for {webrtc_id} with {sum(len(items) for items in memories.values() if isinstance(items, list))} memory items")

    # Remove our custom route temporarily
    custom_route = None