# Private-AI / app.py
import asyncio
import base64
import json
import os
import numpy as np
import openai
from dotenv import load_dotenv
from fastapi import FastAPI, Request
from fastapi.responses import HTMLResponse, StreamingResponse
from fastrtc import (
AdditionalOutputs,
AsyncStreamHandler,
Stream,
get_twilio_turn_credentials,
wait_for_item,
)
from gradio.utils import get_space
from openai.types.beta.realtime import ResponseAudioTranscriptDoneEvent
import httpx
from typing import List, Dict
import gradio as gr
load_dotenv()
SAMPLE_RATE = 24000
# Supported languages for OpenAI Realtime API
SUPPORTED_LANGUAGES = {
"ko": "한국어 (Korean)",
"en": "English",
"es": "Español (Spanish)",
"fr": "Français (French)",
"de": "Deutsch (German)",
"it": "Italiano (Italian)",
"pt": "Português (Portuguese)",
"ru": "Русский (Russian)",
"ja": "日本語 (Japanese)",
"zh": "中文 (Chinese)",
"ar": "العربية (Arabic)",
"hi": "हिन्दी (Hindi)",
"nl": "Nederlands (Dutch)",
"pl": "Polski (Polish)",
"tr": "Türkçe (Turkish)",
"vi": "Tiếng Việt (Vietnamese)",
"th": "ไทย (Thai)",
"id": "Bahasa Indonesia",
"sv": "Svenska (Swedish)",
"da": "Dansk (Danish)",
"no": "Norsk (Norwegian)",
"fi": "Suomi (Finnish)",
"he": "עברית (Hebrew)",
"uk": "Українська (Ukrainian)",
"cs": "Čeština (Czech)",
"el": "Ελληνικά (Greek)",
"ro": "Română (Romanian)",
"hu": "Magyar (Hungarian)",
"ms": "Bahasa Melayu (Malay)"
}
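# Keys are ISO 639-1 codes; values are display names, e.g.
# SUPPORTED_LANGUAGES["ko"] == "한국어 (Korean)".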
# HTML content embedded as a string
HTML_CONTENT = """<!DOCTYPE html>
<html lang="ko">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Mouth of 'MOUSE'</title>
<style>
:root {
--primary-color: #6f42c1;
--secondary-color: #563d7c;
--dark-bg: #121212;
--card-bg: #1e1e1e;
--text-color: #f8f9fa;
--border-color: #333;
--hover-color: #8a5cf6;
}
body {
font-family: "SF Pro Display", -apple-system, BlinkMacSystemFont, sans-serif;
background-color: var(--dark-bg);
color: var(--text-color);
margin: 0;
padding: 0;
height: 100vh;
display: flex;
flex-direction: column;
overflow: hidden;
}
.container {
max-width: 1400px;
margin: 0 auto;
padding: 20px;
flex-grow: 1;
display: flex;
flex-direction: column;
width: 100%;
height: 100vh;
box-sizing: border-box;
overflow: hidden;
}
.header {
text-align: center;
padding: 15px 0;
border-bottom: 1px solid var(--border-color);
margin-bottom: 20px;
flex-shrink: 0;
background-color: var(--card-bg);
}
.main-content {
display: flex;
gap: 20px;
flex-grow: 1;
min-height: 0;
overflow: hidden;
}
.sidebar {
width: 350px;
flex-shrink: 0;
display: flex;
flex-direction: column;
gap: 20px;
overflow-y: auto;
max-height: calc(100vh - 120px);
}
.chat-section {
flex-grow: 1;
display: flex;
flex-direction: column;
min-width: 0;
}
.logo {
display: flex;
align-items: center;
justify-content: center;
gap: 10px;
}
.logo h1 {
margin: 0;
background: linear-gradient(135deg, var(--primary-color), #a78bfa);
-webkit-background-clip: text;
background-clip: text;
color: transparent;
font-size: 32px;
letter-spacing: 1px;
}
/* Settings section */
.settings-section {
background-color: var(--card-bg);
border-radius: 12px;
padding: 20px;
border: 1px solid var(--border-color);
overflow-y: auto;
flex-grow: 1;
}
.settings-grid {
display: flex;
flex-direction: column;
gap: 15px;
margin-bottom: 15px;
}
.interpretation-section {
display: flex;
flex-direction: column;
gap: 15px;
padding: 15px;
background-color: var(--dark-bg);
border-radius: 8px;
margin-top: 15px;
}
.interpretation-info {
font-size: 13px;
color: #999;
margin-top: 5px;
}
.setting-item {
display: flex;
align-items: center;
justify-content: space-between;
gap: 10px;
}
.setting-label {
font-size: 14px;
color: #aaa;
min-width: 60px;
}
/* Toggle switch */
.toggle-switch {
position: relative;
width: 50px;
height: 26px;
background-color: #ccc;
border-radius: 13px;
cursor: pointer;
transition: background-color 0.3s;
}
.toggle-switch.active {
background-color: var(--primary-color);
}
.toggle-slider {
position: absolute;
top: 3px;
left: 3px;
width: 20px;
height: 20px;
background-color: white;
border-radius: 50%;
transition: transform 0.3s;
}
.toggle-switch.active .toggle-slider {
transform: translateX(24px);
}
/* Select dropdown */
select {
background-color: var(--card-bg);
color: var(--text-color);
border: 1px solid var(--border-color);
padding: 8px 12px;
border-radius: 6px;
font-size: 14px;
cursor: pointer;
min-width: 120px;
max-width: 200px;
}
select:focus {
outline: none;
border-color: var(--primary-color);
}
/* Text inputs */
.text-input-section {
margin-top: 15px;
}
input[type="text"], textarea {
width: 100%;
background-color: var(--dark-bg);
color: var(--text-color);
border: 1px solid var(--border-color);
padding: 10px;
border-radius: 6px;
font-size: 14px;
box-sizing: border-box;
margin-top: 5px;
}
input[type="text"]:focus, textarea:focus {
outline: none;
border-color: var(--primary-color);
}
textarea {
resize: vertical;
min-height: 80px;
}
.chat-container {
border-radius: 12px;
background-color: var(--card-bg);
box-shadow: 0 8px 32px rgba(0, 0, 0, 0.2);
padding: 20px;
flex-grow: 1;
display: flex;
flex-direction: column;
border: 1px solid var(--border-color);
overflow: hidden;
min-height: 0;
height: 100%;
}
.chat-messages {
flex-grow: 1;
overflow-y: auto;
padding: 15px;
scrollbar-width: thin;
scrollbar-color: var(--primary-color) var(--card-bg);
min-height: 0;
max-height: calc(100vh - 250px);
}
.chat-messages::-webkit-scrollbar {
width: 6px;
}
.chat-messages::-webkit-scrollbar-thumb {
background-color: var(--primary-color);
border-radius: 6px;
}
.message {
margin-bottom: 15px;
padding: 12px 16px;
border-radius: 8px;
font-size: 15px;
line-height: 1.5;
position: relative;
max-width: 85%;
animation: fade-in 0.3s ease-out;
word-wrap: break-word;
}
@keyframes fade-in {
from {
opacity: 0;
transform: translateY(10px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
.message.user {
background: linear-gradient(135deg, #2c3e50, #34495e);
margin-left: auto;
border-bottom-right-radius: 2px;
}
.message.assistant {
background: linear-gradient(135deg, var(--secondary-color), var(--primary-color));
margin-right: auto;
border-bottom-left-radius: 2px;
}
.message.search-result {
background: linear-gradient(135deg, #1a5a3e, #2e7d32);
font-size: 14px;
padding: 10px;
margin-bottom: 10px;
}
.controls {
text-align: center;
margin-top: auto;
display: flex;
justify-content: center;
gap: 10px;
flex-shrink: 0;
padding-top: 20px;
}
/* Responsive design */
@media (max-width: 1024px) {
.sidebar {
width: 300px;
}
}
@media (max-width: 768px) {
.main-content {
flex-direction: column;
}
.sidebar {
width: 100%;
margin-bottom: 20px;
}
.chat-section {
height: 400px;
}
}
button {
background: linear-gradient(135deg, var(--primary-color), var(--secondary-color));
color: white;
border: none;
padding: 14px 28px;
font-family: inherit;
font-size: 16px;
cursor: pointer;
transition: all 0.3s;
text-transform: uppercase;
letter-spacing: 1px;
border-radius: 50px;
display: flex;
align-items: center;
justify-content: center;
gap: 10px;
box-shadow: 0 4px 10px rgba(111, 66, 193, 0.3);
}
button:hover {
transform: translateY(-2px);
box-shadow: 0 6px 15px rgba(111, 66, 193, 0.5);
background: linear-gradient(135deg, var(--hover-color), var(--primary-color));
}
button:active {
transform: translateY(1px);
}
#send-button {
background: linear-gradient(135deg, #2ecc71, #27ae60);
padding: 10px 20px;
font-size: 14px;
flex-shrink: 0;
}
#send-button:hover {
background: linear-gradient(135deg, #27ae60, #229954);
}
#audio-output {
display: none;
}
.icon-with-spinner {
display: flex;
align-items: center;
justify-content: center;
gap: 12px;
min-width: 180px;
}
.spinner {
width: 20px;
height: 20px;
border: 2px solid #ffffff;
border-top-color: transparent;
border-radius: 50%;
animation: spin 1s linear infinite;
flex-shrink: 0;
}
@keyframes spin {
to {
transform: rotate(360deg);
}
}
.audio-visualizer {
display: flex;
align-items: center;
justify-content: center;
gap: 5px;
min-width: 80px;
height: 25px;
}
.visualizer-bar {
width: 4px;
height: 100%;
background-color: rgba(255, 255, 255, 0.7);
border-radius: 2px;
transform-origin: bottom;
transform: scaleY(0.1);
transition: transform 0.1s ease;
}
.toast {
position: fixed;
top: 20px;
left: 50%;
transform: translateX(-50%);
padding: 16px 24px;
border-radius: 8px;
font-size: 14px;
z-index: 1000;
display: none;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.3);
}
.toast.error {
background-color: #f44336;
color: white;
}
.toast.warning {
background-color: #ff9800;
color: white;
}
.status-indicator {
display: inline-flex;
align-items: center;
margin-top: 10px;
font-size: 14px;
color: #aaa;
}
.status-dot {
width: 8px;
height: 8px;
border-radius: 50%;
margin-right: 8px;
}
.status-dot.connected {
background-color: #4caf50;
}
.status-dot.disconnected {
background-color: #f44336;
}
.status-dot.connecting {
background-color: #ff9800;
animation: pulse 1.5s infinite;
}
@keyframes pulse {
0% {
opacity: 0.6;
}
50% {
opacity: 1;
}
100% {
opacity: 0.6;
}
}
.mouse-logo {
position: relative;
width: 40px;
height: 40px;
}
.mouse-ears {
position: absolute;
width: 15px;
height: 15px;
background-color: var(--primary-color);
border-radius: 50%;
}
.mouse-ear-left {
top: 0;
left: 5px;
}
.mouse-ear-right {
top: 0;
right: 5px;
}
.mouse-face {
position: absolute;
top: 10px;
left: 5px;
width: 30px;
height: 30px;
background-color: var(--secondary-color);
border-radius: 50%;
}
.language-info {
font-size: 12px;
color: #888;
margin-left: 5px;
}
</style>
</head>
<body>
<div id="error-toast" class="toast"></div>
<div class="container">
<div class="header">
<div class="logo">
<div class="mouse-logo">
<div class="mouse-ears mouse-ear-left"></div>
<div class="mouse-ears mouse-ear-right"></div>
<div class="mouse-face"></div>
</div>
<h1>MOUSE 음성 챗</h1>
</div>
<div class="status-indicator">
<div id="status-dot" class="status-dot disconnected"></div>
<span id="status-text">연결 대기 중</span>
</div>
</div>
<div class="main-content">
<div class="sidebar">
<div class="settings-section">
<h3 style="margin: 0 0 15px 0; color: var(--primary-color);">설정</h3>
<div class="settings-grid">
<div class="setting-item">
<span class="setting-label">웹 검색</span>
<div id="search-toggle" class="toggle-switch">
<div class="toggle-slider"></div>
</div>
</div>
<div class="setting-item">
<span class="setting-label">자동 번역</span>
<select id="language-select">
<option value="">비활성화</option>
<option value="ko">한국어 (Korean)</option>
<option value="en">English</option>
<option value="es">Español (Spanish)</option>
<option value="fr">Français (French)</option>
<option value="de">Deutsch (German)</option>
<option value="it">Italiano (Italian)</option>
<option value="pt">Português (Portuguese)</option>
<option value="ru">Русский (Russian)</option>
<option value="ja">日本語 (Japanese)</option>
<option value="zh">中文 (Chinese)</option>
<option value="ar">العربية (Arabic)</option>
<option value="hi">हिन्दी (Hindi)</option>
<option value="nl">Nederlands (Dutch)</option>
<option value="pl">Polski (Polish)</option>
<option value="tr">Türkçe (Turkish)</option>
<option value="vi">Tiếng Việt (Vietnamese)</option>
<option value="th">ไทย (Thai)</option>
<option value="id">Bahasa Indonesia</option>
<option value="sv">Svenska (Swedish)</option>
<option value="da">Dansk (Danish)</option>
<option value="no">Norsk (Norwegian)</option>
<option value="fi">Suomi (Finnish)</option>
<option value="he">עברית (Hebrew)</option>
<option value="uk">Українська (Ukrainian)</option>
<option value="cs">Čeština (Czech)</option>
<option value="el">Ελληνικά (Greek)</option>
<option value="ro">Română (Romanian)</option>
<option value="hu">Magyar (Hungarian)</option>
<option value="ms">Bahasa Melayu (Malay)</option>
</select>
</div>
</div>
<div class="interpretation-section">
<div class="setting-item">
<span class="setting-label">자동 통역</span>
<div id="interpretation-toggle" class="toggle-switch">
<div class="toggle-slider"></div>
</div>
</div>
<div class="setting-item" id="interpretation-language-container" style="display: none;">
<span class="setting-label">통역 언어</span>
<select id="interpretation-language-select">
<option value="">언어 선택</option>
<option value="ko">한국어 (Korean)</option>
<option value="en">English</option>
<option value="es">Español (Spanish)</option>
<option value="fr">Français (French)</option>
<option value="de">Deutsch (German)</option>
<option value="it">Italiano (Italian)</option>
<option value="pt">Português (Portuguese)</option>
<option value="ru">Русский (Russian)</option>
<option value="ja">日本語 (Japanese)</option>
<option value="zh">中文 (Chinese)</option>
<option value="ar">العربية (Arabic)</option>
<option value="hi">हिन्दी (Hindi)</option>
<option value="nl">Nederlands (Dutch)</option>
<option value="pl">Polski (Polish)</option>
<option value="tr">Türkçe (Turkish)</option>
<option value="vi">Tiếng Việt (Vietnamese)</option>
<option value="th">ไทย (Thai)</option>
<option value="id">Bahasa Indonesia</option>
<option value="sv">Svenska (Swedish)</option>
<option value="da">Dansk (Danish)</option>
<option value="no">Norsk (Norwegian)</option>
<option value="fi">Suomi (Finnish)</option>
<option value="he">עברית (Hebrew)</option>
<option value="uk">Українська (Ukrainian)</option>
<option value="cs">Čeština (Czech)</option>
<option value="el">Ελληνικά (Greek)</option>
<option value="ro">Română (Romanian)</option>
<option value="hu">Magyar (Hungarian)</option>
<option value="ms">Bahasa Melayu (Malay)</option>
</select>
</div>
</div>
<div class="interpretation-info" id="interpretation-info" style="display: none;">
통역 모드: 입력한 음성이 선택한 언어로 자동 통역됩니다.
</div>
<div class="text-input-section">
<label for="system-prompt" class="setting-label">시스템 프롬프트:</label>
<textarea id="system-prompt" placeholder="AI 어시스턴트의 성격, 역할, 행동 방식을 정의하세요...">You are a helpful assistant. Respond in a friendly and professional manner.</textarea>
</div>
</div>
<div class="controls">
<button id="start-button">대화 시작</button>
</div>
</div>
<div class="chat-section">
<div class="chat-container">
<h3 style="margin: 0 0 15px 0; color: var(--primary-color);">대화</h3>
<div class="chat-messages" id="chat-messages"></div>
<div class="text-input-section" style="margin-top: 10px;">
<div style="display: flex; gap: 10px;">
<input type="text" id="text-input" placeholder="텍스트 메시지를 입력하세요..." style="flex-grow: 1;" />
<button id="send-button" style="display: none;">전송</button>
</div>
</div>
</div>
</div>
</div>
</div>
<audio id="audio-output"></audio>
<script>
let peerConnection;
let webrtc_id;
let webSearchEnabled = false;
let selectedLanguage = "";
let interpretationMode = false;
let interpretationLanguage = "";
let systemPrompt = "You are a helpful assistant. Respond in a friendly and professional manner.";
const audioOutput = document.getElementById('audio-output');
const startButton = document.getElementById('start-button');
const sendButton = document.getElementById('send-button');
const chatMessages = document.getElementById('chat-messages');
const statusDot = document.getElementById('status-dot');
const statusText = document.getElementById('status-text');
const searchToggle = document.getElementById('search-toggle');
const languageSelect = document.getElementById('language-select');
const interpretationToggle = document.getElementById('interpretation-toggle');
const interpretationLanguageSelect = document.getElementById('interpretation-language-select');
const interpretationLanguageContainer = document.getElementById('interpretation-language-container');
const interpretationInfo = document.getElementById('interpretation-info');
const systemPromptInput = document.getElementById('system-prompt');
const textInput = document.getElementById('text-input');
let audioLevel = 0;
let animationFrame;
let audioContext, analyser, audioSource;
let dataChannel = null;
let isVoiceActive = false;
// Web search toggle functionality
searchToggle.addEventListener('click', () => {
webSearchEnabled = !webSearchEnabled;
searchToggle.classList.toggle('active', webSearchEnabled);
console.log('Web search enabled:', webSearchEnabled);
});
// Language selection
languageSelect.addEventListener('change', () => {
selectedLanguage = languageSelect.value;
console.log('Selected language:', selectedLanguage);
});
// Interpretation mode toggle
interpretationToggle.addEventListener('click', () => {
if (!interpretationMode) {
// User clicked while OFF: reveal the language selector and prompt for a
// language. The mode itself is only enabled once a language is chosen
// (see the interpretation-language change handler below).
interpretationLanguageContainer.style.display = 'flex';
interpretationInfo.style.display = 'block';
showError('통역 언어를 선택해주세요.');
interpretationToggle.classList.remove('active');
return;
} else {
// Turning OFF interpretation mode
interpretationMode = false;
interpretationToggle.classList.remove('active');
interpretationLanguageContainer.style.display = 'none';
interpretationInfo.style.display = 'none';
interpretationLanguage = '';
interpretationLanguageSelect.value = '';
// Re-enable other features
languageSelect.disabled = false;
searchToggle.style.opacity = '1';
searchToggle.style.pointerEvents = 'auto';
textInput.disabled = false;
textInput.placeholder = '텍스트 메시지를 입력하세요...';
sendButton.style.display = 'block';
console.log('Interpretation mode disabled');
}
console.log('Interpretation mode:', interpretationMode);
});
// Interpretation language selection
interpretationLanguageSelect.addEventListener('change', () => {
interpretationLanguage = interpretationLanguageSelect.value;
console.log('Interpretation language:', interpretationLanguage);
if (interpretationLanguage && !interpretationMode) {
// Now actually enable interpretation mode
interpretationMode = true;
interpretationToggle.classList.add('active');
// Disable other features
languageSelect.value = '';
selectedLanguage = '';
languageSelect.disabled = true;
searchToggle.classList.remove('active');
webSearchEnabled = false;
searchToggle.style.opacity = '0.5';
searchToggle.style.pointerEvents = 'none';
textInput.disabled = true;
textInput.placeholder = '통역 모드에서는 텍스트 입력이 지원되지 않습니다';
sendButton.style.display = 'none';
console.log('Interpretation mode enabled with language:', interpretationLanguage);
}
});
// System prompt update
systemPromptInput.addEventListener('input', () => {
systemPrompt = systemPromptInput.value || "You are a helpful assistant. Respond in a friendly and professional manner.";
});
// Text input handling
textInput.addEventListener('keypress', (e) => {
if (e.key === 'Enter' && !e.shiftKey) {
e.preventDefault();
sendTextMessage();
}
});
sendButton.addEventListener('click', sendTextMessage);
async function sendTextMessage() {
const message = textInput.value.trim();
if (!message) return;
// Don't allow text messages in interpretation mode
if (interpretationMode) {
showError('통역 모드에서는 텍스트 입력이 지원되지 않습니다.');
return;
}
// Add user message to chat
addMessage('user', message);
textInput.value = '';
// Show sending indicator
const typingIndicator = document.createElement('div');
typingIndicator.classList.add('message', 'assistant');
typingIndicator.textContent = '입력 중...';
typingIndicator.id = 'typing-indicator';
chatMessages.appendChild(typingIndicator);
chatMessages.scrollTop = chatMessages.scrollHeight;
try {
// Send to text chat endpoint
const response = await fetch('/chat/text', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
message: message,
web_search_enabled: webSearchEnabled,
target_language: selectedLanguage,
system_prompt: systemPrompt
})
});
const data = await response.json();
// Remove typing indicator
const indicator = document.getElementById('typing-indicator');
if (indicator) indicator.remove();
if (data.error) {
showError(data.error);
} else {
// Add assistant response
let content = data.response;
if (selectedLanguage && data.language) {
content += ` <span class="language-info">[${data.language}]</span>`;
}
addMessage('assistant', content);
}
} catch (error) {
console.error('Error sending text message:', error);
const indicator = document.getElementById('typing-indicator');
if (indicator) indicator.remove();
showError('메시지 전송 중 오류가 발생했습니다.');
}
}
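// For reference, /chat/text (defined in the Python section below) returns
// JSON shaped like {"response": "...", "language": "한국어 (Korean)"} on
// success, or {"error": "..."} on failure; sendTextMessage branches on
// exactly those fields.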
function updateStatus(state) {
statusDot.className = 'status-dot ' + state;
if (state === 'connected') {
statusText.textContent = '연결됨';
if (!interpretationMode) {
sendButton.style.display = 'block';
}
isVoiceActive = true;
} else if (state === 'connecting') {
statusText.textContent = '연결 중...';
sendButton.style.display = 'none';
} else {
statusText.textContent = '연결 대기 중';
if (!interpretationMode) {
sendButton.style.display = 'block'; // Show send button even when disconnected for text chat
}
isVoiceActive = false;
}
}
function updateButtonState() {
const button = document.getElementById('start-button');
if (peerConnection && (peerConnection.connectionState === 'connecting' || peerConnection.connectionState === 'new')) {
button.innerHTML = `
<div class="icon-with-spinner">
<div class="spinner"></div>
<span>연결 중...</span>
</div>
`;
updateStatus('connecting');
} else if (peerConnection && peerConnection.connectionState === 'connected') {
button.innerHTML = `
<div class="icon-with-spinner">
<div class="audio-visualizer" id="audio-visualizer">
<div class="visualizer-bar"></div>
<div class="visualizer-bar"></div>
<div class="visualizer-bar"></div>
<div class="visualizer-bar"></div>
<div class="visualizer-bar"></div>
</div>
<span>대화 종료</span>
</div>
`;
updateStatus('connected');
} else {
button.innerHTML = '대화 시작';
updateStatus('disconnected');
}
}
function setupAudioVisualization(stream) {
audioContext = new (window.AudioContext || window.webkitAudioContext)();
analyser = audioContext.createAnalyser();
audioSource = audioContext.createMediaStreamSource(stream);
audioSource.connect(analyser);
analyser.fftSize = 256;
const bufferLength = analyser.frequencyBinCount;
const dataArray = new Uint8Array(bufferLength);
const visualizerBars = document.querySelectorAll('.visualizer-bar');
const barCount = visualizerBars.length;
function updateAudioLevel() {
analyser.getByteFrequencyData(dataArray);
for (let i = 0; i < barCount; i++) {
const start = Math.floor(i * (bufferLength / barCount));
const end = Math.floor((i + 1) * (bufferLength / barCount));
let sum = 0;
for (let j = start; j < end; j++) {
sum += dataArray[j];
}
const average = sum / (end - start) / 255;
const scaleY = 0.1 + average * 0.9;
visualizerBars[i].style.transform = `scaleY(${scaleY})`;
}
animationFrame = requestAnimationFrame(updateAudioLevel);
}
updateAudioLevel();
}
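// With fftSize = 256 the analyser exposes 128 frequency bins
// (frequencyBinCount), so each of the 5 visualizer bars averages a
// contiguous block of ~25 bins; the 0.1 floor on scaleY keeps the bars
// visible during silence.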
function showError(message) {
const toast = document.getElementById('error-toast');
toast.textContent = message;
toast.className = 'toast error';
toast.style.display = 'block';
setTimeout(() => {
toast.style.display = 'none';
}, 5000);
}
async function setupWebRTC() {
const config = __RTC_CONFIGURATION__;
peerConnection = new RTCPeerConnection(config);
const timeoutId = setTimeout(() => {
const toast = document.getElementById('error-toast');
toast.textContent = "연결이 평소보다 오래 걸리고 있습니다. VPN을 사용 중이신가요?";
toast.className = 'toast warning';
toast.style.display = 'block';
setTimeout(() => {
toast.style.display = 'none';
}, 5000);
}, 5000);
try {
const stream = await navigator.mediaDevices.getUserMedia({
audio: true
});
setupAudioVisualization(stream);
stream.getTracks().forEach(track => {
peerConnection.addTrack(track, stream);
});
peerConnection.addEventListener('track', (evt) => {
if (audioOutput.srcObject !== evt.streams[0]) {
audioOutput.srcObject = evt.streams[0];
audioOutput.play();
}
});
// Create data channel for text messages
dataChannel = peerConnection.createDataChannel('text');
dataChannel.onopen = () => {
console.log('Data channel opened');
};
dataChannel.onmessage = (event) => {
const eventJson = JSON.parse(event.data);
if (eventJson.type === "error") {
showError(eventJson.message);
}
};
const offer = await peerConnection.createOffer();
await peerConnection.setLocalDescription(offer);
await new Promise((resolve) => {
if (peerConnection.iceGatheringState === "complete") {
resolve();
} else {
const checkState = () => {
if (peerConnection.iceGatheringState === "complete") {
peerConnection.removeEventListener("icegatheringstatechange", checkState);
resolve();
}
};
peerConnection.addEventListener("icegatheringstatechange", checkState);
}
});
peerConnection.addEventListener('connectionstatechange', () => {
console.log('connectionstatechange', peerConnection.connectionState);
if (peerConnection.connectionState === 'connected') {
clearTimeout(timeoutId);
const toast = document.getElementById('error-toast');
toast.style.display = 'none';
}
updateButtonState();
});
webrtc_id = Math.random().toString(36).substring(7);
const response = await fetch('/webrtc/offer', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
sdp: peerConnection.localDescription.sdp,
type: peerConnection.localDescription.type,
webrtc_id: webrtc_id,
web_search_enabled: webSearchEnabled,
target_language: selectedLanguage,
system_prompt: systemPrompt,
interpretation_mode: interpretationMode,
interpretation_language: interpretationLanguage
})
});
const serverResponse = await response.json();
if (serverResponse.status === 'failed') {
showError(serverResponse.meta.error === 'concurrency_limit_reached'
? `너무 많은 연결입니다. 최대 한도는 ${serverResponse.meta.limit} 입니다.`
: serverResponse.meta.error);
stop();
return;
}
await peerConnection.setRemoteDescription(serverResponse);
const eventSource = new EventSource('/outputs?webrtc_id=' + webrtc_id);
eventSource.addEventListener("output", (event) => {
const eventJson = JSON.parse(event.data);
let content = eventJson.content;
if (selectedLanguage && eventJson.language) {
content += ` <span class="language-info">[${eventJson.language}]</span>`;
}
addMessage("assistant", content);
});
eventSource.addEventListener("search", (event) => {
const eventJson = JSON.parse(event.data);
if (eventJson.query) {
addMessage("search-result", `웹 검색 중: "${eventJson.query}"`);
}
});
} catch (err) {
clearTimeout(timeoutId);
console.error('Error setting up WebRTC:', err);
showError('연결을 설정하지 못했습니다. 다시 시도해 주세요.');
stop();
}
}
function addMessage(role, content) {
const messageDiv = document.createElement('div');
messageDiv.classList.add('message', role);
if (content.includes('<span')) {
messageDiv.innerHTML = content;
} else {
messageDiv.textContent = content;
}
chatMessages.appendChild(messageDiv);
chatMessages.scrollTop = chatMessages.scrollHeight;
}
function stop() {
if (animationFrame) {
cancelAnimationFrame(animationFrame);
}
if (audioContext) {
audioContext.close();
audioContext = null;
analyser = null;
audioSource = null;
}
if (peerConnection) {
if (peerConnection.getTransceivers) {
peerConnection.getTransceivers().forEach(transceiver => {
if (transceiver.stop) {
transceiver.stop();
}
});
}
if (peerConnection.getSenders) {
peerConnection.getSenders().forEach(sender => {
if (sender.track && sender.track.stop) sender.track.stop();
});
}
console.log('closing');
peerConnection.close();
}
dataChannel = null;
updateButtonState();
audioLevel = 0;
}
startButton.addEventListener('click', () => {
console.log('clicked');
console.log(peerConnection, peerConnection?.connectionState);
if (!peerConnection || peerConnection.connectionState !== 'connected') {
setupWebRTC();
} else {
console.log('stopping');
stop();
}
});
// Initialize send button visibility on page load
window.addEventListener('DOMContentLoaded', () => {
sendButton.style.display = 'block';
});
</script>
</body>
</html>"""
class BraveSearchClient:
"""Brave Search API client"""
def __init__(self, api_key: str):
self.api_key = api_key
self.base_url = "https://api.search.brave.com/res/v1/web/search"
async def search(self, query: str, count: int = 10) -> List[Dict]:
"""Perform a web search using Brave Search API"""
if not self.api_key:
return []
headers = {
"Accept": "application/json",
"X-Subscription-Token": self.api_key
}
params = {
"q": query,
"count": count,
"lang": "ko"
}
async with httpx.AsyncClient() as client:
try:
response = await client.get(self.base_url, headers=headers, params=params)
response.raise_for_status()
data = response.json()
results = []
if "web" in data and "results" in data["web"]:
for result in data["web"]["results"][:count]:
results.append({
"title": result.get("title", ""),
"url": result.get("url", ""),
"description": result.get("description", "")
})
return results
except Exception as e:
print(f"Brave Search error: {e}")
return []
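# Usage sketch (requires a valid Brave Search API key, supplied below via
# the BSEARCH_API environment variable):
#
#     client = BraveSearchClient(os.getenv("BSEARCH_API"))
#     results = await client.search("오늘 서울 날씨", count=5)
#     # -> [{"title": ..., "url": ..., "description": ...}, ...]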
# Initialize search client globally
brave_api_key = os.getenv("BSEARCH_API")
search_client = BraveSearchClient(brave_api_key) if brave_api_key else None
print(f"Search client initialized: {search_client is not None}, API key present: {bool(brave_api_key)}")
# Store connection settings
connection_settings = {}
# Initialize OpenAI client for text chat
client = openai.AsyncOpenAI()
def update_chatbot(chatbot: list[dict], response: ResponseAudioTranscriptDoneEvent):
chatbot.append({"role": "assistant", "content": response.transcript})
return chatbot
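# update_chatbot is wired into the Stream below as additional_outputs_handler;
# it appends each completed assistant transcript to the Gradio chatbot state.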
def get_translation_instructions(target_language: str) -> str:
"""Get instructions for translation based on target language"""
if not target_language:
return ""
language_name = SUPPORTED_LANGUAGES.get(target_language, target_language)
return (
f"\n\nIMPORTANT: You must respond in {language_name} ({target_language}). "
f"Translate all your responses to {language_name}."
)
async def process_text_chat(message: str, web_search_enabled: bool, target_language: str,
system_prompt: str) -> Dict[str, str]:
"""Process text chat using GPT-4o-mini model"""
try:
# Prepare system message
base_instructions = system_prompt or "You are a helpful assistant."
translation_instructions = get_translation_instructions(target_language)
messages = [
{"role": "system", "content": base_instructions + translation_instructions}
]
# Handle web search if enabled
if web_search_enabled and search_client:
# Check if the message requires web search
search_keywords = ["날씨", "기온", "비", "눈", "뉴스", "소식", "현재", "최근",
"오늘", "지금", "가격", "환율", "주가", "weather", "news",
"current", "today", "price", "2024", "2025"]
should_search = any(keyword in message.lower() for keyword in search_keywords)
if should_search:
# Perform web search
search_results = await search_client.search(message)
if search_results:
search_context = "웹 검색 결과:\n\n"
for i, result in enumerate(search_results[:5], 1):
search_context += f"{i}. {result['title']}\n{result['description']}\n\n"
messages.append({
"role": "system",
"content": f"다음 웹 검색 결과를 참고하여 답변하세요:\n\n{search_context}"
})
messages.append({"role": "user", "content": message})
# Call GPT-4o-mini
response = await client.chat.completions.create(
model="gpt-4o-mini",
messages=messages,
temperature=0.7,
max_tokens=2000
)
return {
"response": response.choices[0].message.content,
"language": SUPPORTED_LANGUAGES.get(target_language, "") if target_language else ""
}
except Exception as e:
print(f"Error in text chat: {e}")
return {"error": str(e)}
class OpenAIHandler(AsyncStreamHandler):
def __init__(self, web_search_enabled: bool = False, target_language: str = "",
system_prompt: str = "", webrtc_id: str = None,
interpretation_mode: bool = False, interpretation_language: str = "") -> None:
super().__init__(
expected_layout="mono",
output_sample_rate=SAMPLE_RATE,
output_frame_size=480,
input_sample_rate=SAMPLE_RATE,
)
self.connection = None
self.output_queue = asyncio.Queue()
self.search_client = search_client
self.function_call_in_progress = False
self.current_function_args = ""
self.current_call_id = None
self.webrtc_id = webrtc_id
self.web_search_enabled = web_search_enabled
self.target_language = target_language
self.system_prompt = system_prompt
self.interpretation_mode = interpretation_mode
self.interpretation_language = interpretation_language
print(f"Handler created with web_search_enabled={web_search_enabled}, "
f"target_language={target_language}, webrtc_id={webrtc_id}, "
f"interpretation_mode={interpretation_mode}, interpretation_language={interpretation_language}")
def copy(self):
# Get the most recent settings
if connection_settings:
# Get the most recent webrtc_id
recent_ids = sorted(connection_settings.keys(),
key=lambda k: connection_settings[k].get('timestamp', 0),
reverse=True)
if recent_ids:
recent_id = recent_ids[0]
settings = connection_settings[recent_id]
return OpenAIHandler(
web_search_enabled=settings.get('web_search_enabled', False),
target_language=settings.get('target_language', ''),
system_prompt=settings.get('system_prompt', ''),
webrtc_id=recent_id,
interpretation_mode=settings.get('interpretation_mode', False),
interpretation_language=settings.get('interpretation_language', '')
)
print(f"Handler.copy() called - creating new handler with default settings")
return OpenAIHandler(web_search_enabled=False)
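# copy() is the hook fastrtc uses to spawn a per-connection handler from the
# template instance passed to Stream(); reading the most recent entry in
# connection_settings is how the custom /webrtc/offer settings reach the new
# handler. Note this picks the newest offer globally, which can race if two
# clients connect at nearly the same time.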
async def search_web(self, query: str) -> str:
"""Perform web search and return formatted results"""
if not self.search_client or not self.web_search_enabled:
return "웹 검색이 비활성화되어 있습니다."
print(f"Searching web for: {query}")
results = await self.search_client.search(query)
if not results:
return f"'{query}'에 대한 검색 결과를 찾을 수 없습니다."
# Format search results
formatted_results = []
for i, result in enumerate(results, 1):
formatted_results.append(
f"{i}. {result['title']}\n"
f" URL: {result['url']}\n"
f" {result['description']}\n"
)
return f"웹 검색 결과 '{query}':\n\n" + "\n".join(formatted_results)
async def process_text_message(self, message: str):
"""Process text message from user"""
if self.connection:
await self.connection.conversation.item.create(
item={
"type": "message",
"role": "user",
"content": [{"type": "input_text", "text": message}]
}
)
await self.connection.response.create()
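# Note: text messages reach process_text_message() indirectly. The
# /text_message/{webrtc_id} endpoint below puts a {'type': 'text_message'}
# dict on output_queue, and emit() dispatches it here instead of yielding
# it as audio output.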
def get_interpretation_instructions(self):
"""Get instructions for interpretation mode"""
if not self.interpretation_mode or not self.interpretation_language:
return ""
target_language_name = SUPPORTED_LANGUAGES.get(self.interpretation_language, self.interpretation_language)
target_code = self.interpretation_language
# Language-specific examples
examples = {
"en": "Hello, the weather is nice today",
"es": "Hola, el clima está agradable hoy",
"fr": "Bonjour, il fait beau aujourd'hui",
"de": "Hallo, das Wetter ist heute schön",
"ja": "こんにちは、今日はいい天気ですね",
"zh": "你好,今天天气很好"
}
example_translation = examples.get(target_code, "Hello, the weather is nice today")
return (
f"INTERPRETATION MODE - CRITICAL RULES:\n\n"
f"1. You are ONLY a translator to {target_language_name} (language code: {target_code}).\n"
f"2. NEVER respond in any other language.\n"
f"3. NEVER generate conversation or additional content.\n"
f"4. ONLY translate what the user says.\n"
f"5. STOP immediately after translating.\n\n"
f"Example:\n"
f"If user says: '안녕하세요, 오늘 날씨가 좋네요'\n"
f"You MUST respond ONLY: '{example_translation}'\n\n"
f"DO NOT say anything else. DO NOT continue talking.\n"
f"Your output language MUST be {target_language_name} ONLY."
)
def get_translation_instructions(self):
"""Get instructions for translation based on target language"""
if not self.target_language or self.interpretation_mode:
return ""
language_name = SUPPORTED_LANGUAGES.get(self.target_language, self.target_language)
return (
f"\n\nIMPORTANT: You must respond in {language_name} ({self.target_language}). "
f"Translate all your responses to {language_name}. "
f"This includes both spoken and written responses."
)
async def start_up(self):
"""Connect to realtime API with function calling enabled"""
# First check if we have the most recent settings
if connection_settings:
recent_ids = sorted(connection_settings.keys(),
key=lambda k: connection_settings[k].get('timestamp', 0),
reverse=True)
if recent_ids:
recent_id = recent_ids[0]
settings = connection_settings[recent_id]
self.web_search_enabled = settings.get('web_search_enabled', False)
self.target_language = settings.get('target_language', '')
self.system_prompt = settings.get('system_prompt', '')
self.interpretation_mode = settings.get('interpretation_mode', False)
self.interpretation_language = settings.get('interpretation_language', '')
self.webrtc_id = recent_id
print(f"start_up: Updated settings from storage - webrtc_id={self.webrtc_id}, "
f"web_search_enabled={self.web_search_enabled}, target_language={self.target_language}, "
f"interpretation_mode={self.interpretation_mode}")
print(f"Handler interpretation settings: mode={self.interpretation_mode}, language={self.interpretation_language}")
print(f"Starting up handler with web_search_enabled={self.web_search_enabled}, "
f"target_language={self.target_language}, interpretation_mode={self.interpretation_mode}, "
f"interpretation_language={self.interpretation_language}")
self.client = openai.AsyncOpenAI()
# Define the web search function
tools = []
base_instructions = self.system_prompt or "You are a helpful assistant."
# Check if in interpretation mode
if self.interpretation_mode:
# In interpretation mode, override all instructions
base_instructions = (
f"You are a professional interpreter. Your ONLY task is to translate what the user says "
f"into {SUPPORTED_LANGUAGES.get(self.interpretation_language, self.interpretation_language)}. "
f"Do not add any commentary, do not continue the conversation, do not generate new content. "
f"Simply translate what was said and stop."
)
interpretation_instructions = self.get_interpretation_instructions()
instructions = base_instructions + "\n\n" + interpretation_instructions
# No tools in interpretation mode
tools = []
print(f"Interpretation mode active - target language: {self.interpretation_language}")
else:
# Normal mode - add translation instructions if language is selected
translation_instructions = self.get_translation_instructions()
if self.web_search_enabled and self.search_client:
# The Realtime API expects a flat tool schema (name, description and
# parameters at the top level), unlike the nested Chat Completions format.
tools = [{
"type": "function",
"name": "web_search",
"description": "Search the web for current information. Use this for weather, news, prices, current events, or any time-sensitive topics.",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The search query"
}
},
"required": ["query"]
}
}]
print("Web search function added to tools")
search_instructions = (
"\n\nYou have web search capabilities. "
"IMPORTANT: You MUST use the web_search function for ANY of these topics:\n"
"- Weather (날씨, 기온, 비, 눈)\n"
"- News (뉴스, 소식)\n"
"- Current events (현재, 최근, 오늘, 지금)\n"
"- Prices (가격, 환율, 주가)\n"
"- Sports scores or results\n"
"- Any question about 2024 or 2025\n"
"- Any time-sensitive information\n\n"
"When in doubt, USE web_search. It's better to search and provide accurate information "
"than to guess or use outdated information."
)
instructions = base_instructions + search_instructions + translation_instructions
else:
instructions = base_instructions + translation_instructions
async with self.client.beta.realtime.connect(
model="gpt-4o-mini-realtime-preview-2024-12-17"
) as conn:
# Update session with tools
session_update = {
"turn_detection": {
"type": "server_vad",
"threshold": 0.5,
"prefix_padding_ms": 300,
"silence_duration_ms": 500 if self.interpretation_mode else 700
},
"instructions": instructions,
"tools": tools,
"tool_choice": "auto" if tools else "none"
}
# Add voice setting based on interpretation or translation language
voice_language = self.interpretation_language if self.interpretation_mode else self.target_language
if voice_language:
# Use only alloy voice to avoid language confusion
# The model will handle the language based on instructions
session_update["voice"] = "alloy"
# For interpretation mode, explicitly set the output language
if self.interpretation_mode:
session_update["output_audio_format"] = "pcm16"
print(f"Voice set to: alloy for language: {voice_language}")
# For interpretation mode, ensure proper language settings
if self.interpretation_mode and self.interpretation_language:
session_update["modalities"] = ["text", "audio"]
session_update["temperature"] = 0.3 # Lower temperature for more accurate translation
session_update["max_response_output_tokens"] = 500 # Limit output to prevent long generations
print(f"Interpretation session config: voice={session_update.get('voice')}, lang={self.interpretation_language}")
await conn.session.update(session=session_update)
self.connection = conn
print(f"Connected with tools: {len(tools)} functions, voice: {session_update.get('voice', 'default')}, "
f"interpretation_mode: {self.interpretation_mode}, language: {self.interpretation_language if self.interpretation_mode else self.target_language}")
async for event in self.connection:
# Debug logging for function calls
if event.type.startswith("response.function_call"):
print(f"Function event: {event.type}")
if event.type == "response.audio_transcript.done":
output_data = {
"event": event,
"language": SUPPORTED_LANGUAGES.get(
self.interpretation_language if self.interpretation_mode else self.target_language,
""
) if (self.interpretation_language or self.target_language) else ""
}
await self.output_queue.put(AdditionalOutputs(output_data))
elif event.type == "response.audio.delta":
await self.output_queue.put(
(
self.output_sample_rate,
np.frombuffer(
base64.b64decode(event.delta), dtype=np.int16
).reshape(1, -1),
),
)
# Handle function calls (only in non-interpretation mode).
# Note: the Realtime API only emits *.delta and *.done events for
# function-call arguments (there is no *.start event), so the
# in-progress state is initialized on the first delta instead.
elif event.type == "response.function_call_arguments.delta" and not self.interpretation_mode:
if not self.function_call_in_progress:
self.function_call_in_progress = True
self.current_function_args = ""
self.current_call_id = getattr(event, 'call_id', None)
self.current_function_args += event.delta
elif event.type == "response.function_call_arguments.done" and not self.interpretation_mode:
if self.function_call_in_progress:
# The done event carries the complete arguments and call_id;
# prefer them over the accumulated state.
self.current_function_args = getattr(event, 'arguments', self.current_function_args)
self.current_call_id = getattr(event, 'call_id', self.current_call_id)
print(f"Function call done, args: {self.current_function_args}")
try:
args = json.loads(self.current_function_args)
query = args.get("query", "")
# Emit search event to client
await self.output_queue.put(AdditionalOutputs({
"type": "search",
"query": query
}))
# Perform the search
search_results = await self.search_web(query)
print(f"Search results length: {len(search_results)}")
# Send function result back to the model
if self.connection and self.current_call_id:
await self.connection.conversation.item.create(
item={
"type": "function_call_output",
"call_id": self.current_call_id,
"output": search_results
}
)
await self.connection.response.create()
except Exception as e:
print(f"Function call error: {e}")
finally:
self.function_call_in_progress = False
self.current_function_args = ""
self.current_call_id = None
async def receive(self, frame: tuple[int, np.ndarray]) -> None:
if not self.connection:
return
try:
_, array = frame
array = array.squeeze()
audio_message = base64.b64encode(array.tobytes()).decode("utf-8")
await self.connection.input_audio_buffer.append(audio=audio_message)
except Exception as e:
print(f"Error in receive: {e}")
# Connection might be closed, ignore the error
async def emit(self) -> tuple[int, np.ndarray] | AdditionalOutputs | None:
item = await wait_for_item(self.output_queue)
# Check if it's a dict with text message
if isinstance(item, dict) and item.get('type') == 'text_message':
await self.process_text_message(item['content'])
return None
return item
async def shutdown(self) -> None:
if self.connection:
await self.connection.close()
self.connection = None
# Create initial handler instance
handler = OpenAIHandler(web_search_enabled=False, interpretation_mode=False)
# Create components
chatbot = gr.Chatbot(type="messages")
# Create stream with handler instance
stream = Stream(
handler, # Pass instance, not factory
mode="send-receive",
modality="audio",
additional_inputs=[chatbot],
additional_outputs=[chatbot],
additional_outputs_handler=update_chatbot,
rtc_configuration=get_twilio_turn_credentials() if get_space() else None,
concurrency_limit=5 if get_space() else None,
time_limit=300 if get_space() else None,
)
app = FastAPI()
# Mount stream
stream.mount(app)
# Intercept offer to capture settings
@app.post("/webrtc/offer", include_in_schema=False)
async def custom_offer(request: Request):
"""Intercept offer to capture settings"""
body = await request.json()
webrtc_id = body.get("webrtc_id")
web_search_enabled = body.get("web_search_enabled", False)
target_language = body.get("target_language", "")
system_prompt = body.get("system_prompt", "")
interpretation_mode = body.get("interpretation_mode", False)
interpretation_language = body.get("interpretation_language", "")
print(f"Custom offer - webrtc_id: {webrtc_id}, web_search_enabled: {web_search_enabled}, "
f"target_language: {target_language}, interpretation_mode: {interpretation_mode}, "
f"interpretation_language: {interpretation_language}")
# Store settings with timestamp
if webrtc_id:
connection_settings[webrtc_id] = {
'web_search_enabled': web_search_enabled,
'target_language': target_language,
'system_prompt': system_prompt,
'interpretation_mode': interpretation_mode,
'interpretation_language': interpretation_language,
'timestamp': asyncio.get_event_loop().time()
}
# Remove our custom route temporarily
custom_route = None
for i, route in enumerate(app.routes):
if hasattr(route, 'path') and route.path == "/webrtc/offer" and route.endpoint == custom_offer:
custom_route = app.routes.pop(i)
break
# Forward to stream's offer handler
response = await stream.offer(body)
# Re-add our custom route
if custom_route:
app.routes.insert(0, custom_route)
return response
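# For reference, the client (setupWebRTC in the embedded page) POSTs a body
# shaped like:
#
#     {
#         "sdp": "...", "type": "offer", "webrtc_id": "abc123",
#         "web_search_enabled": true, "target_language": "",
#         "system_prompt": "...", "interpretation_mode": false,
#         "interpretation_language": ""
#     }
#
# Only the extra fields are consumed here; sdp/type pass through to
# stream.offer().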
@app.post("/chat/text")
async def chat_text(request: Request):
"""Handle text chat messages using GPT-4o-mini"""
try:
body = await request.json()
message = body.get("message", "")
web_search_enabled = body.get("web_search_enabled", False)
target_language = body.get("target_language", "")
system_prompt = body.get("system_prompt", "")
if not message:
return {"error": "메시지가 비어있습니다."}
# Process text chat
result = await process_text_chat(message, web_search_enabled, target_language, system_prompt)
return result
except Exception as e:
print(f"Error in chat_text endpoint: {e}")
return {"error": "채팅 처리 중 오류가 발생했습니다."}
@app.post("/text_message/{webrtc_id}")
async def receive_text_message(webrtc_id: str, request: Request):
"""Receive text message from client"""
body = await request.json()
message = body.get("content", "")
# Find the handler for this connection
if webrtc_id in stream.handlers:
handler = stream.handlers[webrtc_id]
# Queue the text message for processing
await handler.output_queue.put({
'type': 'text_message',
'content': message
})
return {"status": "ok"}
@app.get("/outputs")
async def outputs(webrtc_id: str):
"""Stream outputs including search events"""
async def output_stream():
async for output in stream.output_stream(webrtc_id):
if hasattr(output, 'args') and output.args:
# Check if it's a search event
if isinstance(output.args[0], dict) and output.args[0].get('type') == 'search':
yield f"event: search\ndata: {json.dumps(output.args[0])}\n\n"
# Regular transcript event with language info
elif isinstance(output.args[0], dict) and 'event' in output.args[0]:
event = output.args[0]['event']
if hasattr(event, 'transcript'):
data = {
"role": "assistant",
"content": event.transcript,
"language": output.args[0].get('language', '')
}
yield f"event: output\ndata: {json.dumps(data)}\n\n"
return StreamingResponse(output_stream(), media_type="text/event-stream")
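# The SSE stream above emits two event types, matching the EventSource
# listeners in the embedded page:
#
#     event: search
#     data: {"type": "search", "query": "..."}
#
#     event: output
#     data: {"role": "assistant", "content": "...", "language": "..."}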
@app.get("/")
async def index():
"""Serve the HTML page"""
rtc_config = get_twilio_turn_credentials() if get_space() else None
html_content = HTML_CONTENT.replace("__RTC_CONFIGURATION__", json.dumps(rtc_config))
return HTMLResponse(content=html_content)
if __name__ == "__main__":
import uvicorn
mode = os.getenv("MODE")
if mode == "UI":
stream.ui.launch(server_port=7860)
elif mode == "PHONE":
stream.fastphone(host="0.0.0.0", port=7860)
else:
uvicorn.run(app, host="0.0.0.0", port=7860)