|
<!DOCTYPE html> |
|
<html lang="en"> |
|
<head> |
|
<meta charset="UTF-8"> |
|
<meta name="viewport" content="width=device-width, initial-scale=1.0"> |
|
<title>Xortron Criminal Computing Corporation</title> |
|
<script src="https://cdn.tailwindcss.com"></script> |
|
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css"> |
|
<style> |
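
/* Dark neon theme: cyan accents on a gray-900 base; the body centers the chat card in the viewport. */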
|
|
|
body { |
|
font-family: 'Inter', sans-serif; display: flex; justify-content: center; align-items: center; |
|
min-height: 100vh; background-color: #111827; padding: 1rem; |
|
} |
|
:root { |
|
--neon-cyan: #22d3ee; --neon-cyan-focus: #67e8f9; --neon-cyan-darker: #0e7490; |
|
} |
|
.main-container { |
|
background-color: #1f2937; border: 1px solid var(--neon-cyan); |
|
box-shadow: 0 0 15px rgba(34, 211, 238, 0.3); display: flex; |
|
flex-direction: column; height: 90vh; max-height: 800px; |
|
width: 100%; max-width: 768px; |
|
} |
|
.dark-input { |
|
background-color: #374151; border: 1px solid #4b5563; color: #f3f4f6; |
|
} |
|
.dark-input::placeholder { color: #9ca3af; } |
|
.dark-input:focus { |
|
border-color: var(--neon-cyan); outline: none; box-shadow: 0 0 0 2px rgba(34, 211, 238, 0.4); |
|
} |
|
.dark-chatbox { |
|
background-color: #374151; border: 1px solid #4b5563; flex-grow: 1; |
|
overflow-y: auto; scroll-behavior: smooth; |
|
} |
|
.chat-bubble { |
|
max-width: 80%; padding: 0.75rem 1rem; border-radius: 1rem; |
|
margin-bottom: 0.5rem; word-wrap: break-word; |
|
|
|
overflow-wrap: break-word; |
|
line-height: 1.6; |
|
} |
|
.user-bubble { |
|
background-color: var(--neon-cyan); color: #1f2937; margin-left: auto; |
|
border-bottom-right-radius: 0.25rem; |
|
} |
|
.assistant-bubble { |
|
background-color: #4b5563; color: #f3f4f6; margin-right: auto; |
|
border-bottom-left-radius: 0.25rem; |
|
} |
|
.assistant-bubble.streaming::after { |
|
content: '▋'; animation: blink 1s step-end infinite; |
|
opacity: 0.7; margin-left: 2px; font-size: 0.9em; |
|
} |
|
@keyframes blink { 50% { opacity: 0; } } |
|
#recordButton.listening { |
|
animation: pulse 1.5s infinite; background-color: #ef4444; border-color: #ef4444; |
|
} |
|
#recordButton.listening:hover { background-color: #dc2626; border-color: #dc2626; } |
|
#recordButton { background-color: #4b5563; border: 1px solid #6b7280; } |
|
#recordButton:hover:not(.listening) { |
|
background-color: #374151; border-color: var(--neon-cyan); |
|
box-shadow: 0 0 8px rgba(34, 211, 238, 0.5); |
|
} |
|
#sendButton { background-color: var(--neon-cyan); color: #1f2937; } |
|
#sendButton:hover { background-color: var(--neon-cyan-focus); } |
|
#sendButton:disabled { background-color: #6b7280; color: #9ca3af; cursor: not-allowed; } |
|
@keyframes pulse { |
|
0% { box-shadow: 0 0 0 0 rgba(239, 68, 68, 0.7); } |
|
70% { box-shadow: 0 0 0 10px rgba(239, 68, 68, 0); } |
|
100% { box-shadow: 0 0 0 0 rgba(239, 68, 68, 0); } |
|
} |
|
#chatbox::-webkit-scrollbar { width: 8px; } |
|
#chatbox::-webkit-scrollbar-track { background: #374151; border-radius: 10px; } |
|
#chatbox::-webkit-scrollbar-thumb { background: #6b7280; border-radius: 10px; } |
|
#chatbox::-webkit-scrollbar-thumb:hover { background: var(--neon-cyan); } |
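
/* Markdown element styles for assistant bubbles (rendered with marked, sanitized by DOMPurify). */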
|
|
|
|
|
.chat-bubble code:not(pre code) { |
|
background-color: #111827; |
|
padding: 0.2em 0.4em; |
|
margin: 0 0.1em; |
|
font-size: 85%; |
|
border-radius: 6px; |
|
font-family: Consolas, Monaco, 'Andale Mono', 'Ubuntu Mono', monospace; |
|
word-wrap: break-word; |
|
} |
|
.chat-bubble pre { |
|
background-color: #111827; |
|
padding: 1em; |
|
border-radius: 6px; |
|
overflow-x: auto; |
|
margin: 0.8em 0; |
|
white-space: pre; |
|
color: #f3f4f6; |
|
} |
|
.chat-bubble pre code { |
|
background-color: transparent; |
|
padding: 0; |
|
margin: 0; |
|
font-size: inherit; |
|
border-radius: 0; |
|
white-space: inherit; |
|
color: inherit; |
|
} |
|
.chat-bubble ul, .chat-bubble ol { |
|
padding-left: 1.5em; |
|
margin-top: 0.5em; |
|
margin-bottom: 0.5em; |
|
} |
|
.chat-bubble li { |
|
margin-bottom: 0.25em; |
|
} |
|
.chat-bubble li > p { |
|
margin-bottom: 0; |
|
} |
|
.chat-bubble p { |
|
margin-bottom: 0.75em; |
|
} |
|
.chat-bubble p:last-child { |
|
margin-bottom: 0; |
|
} |
|
.chat-bubble strong, .chat-bubble b { |
|
font-weight: bold; |
|
} |
|
.chat-bubble em, .chat-bubble i { |
|
font-style: italic; |
|
} |
|
.chat-bubble blockquote { |
|
border-left: 4px solid var(--neon-cyan); |
|
padding-left: 1em; |
|
margin: 0.8em 0; |
|
color: #d1d5db; |
|
} |
|
.chat-bubble blockquote p { |
|
margin-bottom: 0.5em; |
|
} |
|
.chat-bubble a { |
|
color: var(--neon-cyan-focus); |
|
text-decoration: underline; |
|
} |
|
.chat-bubble a:hover { |
|
color: var(--neon-cyan); |
|
} |
|
.chat-bubble hr { |
|
border: none; |
|
border-top: 1px solid #4b5563; |
|
margin: 1em 0; |
|
} |
|
|
|
|
|
</style> |
|
<link rel="preconnect" href="https://fonts.googleapis.com"> |
|
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin> |
|
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap" rel="stylesheet"> |
|
|
|
<script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script> |
|
<script src="https://cdnjs.cloudflare.com/ajax/libs/dompurify/3.1.4/purify.min.js" integrity="sha512-WcCfo2F+5U1zKjjKwpPszIOxeh7o3N63FvQubHDjVAQnRBCw44fAnJsFzt7o06kEMt0h8+drQvdY9e+wOHhVKA==" crossorigin="anonymous" referrerpolicy="no-referrer"></script> |
|
</head> |
|
<body class="bg-gray-900"> |
|
<div class="main-container p-6 md:p-8 rounded-lg shadow-xl w-full"> |
|
<div class="text-2xl md:text-3xl font-bold mb-4 text-center text-gray-100 flex-shrink-0"> |
|
<img src="https://darkc0de-chat.hf.space/gradio_api/file=/tmp/gradio/6cfb2517f89a3a5912cc6e0a8107c1901bc7a5f71a40f29be691eac57c7ba1a6/j61iZTDaK9g0UW3aWGwWi.gif" alt="Header Image" style="display: block; margin-left: auto; margin-right: auto;"> |
|
</div> |
|
|
|
<div id="chatbox" class="dark-chatbox rounded-md p-4 mb-4 flex flex-col space-y-2"> |
|
</div> |
|
<div id="status" class="text-center text-sm text-gray-400 mb-2 h-5 flex-shrink-0"></div> |
|
<div class="flex items-center space-x-2 mb-4 flex-shrink-0"> |
|
<input type="text" id="textInput" placeholder="Type your message..." class="dark-input w-full px-3 py-2 rounded-md shadow-sm text-sm flex-grow" disabled> |
|
<button id="sendButton" class="px-4 py-2 rounded-md font-semibold shadow-sm transition duration-150 ease-in-out focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-offset-gray-800 focus:ring-[var(--neon-cyan)]" disabled> |
|
<i class="fas fa-paper-plane"></i> Send |
|
</button> |
|
</div> |
|
<div class="text-center flex-shrink-0"> |
|
<button id="recordButton" title="Start/Stop Listening" class="text-white font-bold py-3 px-5 rounded-full shadow-md transition duration-150 ease-in-out focus:outline-none"> |
|
<i class="fas fa-microphone text-xl"></i> |
|
</button> |
|
</div> |
|
<div class="text-center mt-4 flex-shrink-0"> <a href='https://ko-fi.com/Z8Z51E5TIG' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://storage.ko-fi.com/cdn/kofi6.png?v=6' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a> |
|
</div> |
|
</div> |
|
|
|
<script> |
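
// ---- DOM references ----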
|
|
|
const recordButton = document.getElementById('recordButton'); |
|
const statusDiv = document.getElementById('status'); |
|
const chatbox = document.getElementById('chatbox'); |
|
const textInput = document.getElementById('textInput'); |
|
const sendButton = document.getElementById('sendButton'); |
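
// OpenAI-compatible chat-completions endpoint, exposed through an ngrok tunnel; streaming (SSE) is requested in sendToApi().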
|
|
|
|
|
const API_ENDPOINT_URL = "https://vulture-awake-probably.ngrok-free.app/v1/chat/completions"; |
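
// ---- Recognition and streaming state ----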
|
|
|
|
|
let recognition; |
|
let isListening = false; |
|
let isApiProcessing = false; |
|
let conversationHistory = []; |
|
let restartTimer; |
|
let currentAssistantMessageElement = null; |
|
let sentenceBuffer = ""; |
|
let spokenTextPointer = 0; |
|
let recognitionWasRunning = false; |
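
// ---- Speech-to-text via the Web Speech API (the webkit prefix covers Chromium browsers and Safari) ----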
|
|
|
|
|
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition; |
|
if (!SpeechRecognition) { |
|
statusDiv.textContent = 'Voice input not supported.'; |
|
recordButton.disabled = true; |
|
recordButton.title = 'Speech Recognition not supported in this browser.'; |
|
recordButton.classList.add('opacity-50', 'cursor-not-allowed'); |
|
} else { |
|
recognition = new SpeechRecognition(); |
|
recognition.continuous = true; |
|
recognition.interimResults = false; |
|
recognition.lang = 'en-US'; |
|
recognition.maxAlternatives = 1; |
|
|
|
recognition.onstart = () => { |
|
console.log('Recognition started.'); |
|
if (isListening) statusDiv.textContent = 'Listening...'; |
|
}; |
|
|
|
recognition.onresult = (event) => { |
|
let finalTranscript = ''; |
|
for (let i = event.resultIndex; i < event.results.length; ++i) { |
|
if (event.results[i].isFinal) { |
|
finalTranscript += event.results[i][0].transcript; |
|
} |
|
} |
|
finalTranscript = finalTranscript.trim(); |
|
console.log('Transcript:', finalTranscript); |
|
if (finalTranscript && !isApiProcessing && isListening) { |
|
handleUserInput(finalTranscript); |
|
} else if (!finalTranscript) { |
|
console.log('Empty transcript received.'); |
|
} |
|
}; |
|
|
|
recognition.onerror = (event) => { |
|
console.error('Speech recognition error:', event.error); |
|
if (event.error === 'no-speech') { |
|
console.warn('Recognition error: No speech detected. Restarting if still listening.'); |
|
} else if (event.error === 'audio-capture') { |
|
console.warn('Recognition error: Audio capture issue.'); |
|
statusDiv.textContent = 'Mic Issue'; |
|
} else if (event.error === 'not-allowed') { |
|
statusDiv.textContent = 'Microphone access denied.'; |
|
addMessageToChatbox('assistant', 'Error: Microphone access denied.'); |
|
if (isListening) stopListening(true); |
|
} else if (event.error === 'aborted') {

console.log('Recognition aborted; expected when listening is stopped intentionally.');

} else {
|
statusDiv.textContent = `Voice Error: ${event.error}`; |
|
if (isListening) stopListening(true); |
|
} |
|
}; |
|
|
|
recognition.onend = () => { |
|
console.log('Recognition ended.'); |
|
if (isListening && !isApiProcessing) { |
|
clearTimeout(restartTimer); |
|
restartTimer = setTimeout(() => { |
|
if (isListening) { |
|
console.log('Attempting to restart recognition...'); |
|
try { |
|
recognition.start(); |
|
} catch (e) { |
|
if (e.name !== 'InvalidStateError') { |
|
console.error("Error restarting recognition:", e); |
|
statusDiv.textContent = "Error restarting listening."; |
|
stopListening(true); |
|
} |
|
} |
|
} |
|
}, 250); |
|
} else if (!isListening) { |
|
updateButtonUI(false); |
|
if (!isApiProcessing && !isSpeaking && ttsQueue.length === 0) { |
|
statusDiv.textContent = ''; |
|
} |
|
} |
|
}; |
|
} |
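
// ---- Text-to-speech via SpeechSynthesis, with a FIFO queue so sentences play in order ----

// (recognition.onend above reads isSpeaking/ttsQueue; safe, since handlers only fire after this script has run.)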
|
|
|
|
|
const synth = window.speechSynthesis; |
|
let ttsQueue = []; |
|
let isSpeaking = false; |
|
|
|
if (!synth) { |
|
console.warn("Speech Synthesis not supported in this browser."); |
|
} |
|
|
|
function speakText(text) { |
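// Replace fenced code with the spoken phrase "Code block.", unwrap inline code, and strip emphasis markers so TTS reads clean prose.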
|
|
|
let textToSpeak = text.replace(/```[\s\S]*?```/g, 'Code block.') |
|
.replace(/`([^`]+)`/g, '$1') |
|
.replace(/[*_~]+/g, ''); |
|
if (!synth || !textToSpeak) return; |
|
ttsQueue.push(textToSpeak); |
|
processTTSQueue(); |
|
} |
|
|
|
function processTTSQueue() { |
|
if (isSpeaking || ttsQueue.length === 0 || !synth) { |
|
return; |
|
} |
|
isSpeaking = true; |
|
const textToSpeak = ttsQueue.shift(); |
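
// cancel() plus a short delay appears to help some browsers (notably Chrome) avoid silently dropping queued utterances.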
|
|
|
setTimeout(() => { |
|
synth.cancel(); |
|
const utterance = new SpeechSynthesisUtterance(textToSpeak); |
|
utterance.lang = 'en-US'; |
|
utterance.rate = 1.2; |
|
utterance.pitch = 1; |
|
utterance.volume = 1; |
|
|
|
utterance.onstart = () => { |
|
console.log("Speech started for:", textToSpeak.substring(0, 30) + "..."); |
|
statusDiv.textContent = 'Speaking...'; |
|
}; |
|
|
|
utterance.onend = () => { |
|
console.log("Speech finished for:", textToSpeak.substring(0, 30) + "..."); |
|
isSpeaking = false; |
|
if (ttsQueue.length === 0 && !isApiProcessing) { |
|
enableInputs(); |
|
statusDiv.textContent = isListening ? 'Listening...' : ''; |
|
restartRecognitionIfNeeded(recognitionWasRunning); |
|
} |
|
processTTSQueue(); |
|
}; |
|
|
|
utterance.onerror = (event) => { |
|
console.error('SpeechSynthesis Utterance Error:', event.error, "for text:", textToSpeak); |
|
statusDiv.textContent = 'Error speaking response.'; |
|
isSpeaking = false; |
|
if (ttsQueue.length === 0 && !isApiProcessing) { |
|
enableInputs(); |
|
statusDiv.textContent = isListening ? 'Listening...' : ''; |
|
restartRecognitionIfNeeded(recognitionWasRunning); |
|
} |
|
processTTSQueue(); |
|
}; |
|
|
|
console.log("Attempting to speak:", textToSpeak.substring(0, 50) + "..."); |
|
synth.speak(utterance); |
|
}, 50); |
|
} |
|
|
|
|
|
function handleUserInput(text) { |
|
if (!text || isApiProcessing) return; |
|
isApiProcessing = true; |
|
statusDiv.textContent = 'Processing...'; |
|
disableInputs(); |
|
|
|
addMessageToChatbox('user', text); |
|
sendToApi(text); |
|
} |
|
|
|
|
|
async function sendToApi(userText) { |
|
const apiEndpoint = API_ENDPOINT_URL; |
|
conversationHistory.push({ role: "user", content: userText }); |
|
|
|
statusDiv.textContent = 'Thinking...'; |
|
currentAssistantMessageElement = null; |
|
sentenceBuffer = ""; |
|
spokenTextPointer = 0; |
|
ttsQueue = []; |
|
recognitionWasRunning = false; |
|
|
|
if (isListening && recognition) { |
|
try { |
|
recognition.stop(); |
|
recognitionWasRunning = true; |
|
console.log("Stopped recognition temporarily for API call."); |
|
} catch(e) { console.warn("Could not stop recognition before API call:", e); } |
|
} |
|
if (synth && synth.speaking) { |
|
synth.cancel(); |
|
isSpeaking = false; |
|
} |
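
// Build an OpenAI-style streaming request; only the running user/assistant history is sent (no system prompt).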
|
|
|
const requestBody = { |
|
messages: conversationHistory, |
|
|
|
|
|
max_tokens: 750, |
|
stream: true |
|
}; |
|
const requestHeaders = { |
|
'Content-Type': 'application/json', |
|
'Accept': 'text/event-stream' |
|
}; |
|
|
|
try { |
|
console.log("Sending request to:", apiEndpoint); |
|
const response = await fetch(apiEndpoint, { method: 'POST', headers: requestHeaders, body: JSON.stringify(requestBody) }); |
|
|
|
if (!response.ok) { |
|
const errorText = await response.text(); |
|
let detail = errorText; |
|
try { |
|
const errorJson = JSON.parse(errorText); |
|
detail = errorJson.detail || errorJson.error?.message || errorJson.message || JSON.stringify(errorJson); |
|
} catch (parseError) {} |
|
throw new Error(`API Error: ${response.status} ${response.statusText} - ${detail}`); |
|
} |
|
if (!response.body) { |
|
throw new Error("Response body is null, cannot process stream."); |
|
} |
|
|
|
const reader = response.body.getReader(); |
|
const decoder = new TextDecoder("utf-8"); |
|
let partialChunk = ""; |
|
let isDoneProcessingStream = false; |
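
// Manual SSE parsing: events are separated by blank lines, each "data:" line carries a JSON delta, and "data: [DONE]" ends the stream.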
|
|
|
while (!isDoneProcessingStream) { |
|
const { done, value } = await reader.read(); |
|
|
|
if (done) { |
|
console.log("Stream finished (reader signaled done)."); |
|
isDoneProcessingStream = true; |
|
if (partialChunk.trim()) { |
|
console.warn("Stream ended by reader 'done' with unprocessed partial chunk:", partialChunk); |
|
} |
|
break; |
|
} |
|
|
|
const chunkText = partialChunk + decoder.decode(value, { stream: true }); |
|
const eventStrings = chunkText.split("\n\n"); |
|
|
|
if (!chunkText.endsWith("\n\n") && eventStrings.length > 0) { |
|
partialChunk = eventStrings.pop(); |
|
} else { |
|
partialChunk = ""; |
|
} |
|
|
|
for (const eventString of eventStrings) { |
|
if (!eventString.trim()) continue; |
|
|
|
let content = ""; |
|
let isDoneSignalFound = false; |
|
|
|
const lines = eventString.split("\n"); |
|
for (const line of lines) { |
|
if (line.startsWith("data:")) { |
|
const dataJson = line.substring(5).trim(); |
|
if (dataJson === "[DONE]") { |
|
console.log("Received [DONE] signal in stream."); |
|
isDoneSignalFound = true; |
|
isDoneProcessingStream = true; |
|
break; |
|
} |
|
try { |
|
const data = JSON.parse(dataJson); |
|
if (data.choices && data.choices[0]?.delta?.content) { |
|
content += data.choices[0].delta.content; |
|
} |
|
} catch (e) { |
|
console.error("Error parsing stream data JSON:", e, "Data:", dataJson); |
|
} |
|
} |
|
} |
|
|
|
if (isDoneSignalFound) break; |
|
|
|
if (content) { |
|
processStreamContent(content); |
|
} |
|
} |
|
} |
|
|
|
|
|
if (sentenceBuffer.length > spokenTextPointer) { |
|
const remainingText = sentenceBuffer.substring(spokenTextPointer); |
|
console.log("Speaking remaining text after stream:", remainingText); |
|
speakText(remainingText); |
|
} |
|
|
|
if (currentAssistantMessageElement) { |
|
currentAssistantMessageElement.classList.remove('streaming'); |
|
if (sentenceBuffer) { |
|
try { |
|
marked.setOptions({ |
|
breaks: true, |
|
gfm: true |
|
}); |
|
const unsafeHtml = marked.parse(sentenceBuffer); |
|
const safeHtml = DOMPurify.sanitize(unsafeHtml); |
|
currentAssistantMessageElement.innerHTML = safeHtml; |
|
console.log("Rendered final sanitized HTML for assistant message."); |
|
} catch (e) { |
|
console.error("Error processing final Markdown/HTML:", e); |
|
currentAssistantMessageElement.textContent = sentenceBuffer; |
|
} |
|
} |
|
} |
|
|
|
if (sentenceBuffer) { |
|
conversationHistory.push({ role: "assistant", content: sentenceBuffer }); |
|
} else { |
|
console.log("API call successful but no content received. Removing last user message from history."); |
|
if (conversationHistory.length > 0 && conversationHistory[conversationHistory.length - 1].role === 'user') { |
|
conversationHistory.pop(); |
|
} |
|
} |
|
|
|
} catch (error) { |
|
console.error('Error during API call or streaming:', error); |
|
if (currentAssistantMessageElement) { currentAssistantMessageElement.classList.remove('streaming'); } |
|
|
|
let userFriendlyError = `Sorry, I encountered an error: ${error.message}`; |
|
if (error instanceof TypeError && error.message.toLowerCase().includes('fetch')) { |
|
userFriendlyError = `Connection Error: Could not connect to the API at ${apiEndpoint}. Please check the URL and network connection.`; |
|
statusDiv.textContent = 'Connection Error'; |
|
} else { |
|
statusDiv.textContent = `API Error: ${error.message.substring(0, 100)}...`; |
|
} |
|
addMessageToChatbox('assistant', userFriendlyError); |
|
|
|
if (conversationHistory.length > 0 && conversationHistory[conversationHistory.length - 1].role === 'user') { |
|
conversationHistory.pop(); |
|
} |
|
|
|
} finally { |
|
console.log("API processing finished or errored. Entering finally block."); |
|
isApiProcessing = false; |
|
|
|
setTimeout(() => { |
|
if (ttsQueue.length === 0 && !isSpeaking) { |
|
console.log("Finally: TTS idle. Enabling inputs and checking recognition restart."); |
|
enableInputs(); |
|
statusDiv.textContent = isListening ? 'Listening...' : ''; |
|
restartRecognitionIfNeeded(recognitionWasRunning); |
|
} else { |
|
console.log("Finally: TTS queue active or speaking. Inputs remain disabled. TTS onend will handle enabling/restart."); |
|
} |
|
}, 100); |
|
} |
|
} |
|
|
|
function processStreamContent(content) { |
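// Append streamed tokens to the live bubble and hand completed sentences to TTS as they arrive.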
|
if (!currentAssistantMessageElement) { |
|
currentAssistantMessageElement = addMessageToChatbox('assistant', '', true); |
|
} |
|
sentenceBuffer += content; |
|
currentAssistantMessageElement.textContent = sentenceBuffer; |
|
chatbox.scrollTop = chatbox.scrollHeight; |
|
|
|
let searchStart = spokenTextPointer; |
|
while (searchStart < sentenceBuffer.length) { |
|
const sentenceEndMatch = sentenceBuffer.substring(searchStart).match(/([.?!])(?:\s|$)/);
|
if (sentenceEndMatch) { |
|
const sentenceEndIndex = searchStart + sentenceEndMatch.index + sentenceEndMatch[1].length; |
|
const textToSpeak = sentenceBuffer.substring(spokenTextPointer, sentenceEndIndex).trim(); |
|
if (textToSpeak) { |
|
console.log("Found sentence for TTS:", textToSpeak); |
|
speakText(textToSpeak); |
|
spokenTextPointer = sentenceEndIndex; |
|
} |
|
searchStart = spokenTextPointer; |
|
} else { |
|
break; |
|
} |
|
} |
|
} |
|
|
|
function restartRecognitionIfNeeded(wasRunning) { |
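// Resume the microphone only once the API call and all queued speech have finished.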
|
if (wasRunning && isListening && recognition && !isApiProcessing && !isSpeaking && ttsQueue.length === 0) { |
|
console.log("Conditions met: Restarting recognition."); |
|
clearTimeout(restartTimer); |
|
try { |
|
statusDiv.textContent = 'Listening...'; |
|
recognition.start(); |
|
} catch (e) { |
|
if (e.name !== 'InvalidStateError') { |
|
console.error("Error restarting recognition post-API/TTS:", e); |
|
statusDiv.textContent = "Error restarting listening."; |
|
stopListening(true); |
|
} else { |
|
console.log("Recognition likely already restarting or started (InvalidStateError)."); |
|
if(isListening) statusDiv.textContent = 'Listening...'; |
|
} |
|
} |
|
} else if (!isListening && !isApiProcessing && !isSpeaking && ttsQueue.length === 0) { |
|
statusDiv.textContent = ''; |
|
} |
|
else { |
|
console.log(`Conditions not met for restarting recognition (wasRunning: ${wasRunning}, isListening: ${isListening}, isApiProcessing: ${isApiProcessing}, isSpeaking: ${isSpeaking}, ttsQueue: ${ttsQueue.length})`); |
|
} |
|
} |
|
|
|
function addMessageToChatbox(role, text, isStreaming = false) { |
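// textContent keeps raw input inert; the assistant's final Markdown is swapped in later, after DOMPurify sanitization.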
|
const messageDiv = document.createElement('div'); |
|
messageDiv.classList.add('chat-bubble'); |
|
messageDiv.textContent = text; |
|
messageDiv.classList.add(role === 'user' ? 'user-bubble' : 'assistant-bubble'); |
|
if (role === 'assistant' && isStreaming) { |
|
messageDiv.classList.add('streaming'); |
|
} |
|
chatbox.appendChild(messageDiv); |
|
chatbox.scrollTo({ top: chatbox.scrollHeight, behavior: 'smooth' }); |
|
return messageDiv; |
|
} |
|
|
|
function updateButtonUI(listening) { |
|
if (!recognition) return; |
|
if (listening) { |
|
recordButton.classList.add('listening'); |
|
recordButton.innerHTML = '<i class="fas fa-stop text-xl"></i>'; |
|
recordButton.title = "Stop Listening"; |
|
} else { |
|
recordButton.classList.remove('listening'); |
|
recordButton.innerHTML = '<i class="fas fa-microphone text-xl"></i>'; |
|
recordButton.title = "Start Listening"; |
|
} |
|
} |
|
|
|
function disableInputs() { |
|
console.log("Disabling inputs."); |
|
textInput.disabled = true; |
|
sendButton.disabled = true; |
|
if (recognition) { |
|
recordButton.disabled = true; |
|
recordButton.classList.add('opacity-50'); |
|
} |
|
} |
|
|
|
function enableInputs() { |
|
console.log("Enabling inputs."); |
|
textInput.disabled = false; |
|
sendButton.disabled = textInput.value.trim() === '' || isApiProcessing; |
|
if (recognition) { |
|
recordButton.disabled = false; |
|
recordButton.classList.remove('opacity-50'); |
|
} |
|
} |
|
|
|
function stopListening(forceStop = false) { |
|
if (!recognition) return; |
|
const wasListening = isListening; |
|
isListening = false; |
|
if (wasListening) { |
|
console.log("Stopping listening session."); |
|
clearTimeout(restartTimer); |
|
updateButtonUI(false); |
|
if (!isApiProcessing && !isSpeaking && ttsQueue.length === 0) { |
|
statusDiv.textContent = 'Stopping...'; |
|
setTimeout(() => { |
|
if (statusDiv.textContent === 'Stopping...') { statusDiv.textContent = ''; } |
|
}, 500); |
|
} |
|
try { |
|
recognition.abort(); |
|
console.log("Recognition aborted."); |
|
} catch (e) { |
|
console.warn("Error aborting recognition (might have already stopped):", e); |
|
} |
|
} |
|
if (synth) { |
|
console.log("Cancelling any TTS on stopListening."); |
|
synth.cancel(); |
|
ttsQueue = []; |
|
isSpeaking = false; |
|
} |
|
if (!isApiProcessing) { |
|
enableInputs(); |
|
if (!isSpeaking && ttsQueue.length === 0) { |
|
statusDiv.textContent = ''; |
|
} |
|
} |
|
} |
|
|
|
function startListening() { |
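// Request (and immediately release) a mic stream so the permission prompt appears before recognition.start().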
|
if (!recognition || isListening) return; |
|
navigator.mediaDevices.getUserMedia({ audio: true }) |
|
.then(stream => { |
|
stream.getTracks().forEach(track => track.stop()); |
|
console.log("Microphone permission granted or already available."); |
|
isListening = true; |
|
updateButtonUI(true); |
|
statusDiv.textContent = 'Starting...'; |
|
try { |
|
recognition.start(); |
|
} catch (e) { |
|
console.error("Error starting recognition:", e); |
|
statusDiv.textContent = "Error starting listening."; |
|
isListening = false; |
|
updateButtonUI(false); |
|
} |
|
}) |
|
.catch(err => { |
|
console.error("Microphone access error:", err); |
|
if (err.name === 'NotAllowedError' || err.name === 'PermissionDeniedError') { |
|
statusDiv.textContent = 'Microphone access denied.'; |
|
addMessageToChatbox('assistant', 'Error: Microphone access is required for voice input.'); |
|
} else { |
|
statusDiv.textContent = `Mic Error: ${err.name}`; |
|
addMessageToChatbox('assistant', `Error accessing microphone: ${err.message}`); |
|
} |
|
isListening = false; |
|
updateButtonUI(false); |
|
}); |
|
} |
|
|
|
recordButton.addEventListener('click', () => { |
|
if (!recognition) return; |
|
if (!isListening) { |
|
startListening(); |
|
} else { |
|
stopListening(); |
|
} |
|
}); |
|
|
|
sendButton.addEventListener('click', () => { |
|
const text = textInput.value.trim(); |
|
if (text && !isApiProcessing) { |
|
handleUserInput(text); |
|
textInput.value = ''; |
|
sendButton.disabled = true; |
|
} |
|
}); |
|
|
|
textInput.addEventListener('keydown', (e) => {
|
if (e.key === 'Enter' && !e.shiftKey) { |
|
e.preventDefault(); |
|
const text = textInput.value.trim(); |
|
if (text && !sendButton.disabled) { |
|
handleUserInput(text); |
|
textInput.value = ''; |
|
sendButton.disabled = true; |
|
} |
|
} |
|
}); |
|
|
|
textInput.addEventListener('input', () => { |
|
sendButton.disabled = textInput.value.trim() === '' || isApiProcessing; |
|
}); |
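
// ---- Initial UI state ----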
|
|
|
chatbox.innerHTML = ''; |
|
addMessageToChatbox('assistant', 'Hello! Use the microphone or type a message below.'); |
|
console.log("Voice/Text Chat App Initialized (Markdown Enabled)"); |
|
updateButtonUI(false); |
|
enableInputs(); |
|
|
|
</script> |
|
</body> |
|
</html> |