Spaces:
Running
Running
File size: 25,684 Bytes
19a862c 3ba6123 19a862c ebcf47f 19a862c ebcf47f fa541aa 19a862c 3ba6123 060ac05 3ba6123 fa541aa 19a862c ebcf47f 060ac05 4bfa00d 060ac05 ebcf47f 060ac05 4bfa00d 060ac05 fa541aa 3ba6123 f8a195e 060ac05 3ba6123 19a862c 060ac05 71d66b0 19a862c 71d66b0 19a862c ebcf47f 060ac05 19a862c f8a195e 19a862c 4bfa00d 060ac05 f8a195e ebcf47f 4bfa00d 19a862c 3ba6123 19a862c 35c0ea2 19a862c ebcf47f 3ba6123 19a862c f8a195e 19a862c f8a195e 19a862c f8a195e 19a862c 35c0ea2 19a862c f8a195e 19a862c ebcf47f 19a862c 71d66b0 19a862c 060ac05 ebcf47f f8a195e 060ac05 ebcf47f 4bfa00d ebcf47f 4bfa00d ebcf47f 19a862c 3ba6123 19a862c 35c0ea2 4bfa00d ebcf47f 19a862c 3ba6123 71d66b0 ebcf47f f8a195e 4bfa00d ebcf47f 19a862c 35c0ea2 ebcf47f 060ac05 ebcf47f 4bfa00d ebcf47f 4bfa00d ebcf47f 4bfa00d ebcf47f 4bfa00d 19a862c ebcf47f 35c0ea2 ebcf47f 4bfa00d ebcf47f f8a195e ebcf47f f8a195e ebcf47f f8a195e 4bfa00d 35c0ea2 ebcf47f 4bfa00d ebcf47f 19a862c ebcf47f 19a862c ebcf47f 19a862c ebcf47f 19a862c 060ac05 35c0ea2 4bfa00d ebcf47f 19a862c 3ba6123 f8a195e 19a862c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
<title>AI Assistant (Gemma 3 1B - Doc Example Attempt)</title>
<style>
/* CSS is unchanged from the previous version */
@import url('https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500;700&display=swap');
:root {
--primary-color: #007bff; --secondary-color: #6c757d; --text-color: #212529;
--bg-color: #f8f9fa; --user-msg-bg: #e7f5ff; --user-msg-text: #004085;
--bot-msg-bg: #ffffff; --bot-msg-border: #dee2e6; --system-msg-color: #6c757d;
--error-color: #721c24; --error-bg: #f8d7da; --error-border: #f5c6cb;
--warning-color: #856404; --warning-bg: #fff3cd; --warning-border: #ffeeba;
--success-color: #155724; --success-bg: #d4edda; --success-border: #c3e6cb;
--border-color: #dee2e6; --input-bg: #ffffff; --input-border: #ced4da;
--button-bg: var(--primary-color); --button-hover-bg: #0056b3; --button-disabled-bg: #adb5bd;
--scrollbar-thumb: var(--primary-color); --scrollbar-track: #e9ecef;
--header-bg: #ffffff; --header-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
--container-shadow: 0 4px 15px rgba(0, 0, 0, 0.07);
}
* { box-sizing: border-box; margin: 0; padding: 0; }
html { height: 100%; }
body { font-family: 'Roboto', sans-serif; display: flex; flex-direction: column; align-items: center; justify-content: flex-start; min-height: 100vh; background-color: var(--bg-color); color: var(--text-color); padding: 10px; overscroll-behavior: none; }
#control-panel { background: var(--header-bg); padding: 15px; border-radius: 8px; margin-bottom: 10px; box-shadow: var(--header-shadow); width: 100%; max-width: 600px; border: 1px solid var(--border-color); text-align: center; }
#loadModelButton { padding: 10px 20px; font-size: 1em; background-color: var(--primary-color); color: white; border: none; border-radius: 5px; cursor: pointer; transition: background-color 0.2s; margin-bottom: 10px; }
#loadModelButton:hover:not(:disabled) { background-color: var(--button-hover-bg); }
#loadModelButton:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; }
#model-status { font-size: 0.9em; padding: 10px; border-radius: 4px; text-align: center; min-height: 40px; line-height: 1.4; }
#model-status.info { background-color: #e2e3e5; border: 1px solid #d6d8db; color: #383d41; }
#model-status.loading { background-color: var(--warning-bg); border: 1px solid var(--warning-border); color: var(--warning-color); }
#model-status.success { background-color: var(--success-bg); border: 1px solid var(--success-border); color: var(--success-color); }
#model-status.error { background-color: var(--error-bg); border: 1px solid var(--error-border); color: var(--error-color); }
#chat-container { width: 100%; max-width: 600px; height: 75vh; max-height: 700px; background-color: #ffffff; border-radius: 12px; box-shadow: var(--container-shadow); display: flex; flex-direction: column; overflow: hidden; border: 1px solid var(--border-color); }
h1 { text-align: center; color: var(--primary-color); padding: 15px; background-color: var(--header-bg); border-bottom: 1px solid var(--border-color); font-size: 1.2em; font-weight: 500; flex-shrink: 0; box-shadow: var(--header-shadow); position: relative; z-index: 10; }
#chatbox { flex-grow: 1; overflow-y: auto; padding: 15px; display: flex; flex-direction: column; gap: 12px; scrollbar-width: thin; scrollbar-color: var(--scrollbar-thumb) var(--scrollbar-track); background-color: var(--bg-color); }
#chatbox::-webkit-scrollbar { width: 6px; } #chatbox::-webkit-scrollbar-track { background: var(--scrollbar-track); border-radius: 3px; } #chatbox::-webkit-scrollbar-thumb { background-color: var(--scrollbar-thumb); border-radius: 3px; }
#messages div { padding: 10px 15px; border-radius: 16px; max-width: 85%; word-wrap: break-word; line-height: 1.5; font-size: 1em; box-shadow: 0 1px 2px rgba(0,0,0,0.05); position: relative; animation: fadeIn 0.25s ease-out; }
@keyframes fadeIn { from { opacity: 0; transform: translateY(5px); } to { opacity: 1; transform: translateY(0); } }
.user-message { background: var(--user-msg-bg); color: var(--user-msg-text); align-self: flex-end; border-bottom-right-radius: 4px; margin-left: auto; }
.bot-message { background-color: var(--bot-msg-bg); border: 1px solid var(--bot-msg-border); align-self: flex-start; border-bottom-left-radius: 4px; margin-right: auto; }
.bot-message a { color: var(--primary-color); text-decoration: none; } .bot-message a:hover { text-decoration: underline; }
.system-message { font-style: italic; color: var(--system-msg-color); text-align: center; font-size: 0.85em; background-color: transparent; box-shadow: none; align-self: center; max-width: 100%; padding: 5px 0; animation: none; }
.error-message { color: var(--error-color); font-weight: 500; background-color: var(--error-bg); border: 1px solid var(--error-border); padding: 10px 15px; border-radius: 8px; align-self: stretch; text-align: left; }
#input-area { display: flex; padding: 10px 12px; border-top: 1px solid var(--border-color); background-color: var(--header-bg); align-items: center; gap: 8px; flex-shrink: 0; }
#userInput { flex-grow: 1; padding: 10px 15px; border: 1px solid var(--input-border); border-radius: 20px; outline: none; font-size: 1em; font-family: 'Roboto', sans-serif; background-color: var(--input-bg); transition: border-color 0.2s ease; min-height: 42px; resize: none; overflow-y: auto; }
#userInput:focus { border-color: var(--primary-color); }
.control-button { padding: 0; border: none; border-radius: 50%; cursor: pointer; background-color: var(--button-bg); color: white; width: 42px; height: 42px; font-size: 1.3em; display: flex; align-items: center; justify-content: center; flex-shrink: 0; transition: background-color 0.2s ease, transform 0.1s ease; box-shadow: 0 1px 2px rgba(0,0,0,0.08); }
.control-button:hover:not(:disabled) { background-color: var(--button-hover-bg); transform: translateY(-1px); }
.control-button:active:not(:disabled) { transform: scale(0.95); }
.control-button:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; transform: none; box-shadow: none; }
#toggleSpeakerButton.muted { background-color: #aaa; }
@media (max-width: 600px) { /* Responsive styles */
body { padding: 5px; justify-content: flex-start; } #control-panel { margin-bottom: 5px; padding: 12px; }
#chat-container { width: 100%; height: auto; flex-grow: 1; border-radius: 12px; max-height: none; margin-bottom: 5px; }
h1 { font-size: 1.1em; padding: 12px; } #chatbox { padding: 12px 8px; gap: 10px; }
#messages div { max-width: 90%; font-size: 0.95em; padding: 9px 14px;}
#input-area { padding: 8px; gap: 5px; } #userInput { padding: 9px 14px; min-height: 40px; }
.control-button { width: 40px; height: 40px; font-size: 1.2em; }
}
</style>
<script type="importmap">
{
"imports": {
"@xenova/transformers": "https://cdn.jsdelivr.net/npm/@xenova/transformers@latest"
}
}
</script>
</head>
<body>
<div id="control-panel">
<h2>Model Loader</h2>
<button id="loadModelButton">Load Gemma 3 1B Model (Q4)</button>
<div id="model-status" class="info">Click button to load Gemma 3 1B (using Transformers.js v3+ &amp; Q4 dtype). <strong>Warning:</strong> Model loading is expected to fail due to library incompatibility.</div>
</div>
<div id="chat-container">
<h1 id="chatbot-name">AI Assistant</h1>
<div id="chatbox">
<div id="messages">
<!-- Chat messages appear here -->
</div>
</div>
<div id="input-area">
<textarea id="userInput" placeholder="Please attempt to load the model first..." rows="1" disabled></textarea>
<button id="speechButton" class="control-button" title="Speak message" disabled>🎤</button>
<button id="toggleSpeakerButton" class="control-button" title="Toggle AI speech output" disabled>🔊</button>
<button id="sendButton" class="control-button" title="Send message" disabled>➤</button>
</div>
</div>
<script type="module">
import { pipeline, env } from '@xenova/transformers';
// Model configuration: Gemma 3 1B instruction-tuned, community ONNX export, 4-bit weights.
const MODEL_NAME = 'onnx-community/gemma-3-1b-it-ONNX-GQA';
const TASK = 'text-generation';
const QUANTIZATION = 'q4';
// Transformers.js environment: always fetch models from the Hub, cache in the browser.
env.allowLocalModels = false;
env.useBrowserCache = true;
// Try WebGPU first, fall back to WASM.
env.backends.onnx.executionProviders = ['webgpu', 'wasm'];
console.log('Using Execution Providers:', env.backends.onnx.executionProviders);
// NOTE(review): this option is not in the documented Transformers.js env API — confirm it has any effect.
env.backends.onnx.prefer_alternative_execution_providers = true;
// DOM Elements
const chatbox = document.getElementById('messages'); // NB: the inner #messages div, not #chatbox
const userInput = document.getElementById('userInput');
const sendButton = document.getElementById('sendButton');
const chatbotNameElement = document.getElementById('chatbot-name');
const speechButton = document.getElementById('speechButton');
const toggleSpeakerButton = document.getElementById('toggleSpeakerButton');
const modelStatus = document.getElementById('model-status');
const loadModelButton = document.getElementById('loadModelButton');
// State
let generator = null; // loaded text-generation pipeline; null until a load succeeds
let isLoadingModel = false; // true while initializeModel() is in flight
let conversationHistory = []; // Will store messages in { role: '...', content: '...' } format
let botState = { botName: "AI Assistant", userName: "User", botSettings: { useSpeechOutput: true } };
const stateKey = 'gemma3_1b_doc_state_v1'; // New key
const historyKey = 'gemma3_1b_doc_history_v1';
// Speech API
let recognition = null; // SpeechRecognition instance; stays null when unsupported
let synthesis = window.speechSynthesis;
let targetVoice = null; // preferred English synthesis voice, set by findAndSetVoice()
let isListening = false;
// --- Initialization ---
// On page load: restore persisted state/history, wire up the speech APIs,
// and leave the chat controls disabled until the model has been loaded.
window.addEventListener('load', () => {
loadState(); // Load state, including potentially saved history
chatbotNameElement.textContent = botState.botName;
updateSpeakerButtonUI();
initializeSpeechAPI();
setupInputAutosize();
updateChatUIState(false); // Initial UI state: disabled
displayHistory(); // Display saved history after elements are ready
setTimeout(loadVoices, 500); // voices may not be populated immediately on some browsers
loadModelButton.addEventListener('click', handleLoadModelClick);
console.log("Attempting to use Transformers.js (latest version from CDN)");
displayMessage('system', `Using latest Transformers.js from CDN. Ready to attempt loading ${MODEL_NAME}.`, false);
});
// --- State Persistence ---
// Restores bot settings and conversation history from localStorage.
// Malformed JSON is silently ignored; history must parse to an array or it is reset.
function loadState() {
    const rawState = localStorage.getItem(stateKey);
    if (rawState) {
        try {
            const parsed = JSON.parse(rawState);
            botState = {
                ...botState,
                ...parsed,
                botSettings: { ...botState.botSettings, ...(parsed.botSettings || {}) },
            };
        } catch (e) { /* keep defaults on parse failure */ }
    }
    // History is already persisted in [{ role, content }, ...] form.
    const rawHistory = localStorage.getItem(historyKey);
    if (rawHistory) {
        try {
            conversationHistory = JSON.parse(rawHistory);
            if (!Array.isArray(conversationHistory)) conversationHistory = [];
        } catch (e) {
            conversationHistory = [];
        }
    }
}
// Persists bot settings and the { role, content } history to localStorage.
function saveState() {
    const snapshot = [
        [stateKey, botState],
        [historyKey, conversationHistory], // already in messages format
    ];
    for (const [key, value] of snapshot) {
        localStorage.setItem(key, JSON.stringify(value));
    }
}
function displayHistory() {
    chatbox.innerHTML = '';
    // Re-render the saved turns. handleUserMessage() stores bot turns with role
    // 'assistant', but this filter previously only accepted 'model', so saved
    // bot replies were never redisplayed after a reload. Accept both spellings.
    conversationHistory.forEach(msg => {
        if (msg.role === 'user') {
            displayMessage('user', msg.content, false);
        } else if (msg.role === 'model' || msg.role === 'assistant') {
            displayMessage('bot', msg.content, false);
        }
        // Other roles (e.g. 'system') are intentionally not rendered.
    });
}
// --- UI Update Functions ---
/**
 * Renders one chat bubble and scrolls it into view.
 * @param {string} sender - 'user' | 'bot' | 'system'.
 * @param {string} text - Raw message text; a tiny markdown subset is supported.
 * @param {boolean} [animate=true] - Whether to play the fade-in animation.
 * @param {boolean} [isError=false] - With sender 'system', styles as an error.
 */
function displayMessage(sender, text, animate = true, isError = false) {
    const messageDiv = document.createElement('div');
    let messageClass = sender === 'user' ? 'user-message' : sender === 'bot' ? 'bot-message' : 'system-message';
    if (sender === 'system' && isError) messageClass = 'error-message';
    messageDiv.classList.add(messageClass);
    if (!animate) messageDiv.style.animation = 'none';
    // Escape HTML BEFORE injecting our own markup so model/user-supplied text
    // cannot smuggle tags into innerHTML (XSS). The previous code replaced
    // '<' with '<' and '>' with '>' — a no-op that left the output unescaped.
    text = text.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;');
    // Minimal markdown: [label](url), **bold**, *italic*, and newlines.
    text = text.replace(/\[(.*?)\]\((.*?)\)/g, '<a href="$2" target="_blank" rel="noopener noreferrer">$1</a>');
    text = text.replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>').replace(/\*(.*?)\*/g, '<em>$1</em>');
    text = text.replace(/\n/g, '<br>');
    messageDiv.innerHTML = text;
    chatbox.appendChild(messageDiv);
    chatbox.scrollTo({ top: chatbox.scrollHeight, behavior: animate ? 'smooth' : 'auto' });
}
// Shows a status line in the control panel. `type` selects the CSS state
// (info | loading | success | error) and the message is echoed to the console.
function updateModelStatus(message, type = 'info') {
    modelStatus.textContent = message;
    modelStatus.className = 'model-status ' + type;
    console.log(`Model Status (${type}): ${message}`);
}
// Enables/disables every chat control from the model-readiness and busy flags,
// and keeps the input placeholder in sync with the current phase.
function updateChatUIState(isModelLoadedSuccessfully) {
    const ready = isModelLoadedSuccessfully;
    const busy = isLoadingModel;
    userInput.disabled = !ready || busy;
    sendButton.disabled = !ready || busy || userInput.value.trim() === '';
    speechButton.disabled = !ready || busy || isListening || !recognition;
    toggleSpeakerButton.disabled = !ready || busy || !synthesis;
    loadModelButton.disabled = busy || ready; // one-shot load button
    if (ready) {
        userInput.placeholder = "How can I help you today?";
    } else if (busy) {
        userInput.placeholder = "Model loading...";
    } else {
        userInput.placeholder = "Please attempt to load the model first...";
    }
}
// Reflects the speech-output setting in the toggle button's icon, tooltip and
// muted styling. The original icon ternary had the same mojibake'd glyph ('π')
// in both branches, so the button never changed appearance; restored to 🔊/🔇.
function updateSpeakerButtonUI() {
    const speechOn = botState.botSettings.useSpeechOutput;
    toggleSpeakerButton.textContent = speechOn ? '🔊' : '🔇';
    toggleSpeakerButton.title = speechOn ? 'Turn off AI speech' : 'Turn on AI speech';
    toggleSpeakerButton.classList.toggle('muted', !speechOn);
}
// Speech status is only logged; there is no dedicated UI element for it.
function showSpeechStatus(message) {
    console.log("Speech Status:", message);
}
// Grows the textarea to fit its content on every keystroke and refreshes
// button enablement (the send button depends on whether the input is empty).
function setupInputAutosize() {
    userInput.addEventListener('input', () => {
        userInput.style.height = 'auto';
        userInput.style.height = userInput.scrollHeight + 'px';
        updateChatUIState(generator !== null);
    });
}
// --- Model & AI Logic ---
// Click handler for the load button. Guards against re-entry, then delegates
// to initializeModel() and re-enables the UI according to whether the
// pipeline was actually created.
async function handleLoadModelClick() {
    if (isLoadingModel || generator) return;
    generator = null;
    isLoadingModel = true;
    updateChatUIState(false);
    await initializeModel(MODEL_NAME);
    isLoadingModel = false;
    updateChatUIState(generator !== null);
}
// Creates the text-generation pipeline for `modelId` with Q4 weights, mirroring
// the Transformers.js documentation example. On success the module-level
// `generator` holds the pipeline; on failure it is reset to null and a
// best-guess diagnosis is shown based on the error message.
async function initializeModel(modelId) {
    updateModelStatus(`Loading ${modelId} with { dtype: "${QUANTIZATION}" } using latest library...`, 'loading');
    displayMessage('system', `Attempting to load ${modelId} using latest library & Q4 dtype (as per docs)...`, false);
    try {
        generator = await pipeline(TASK, modelId, {
            dtype: QUANTIZATION,
            // Surface download/initialisation progress in the status box.
            progress_callback: (p) => {
                const file = p.file ? p.file.split('/').pop() : '';
                const pct = Math.round(p.progress || 0);
                updateModelStatus(`[Loading: ${p.status}] ${file} (${pct}%)`, 'loading');
            }
        });
        updateModelStatus(`${modelId} loaded successfully! (Unexpected?)`, 'success');
        displayMessage('system', `[SUCCESS] ${modelId} loaded. If this worked, the library/model compatibility might have changed.`, false);
    } catch (error) {
        console.error(`Model loading failed for ${modelId} (with latest library, Q4):`, error);
        let errorMsg = `Failed to load ${modelId}: ${error.message}.`;
        // Map the most likely failure modes to actionable hints.
        if (error.message.includes("Unsupported model type") || error.message.includes("gemma3_text")) {
            errorMsg += " The 'gemma3_text' model type is likely still unsupported by this library version.";
        } else if (error.message.includes("split is not a function")) {
            errorMsg += " A TypeError occurred, possibly due to config parsing issues (incompatibility).";
        } else {
            errorMsg += " Check console for details. Memory limits or network issues could also be factors.";
        }
        updateModelStatus(errorMsg, 'error');
        displayMessage('system', `[ERROR] ${errorMsg}`, true, true);
        generator = null;
    }
}
// Assembles the chat-template messages array for the pipeline: a system
// prompt, the saved { role, content } history, then the new user turn.
// NOTE(review): Gemma chat templates reportedly reject a standalone 'system'
// role — confirm the tokenizer folds it into the first user turn.
function buildMessages(newUserMessage) {
    const messages = [
        { role: "system", content: "You are a helpful assistant." },
        ...conversationHistory,
        { role: "user", content: newUserMessage },
    ];
    console.log("Input Messages:", messages);
    return messages;
}
// Extracts the assistant's reply from the text-generation pipeline output.
// In messages mode the pipeline returns [{ generated_text: [...messages] }],
// where the final entry is the assistant turn (per the documented
// output[0].generated_text.at(-1).content access pattern). Falls back to a
// canned apology when the structure is not what we expect.
function cleanupResponse(output) {
    try {
        const turns = output?.[0]?.generated_text;
        if (Array.isArray(turns) && turns.length > 0) {
            const last = turns.at(-1);
            if (last && last.role === 'assistant' && last.content) {
                // Strip Gemma turn markers that occasionally leak through.
                const cleaned = last.content
                    .trim()
                    .replace(/<end_of_turn>/g, '')
                    .trim()
                    .replace(/<start_of_turn>/g, '')
                    .trim();
                if (cleaned.length > 0) return cleaned;
            }
        }
    } catch (e) {
        console.error("Error parsing generator output structure:", e, "Output:", output);
    }
    console.warn("Could not extract response using standard messages structure. Using fallback.");
    const fallbacks = [ "Sorry, I had trouble formatting my response.", "My response might be incomplete.", "Something went wrong displaying the answer." ];
    return fallbacks[Math.floor(Math.random() * fallbacks.length)];
}
// --- Main Interaction Logic ---
// Sends the user's message through the pipeline and renders the reply.
// The turn is appended to conversationHistory in { role, content } form both
// before generation (user) and after (assistant), then persisted.
async function handleUserMessage() {
    const text = userInput.value.trim();
    if (!text || !generator || isLoadingModel) return;
    userInput.value = '';
    userInput.style.height = 'auto';
    updateChatUIState(true); // disables the send button while generating
    displayMessage('user', text);
    conversationHistory.push({ role: 'user', content: text });
    updateModelStatus("AI thinking...", "loading");
    const messages = buildMessages(text); // history + the new turn
    try {
        const outputs = await generator(messages, {
            max_new_tokens: 300,
            temperature: 0.7,
            top_k: 50,
            do_sample: true, // sampled decoding for more varied replies
        });
        const replyText = cleanupResponse(outputs);
        console.log("Cleaned AI Output:", replyText);
        displayMessage('bot', replyText);
        conversationHistory.push({ role: 'assistant', content: replyText });
        if (botState.botSettings.useSpeechOutput && synthesis && targetVoice) {
            speakText(replyText);
        }
        saveState(); // persist the updated history
    } catch (error) {
        console.error("AI response generation error:", error);
        displayMessage('system', `[ERROR] Failed to generate response: ${error.message}`, true, true);
        // Note: the user turn stays in history even when generation fails.
    } finally {
        if (generator) updateModelStatus(`${MODEL_NAME} ready.`, "success");
        updateChatUIState(generator !== null);
        userInput.focus();
    }
}
// --- Speech API Functions ---
// Wires up Web Speech recognition (mic input) and synthesis (speaker toggle).
// Recognition is optional — the mic button simply stays disabled when the
// browser does not support it.
function initializeSpeechAPI() {
    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
    if (SpeechRecognition) {
        recognition = new SpeechRecognition();
        recognition.lang = 'en-US';
        recognition.continuous = false;
        recognition.interimResults = false;
        recognition.onstart = () => {
            isListening = true;
            updateChatUIState(generator !== null);
            console.log('Listening...');
        };
        recognition.onresult = (event) => {
            // Put the transcript into the input (firing autosize) and send it.
            userInput.value = event.results[0][0].transcript;
            userInput.dispatchEvent(new Event('input'));
            handleUserMessage();
        };
        recognition.onerror = (event) => {
            console.error("Speech error:", event.error);
            updateModelStatus(`Speech recognition error (${event.error})`, 'error');
            setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 3000);
        };
        recognition.onend = () => {
            isListening = false;
            updateChatUIState(generator !== null);
            console.log('Stopped listening.');
        };
    } else {
        console.warn("Speech Recognition not supported.");
    }
    if (!synthesis) {
        console.warn("Speech Synthesis not supported.");
    } else {
        toggleSpeakerButton.addEventListener('click', () => {
            botState.botSettings.useSpeechOutput = !botState.botSettings.useSpeechOutput;
            updateSpeakerButtonUI();
            saveState();
            if (!botState.botSettings.useSpeechOutput) synthesis.cancel();
        });
    }
    updateChatUIState(false);
}
// Fetches synthesis voices. Some browsers populate the list asynchronously,
// in which case we wait for the voiceschanged event before picking one.
function loadVoices() {
    if (!synthesis) return;
    let voices = synthesis.getVoices();
    if (voices.length === 0) {
        synthesis.onvoiceschanged = () => {
            voices = synthesis.getVoices();
            findAndSetVoice(voices);
        };
    } else {
        findAndSetVoice(voices);
    }
}
// Picks an en-US voice if available, otherwise any English variant, and
// records it in the module-level targetVoice.
function findAndSetVoice(voices) {
    const exactMatch = voices.find(v => v.lang === 'en-US');
    targetVoice = exactMatch || voices.find(v => v.lang.startsWith('en-'));
    if (targetVoice) {
        console.log("Using English voice:", targetVoice.name, targetVoice.lang);
    } else {
        console.warn("No suitable English voice found.");
    }
}
// Speaks `text` with the selected voice, cancelling any utterance in progress.
// No-op when synthesis is unavailable, speech output is off, or no voice is set.
function speakText(text) {
    if (!synthesis || !botState.botSettings.useSpeechOutput || !targetVoice) return;
    synthesis.cancel(); // stop anything already speaking
    const utterance = new SpeechSynthesisUtterance(text);
    utterance.voice = targetVoice;
    utterance.lang = targetVoice.lang;
    utterance.rate = 1.0;
    utterance.pitch = 1.0;
    synthesis.speak(utterance);
}
// --- Event Listeners ---
sendButton.addEventListener('click', handleUserMessage);
// Enter sends the message; Shift+Enter inserts a newline.
userInput.addEventListener('keypress', (e) => {
    if (e.key === 'Enter' && !e.shiftKey) {
        e.preventDefault();
        handleUserMessage();
    }
});
// Mic button: only start recognition when supported, idle, and the model is ready.
speechButton.addEventListener('click', () => {
    if (!recognition || isListening || !generator || isLoadingModel) return;
    try {
        recognition.start();
    } catch (error) {
        console.error("Rec start fail:", error);
        updateModelStatus(`Failed to start recognition`, 'error');
        setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 2000);
        isListening = false;
        updateChatUIState(generator !== null);
    }
});
</script>
</body>
</html> |