Update index.html

index.html CHANGED (+90 −755)
@@ -3,8 +3,9 @@
 <head>
 <meta charset="UTF-8">
 <meta name="viewport" content="width=device-width,initial-scale=1.0">
-<title>AI Chat -
-<
+<title>AI Chat - Typhoon & Qwen2.5-Coder</title>
+<script src="https://cdn.tailwindcss.com"></script>
+
 <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.9.0/styles/vs.min.css">
 <style>
 html, body { height: 100%; margin: 0; }
@@ -36,7 +37,7 @@
 z-index: 1;
 }
 .win95window.fullscreen {
-position:
+position: ;
 left: 0; top: 0; right: 0; bottom: 0;
 min-width: 100vw; min-height: 100vh; max-width: none; max-height: none;
 width: 100vw; height: 100vh;
@@ -290,46 +291,46 @@
 background-position: 6px 7px;
 background-size: 16px 16px;
 }
-#contextSaveArea95 {
-
-
-
-
-
-
-
+#contextSaveArea95 {
+padding: 10px 10px 12px;
+background: #f8f8e0;
+border-top: 2px solid #fff;
+border-bottom: 2px solid #808080;
+display: none;
+font-size: 0.98em;
+}
 #contextSaveArea95 h4 { margin: 0 0 8px 0; color: #444; font-size: 1.08em; }
-#contextSaveArea95 pre {
-
-
-
-
-
-
-
-
-
+#contextSaveArea95 pre {
+background: #fff;
+border: 2px inset #808080;
+padding: 8px 5px;
+font-size: 0.95em;
+max-height: 120px;
+overflow: auto;
+margin: 7px 0;
+white-space: pre-wrap;
+word-break: break-all;
+}
 #contextSaveArea95 label { font-weight: bold; }
-#contextSaveArea95 input[type="text"] {
-
-
-
-
-
-
-
+#contextSaveArea95 input[type="text"] {
+width: 98%;
+min-width: 0;
+border: 2px inset #808080;
+background: #fff;
+font-size: 1em;
+margin-bottom: 3px;
+padding: 4px 8px;
+}
-#contextSaveArea95 button {
-
-
-
-
-
-
-
-
+#contextSaveArea95 button {
+margin-top: 6px;
+font-size: 1em;
+background: #e0e0e0;
+border: 2px outset #fff;
+color: #000;
+padding: 2.5px 14px;
+cursor: pointer;
+border-radius: 0;
+}
 #contextSaveArea95 button:active { background: #c0c0c0; border: 2px inset #808080; }
 #contextSaveArea95 button:disabled { background: #e0e0e0; color: #bbb; border: 2px outset #b0b0b0; }
 .model-pair-selection {
@@ -358,18 +359,18 @@
 max-width: 180px;
 }
 @media (max-width: 650px) {
-.win95window {
-
-
-
-
+.win95window {
+min-width: 0;
+max-width: 98vw;
+box-shadow: 1px 2px 0px #0006, 0 0 0 2px #6664;
+}
 .win95content { padding: 0; }
-.settings95 {
-
-
-
-
-
+.settings95 {
+flex-direction: column;
+align-items: flex-start;
+gap: 4px;
+padding: 8px 4px;
+}
 .system-preset-area {
 flex-direction: column;
 align-items: flex-start;
@@ -382,18 +383,18 @@
 gap: 4px;
 padding: 6px 4px;
 }
-.input-area95 {
-
-
-
+.input-area95 {
+flex-direction: column;
+gap: 8px;
+padding: 8px 4px;
 }
-.chat-container95 {
-
-
-
-.dropzone95 {
-
-
+.chat-container95 {
+padding: 10px 2px;
+font-size: 0.98em;
+}
+.dropzone95 {
+font-size: 1em;
+padding: 20px 2px 14px;
 }
 }
 </style>
@@ -405,7 +406,7 @@
 <span style="display:inline-block;width:18px;height:18px;background:#fff;border:1px solid #808080;margin-right:7px;box-shadow:inset 2px 2px #c0c0c0;">
 <span style="display:inline-block;width:9px;height:9px;background:#008080;margin:4px 0 0 4px;vertical-align:middle;"></span>
 </span>
-AI Chat -
+AI Chat - Typhoon & Qwen2.5-Coder
 </div>
 <div class="win95controls">
 <button id="maximizeBtn" title="เต็มจอ">□</button>
@@ -415,40 +416,20 @@
 </div>
 <div class="win95content">
 <form class="settings95" id="settingsForm95" autocomplete="off" onsubmit="return false;">
-<label for="
-<
-<option value="anthropic">Anthropic (Claude)</option>
-<option value="xai">xAI (Grok-3)</option>
-<option value="groq">Groq</option>
-<option value="openai">OpenAI</option>
-<option value="ollama">Ollama (Local)</option>
-<option value="huggingface">HuggingFace</option>
-<option value="google">Google (Gemini)</option>
-</select>
-<label for="modelSelect95">โมเดล:</label>
-<select id="modelSelect95"></select>
-<label for="apiKey95">API Key:</label>
-<input type="text" id="apiKey95" placeholder="กรอก API Key ของคุณ">
+<label for="apiKey95">HuggingFace API Key:</label>
+<input type="text" id="apiKey95" placeholder="กรอก HuggingFace API Key">
 <button id="confirmApiKeyBtn95" type="button">บันทึก</button>
 <span class="api-status" id="apiKeyStatus95"></span>
 </form>
-
-<div class="model-pair-selection" id="multiModalSelection"
-<label for="
-<select id="
+
+<div class="model-pair-selection" id="multiModalSelection">
+<label for="modelSelect95">เลือกโมเดล:</label>
+<select id="modelSelect95">
+<option value="scb10x/typhoon-v1.5x-72b-instruct">Typhoon OCR 72B</option>
 <option value="Qwen/Qwen2.5-Coder-32B-Instruct">Qwen2.5-Coder 32B</option>
-<option value="Qwen/Qwen2-VL-72B-Instruct">Qwen2-VL 72B</option>
-<option value="microsoft/Florence-2-large">Florence-2 Large</option>
-<option value="llava-hf/llava-1.5-7b-hf">LLaVA 1.5 7B (HF)</option>
-</select>
-<label for="ocrModelSelect95">OCR Model:</label>
-<select id="ocrModelSelect95">
-<option value="scb10x/typhoon-v1.5x-72b-instruct">Typhoon OCR 7B</option>
-<option value="microsoft/trocr-base-printed">TrOCR Base</option>
-<option value="PaddlePaddle/PaddleOCR">PaddleOCR</option>
 </select>
 </div>
-
+
 <div class="system-preset-area">
 <label for="systemPresetSelect95">ระบบ:</label>
 <select id="systemPresetSelect95">
@@ -460,7 +441,7 @@
 </select>
 <input type="text" id="customSystemPrompt95" placeholder="ใส่ System Prompt ของคุณ" style="display:none; flex: 1; min-width: 200px;">
 </div>
-
+
 <div id="contextSaveArea95">
 <h4>Context ปัจจุบัน</h4>
 <div id="warningMessage95" style="color:orange;font-weight:bold;"></div>
@@ -471,7 +452,7 @@
 <button id="confirmSaveBtn95">ยืนยัน context และเริ่มแชทใหม่</button>
 </div>
 <div class="chat-container95" id="messagesDiv95">
-<div class="message95 ai">สวัสดี! AI Chat
+<div class="message95 ai">สวัสดี! AI Chat พร้อมใช้งาน 🎉<br>เลือกระหว่าง Typhoon OCR 72B หรือ Qwen2.5-Coder 32B!</div>
 </div>
 <form id="chatForm95" class="input-area95" autocomplete="off">
 <button type="button" class="file-attach-btn" id="fileAttachBtn" title="แนบไฟล์">
@@ -491,6 +472,7 @@
 const win95window = document.getElementById('win95window');
 const maximizeBtn = document.getElementById('maximizeBtn');
 let isFullscreen = false;
+
 maximizeBtn.onclick = function() {
 isFullscreen = !isFullscreen;
 win95window.classList.toggle('fullscreen', isFullscreen);
@@ -498,122 +480,7 @@ maximizeBtn.onclick = function() {
 maximizeBtn.title = isFullscreen ? "คืนหน้าต่าง" : "เต็มจอ";
 };

-// ---
-const PROVIDERS = {
-anthropic: {
-name: "Anthropic (Claude)",
-endpoint: "https://api.anthropic.com/v1/messages",
-apiKeyLabel: "Anthropic API Key",
-models: [
-{ value: "claude-3-5-sonnet-20241022", label: "Claude-3.5 Sonnet" },
-{ value: "claude-3-haiku-20240307", label: "Claude-3 Haiku" },
-{ value: "claude-3-opus-20240229", label: "Claude-3 Opus" },
-{ value: "claude-3-sonnet-20240229", label: "Claude-3 Sonnet" }
-],
-isStream: false,
-streamEndpoint: "https://api.anthropic.com/v1/messages",
-supportsMultiModal: true
-},
-xai: {
-name: "xAI (Grok)",
-endpoint: "https://api.x.ai/v1/chat/completions",
-apiKeyLabel: "xAI API Key",
-models: [
-{ value: "grok-3-beta", label: "Grok-3 Beta" },
-{ value: "grok-3-fast-beta", label: "Grok-3 Fast Beta" },
-{ value: "grok-3-mini-beta", label: "Grok-3 Mini Beta" },
-{ value: "grok-3-mini-fast-beta", label: "Grok-3 Mini Fast Beta" },
-{ value: "grok-2-1212", label: "Grok-2" },
-{ value: "grok-2-mini", label: "Grok-2 Mini" }
-],
-isStream: true,
-supportsMultiModal: true
-},
-groq: {
-name: "Groq",
-endpoint: "https://api.groq.com/openai/v1/chat/completions",
-apiKeyLabel: "Groq API Key",
-models: [
-{ value: "llama-3.3-70b-versatile", label: "Llama-3.3 70B" },
-{ value: "llama3-70b-8192", label: "Llama-3 70B 8K" },
-{ value: "llama3-8b-8192", label: "Llama-3 8B 8K" },
-{ value: "mixtral-8x7b-32768", label: "Mixtral 8x7B 32K" },
-{ value: "gemma2-9b-it", label: "Gemma-2 9B IT" },
-{ value: "gemma-7b-it", label: "Gemma 7B IT" }
-],
-isStream: true,
-supportsMultiModal: false
-},
-openai: {
-name: "OpenAI",
-endpoint: "https://api.openai.com/v1/chat/completions",
-apiKeyLabel: "OpenAI API Key",
-models: [
-{ value: "gpt-4o", label: "GPT-4o" },
-{ value: "gpt-4o-mini", label: "GPT-4o Mini" },
-{ value: "gpt-4-turbo", label: "GPT-4 Turbo" },
-{ value: "gpt-3.5-turbo", label: "GPT-3.5 Turbo" },
-{ value: "o1-preview", label: "o1-preview" },
-{ value: "o1-mini", label: "o1-mini" }
-],
-isStream: true,
-supportsMultiModal: true
-},
-ollama: {
-name: "Ollama (Local)",
-endpoint: "http://localhost:11434/api/chat",
-apiKeyLabel: "ไม่ต้องใส่ API Key",
-models: [
-{ value: "llama3.2-vision", label: "Llama 3.2 Vision" },
-{ value: "llama3.2", label: "Llama 3.2" },
-{ value: "llama3.1", label: "Llama 3.1" },
-{ value: "codellama", label: "Code Llama" },
-{ value: "mistral", label: "Mistral" },
-{ value: "gemma2", label: "Gemma 2" }
-],
-isStream: true,
-supportsMultiModal: true
-},
-huggingface: {
-name: "HuggingFace",
-endpoint: "https://api-inference.huggingface.co/models/", // Add trailing slash
-apiKeyLabel: "HuggingFace API Key",
-models: [
-{ value: "Qwen/Qwen2.5-Coder-32B-Instruct", label: "Qwen2.5-Coder 32B (Text/Code)" },
-{ value: "Qwen/Qwen2-VL-72B-Instruct", label: "Qwen2-VL 72B (Vision)" },
-{ value: "scb10x/typhoon-v1.5x-72b-instruct", label: "Typhoon 72B (Text/Code)" },
-{ value: "microsoft/Florence-2-large", label: "Florence-2 Large (Vision)" },
-{ value: "llava-hf/llava-1.5-7b-hf", label: "LLaVA 1.5 7B (Vision)" },
-{ value: "microsoft/trocr-base-printed", label: "TrOCR Base (OCR)" },
-{ value: "google/t5-base-qa", label: "T5 Base QA (Text)" } // Example, more models can be added
-],
-isStream: false, // HuggingFace inference API usually doesn't stream by default
-supportsMultiModal: true,
-specificModelEndpoints: { // For multi-modal HF models, we might need specific endpoints
-"Qwen/Qwen2-VL-72B-Instruct": "https://api-inference.huggingface.co/models/Qwen/Qwen2-VL-72B-Instruct",
-"microsoft/Florence-2-large": "https://api-inference.huggingface.co/models/microsoft/Florence-2-large",
-"llava-hf/llava-1.5-7b-hf": "https://api-inference.huggingface.co/models/llava-hf/llava-1.5-7b-hf",
-"microsoft/trocr-base-printed": "https://api-inference.huggingface.co/models/microsoft/trocr-base-printed",
-"PaddlePaddle/PaddleOCR": "https://api-inference.huggingface.co/models/PaddlePaddle/PaddleOCR", // Placeholder, usually requires dedicated setup or API
-}
-},
-google: {
-name: "Google (Gemini)",
-endpoint: "https://generativelanguage.googleapis.com/v1beta/models/",
-apiKeyLabel: "Google API Key",
-models: [
-{ value: "gemini-pro", label: "Gemini Pro" },
-{ value: "gemini-pro-vision", label: "Gemini Pro Vision" },
-{ value: "gemini-1.5-pro-latest", label: "Gemini 1.5 Pro" },
-{ value: "gemini-1.5-flash-latest", label: "Gemini 1.5 Flash" }
-],
-isStream: true,
-supportsMultiModal: true
-}
-};
-
-const providerSelect = document.getElementById('providerSelect95');
-const modelSelect = document.getElementById('modelSelect95');
+// --- API Setup ---
 const apiKeyInput = document.getElementById('apiKey95');
 const confirmApiKeyBtn = document.getElementById('confirmApiKeyBtn95');
 const apiKeyStatus = document.getElementById('apiKeyStatus95');
@@ -630,9 +497,7 @@ const confirmSaveBtn = document.getElementById('confirmSaveBtn95');
 const fileInput = document.getElementById('fileInput');
 const fileNameSpan = document.getElementById('fileName');
 const dropzone = document.getElementById('dropzone95');
-const
-const visionModelSelect = document.getElementById('visionModelSelect95');
-const ocrModelSelect = document.getElementById('ocrModelSelect95');
+const modelSelect = document.getElementById('modelSelect95');

 let currentApiKey = '';
 let conversationHistory = [];
@@ -641,13 +506,13 @@ let lastDetectedCode = '';
 let savedGoal = '';

 // --- Save/Load API Key from Local Storage ---
-function saveApiKey(
-localStorage.setItem(
+function saveApiKey(key) {
+localStorage.setItem('huggingface_apiKey', key);
 updateApiKeyStatus(true);
 }

-function loadApiKey(
-return localStorage.getItem(
+function loadApiKey() {
+return localStorage.getItem('huggingface_apiKey') || '';
 }

 function updateApiKeyStatus(isSuccess) {
@@ -662,75 +527,13 @@ function updateApiKeyStatus(isSuccess) {
 }

 confirmApiKeyBtn.addEventListener('click', () => {
-const provider = providerSelect.value;
 currentApiKey = apiKeyInput.value.trim();
-saveApiKey(
+saveApiKey(currentApiKey);
 });

-//
-
-
-const providerInfo = PROVIDERS[selectedProvider];
-modelSelect.innerHTML = ''; // Clear existing options
-
-if (providerInfo && providerInfo.models) {
-providerInfo.models.forEach(model => {
-const option = document.createElement('option');
-option.value = model.value;
-option.textContent = model.label;
-modelSelect.appendChild(option);
-});
-}
-
-// Update API Key input based on selected provider
-apiKeyInput.value = loadApiKey(selectedProvider);
-currentApiKey = apiKeyInput.value.trim();
-apiKeyInput.placeholder = PROVIDERS[selectedProvider].apiKeyLabel;
-
-// Show/hide multi-modal model selection
-if (providerInfo.supportsMultiModal) {
-multiModalSelection.style.display = 'flex';
-// Populate vision and OCR models specific to HuggingFace if selected
-if (selectedProvider === 'huggingface') {
-visionModelSelect.innerHTML = '';
-ocrModelSelect.innerHTML = '';
-PROVIDERS.huggingface.models.forEach(model => {
-if (model.label.includes('(Vision)')) {
-const option = document.createElement('option');
-option.value = model.value;
-option.textContent = model.label;
-visionModelSelect.appendChild(option);
-} else if (model.label.includes('(OCR)')) {
-const option = document.createElement('option');
-option.value = model.value;
-option.textContent = model.label;
-ocrModelSelect.appendChild(option);
-}
-});
-// Add a "None" option for OCR if no OCR model is needed for a specific task
-const noneOcrOption = document.createElement('option');
-noneOcrOption.value = 'none';
-noneOcrOption.textContent = 'None (ไม่ใช้ OCR)';
-ocrModelSelect.appendChild(noneOcrOption);
-} else {
-// For other providers, ensure they have default options or are handled
-// For simplicity, we can default to specific models or clear them if not relevant
-// This part might need further refinement based on actual multi-modal APIs
-visionModelSelect.innerHTML = `
-<option value="default_vision">Default Vision Model</option>
-`;
-ocrModelSelect.innerHTML = `
-<option value="default_ocr">Default OCR Model</option>
-<option value="none">None (ไม่ใช้ OCR)</option>
-`;
-}
-} else {
-multiModalSelection.style.display = 'none';
-}
-}
-
-providerSelect.addEventListener('change', populateModels);
-document.addEventListener('DOMContentLoaded', populateModels);
+// Initialize API key on load
+apiKeyInput.value = loadApiKey();
+currentApiKey = apiKeyInput.value.trim();

 // --- System Preset and Custom Prompt ---
 systemPresetSelect.addEventListener('change', (event) => {
@@ -745,7 +548,7 @@ systemPresetSelect.addEventListener('change', (event) => {
 function addMessage(text, sender, imageUrl = null, codeLanguage = null) {
 const messageDiv = document.createElement('div');
 messageDiv.classList.add('message95', sender);
-
+
 if (imageUrl) {
 const img = document.createElement('img');
 img.src = imageUrl;
|
|
767 |
|
768 |
const language = match[1] || 'plaintext';
|
769 |
const code = match[2];
|
770 |
-
|
771 |
// Store the last detected code if it's the latest message and from AI
|
772 |
if (sender === 'ai') {
|
773 |
lastDetectedCode = code;
|
@@ -921,9 +724,7 @@ chatForm.addEventListener('submit', async (e) => {
 return;
 }

-const selectedProvider = providerSelect.value;
 const selectedModel = modelSelect.value;
-const providerInfo = PROVIDERS[selectedProvider];
 const apiKey = currentApiKey;
 const systemPreset = systemPresetSelect.value;
 let systemPrompt = '';
@@ -932,471 +733,5 @@ chatForm.addEventListener('submit', async (e) => {
 systemPrompt = customSystemPromptInput.value.trim();
 } else if (systemPreset === 'code-full') {
 if (lastDetectedCode) {
-systemPrompt =
-
-systemPrompt = `คุณคือผู้ช่วยพัฒนาเว็บที่มีความสามารถสูงในการเขียนโค้ดและสร้าง artifact ได้อย่างสมบูรณ์แบบตามคำสั่งของผู้ใช้ คุณจะทำงานร่วมกับผู้ใช้เพื่อแก้ไขและปรับปรุง artifact ที่มีอยู่ โค้ดทั้งหมดจะต้องถูกเขียนใน code block เดียว เพื่อเป็นไฟล์โค้ดที่สมบูรณ์และพร้อมใช้งาน โดยไม่มีการแยกโค้ด HTML และ JavaScript ในการตอบกลับของคุณ ให้เอาต์พุตเฉพาะโค้ด HTML โดยไม่มีข้อความอธิบายใดๆ เพิ่มเติม เมื่อใดก็ตามที่ได้รับคำสั่ง คุณจะตรวจสอบการรันโค้ดอีกครั้งเพื่อให้แน่ใจว่าไม่มีข้อผิดพลาดในการเอาต์พุต`;
-}
-} else if (systemPreset === 'code-function') {
-if (lastDetectedCode) {
-systemPrompt = `คุณคือผู้ช่วยพัฒนาเว็บที่มีความสามารถสูงในการเขียนโค้ดและสร้าง artifact ได้อย่างสมบูรณ์แบบตามคำสั่งของผู้ใช้ คุณจะทำงานร่วมกับผู้ใช้เพื่อแก้ไขและปรับปรุง artifact ที่มีอยู่ สำหรับการตอบกลับที่เกี่ยวข้องกับโค้ด ให้คุณตอบกลับเฉพาะโค้ดที่เกี่ยวข้องกับฟังก์ชันหรือส่วนที่แก้ไขเท่านั้น หากคุณให้ไฟล์โค้ดที่สมบูรณ์ คุณจะต้องตอบกลับเฉพาะโค้ด HTML เท่านั้น โดยไม่มีข้อความอธิบายใดๆ เพิ่มเติม เมื่อใดก็ตามที่ได้รับคำสั่ง คุณจะตรวจสอบการรันโค้ดอีกครั้งเพื่อให้แน่ใจว่าไม่มีข้อผิดพลาดในการเอาต์พุต ผู้ใช้ได้ให้โค้ด HTML/CSS/JS ล่าสุดแก่คุณแล้ว และต้องการให้คุณช่วยเหลือในการพัฒนาต่อ. โค้ดปัจจุบันคือ: \n\`\`\`html\n${lastDetectedCode}\n\`\`\`\nเป้าหมายของโค้ดนี้คือ: ${savedGoal || 'ไม่ระบุ'}.`;
-} else {
-systemPrompt = `คุณคือผู้ช่วยพัฒนาเว็บที่มีความสามารถสูงในการเขียนโค้ดและสร้าง artifact ได้อย่างสมบูรณ์แบบตามคำสั่งของผู้ใช้ คุณจะทำงานร่วมกับผู้ใช้เพื่อแก้ไขและปรับปรุง artifact ที่มีอยู่ สำหรับการตอบกลับที่เกี่ยวข้องกับโค้ด ให้คุณตอบกลับเฉพาะโค้ดที่เกี่ยวข้องกับฟังก์ชันหรือส่วนที่แก้ไขเท่านั้น หากคุณให้ไฟล์โค้ดที่สมบูรณ์ คุณจะต้องตอบกลับเฉพาะโค้ด HTML เท่านั้น โดยไม่มีข้อความอธิบายใดๆ เพิ่มเติม เมื่อใดก็ตามที่ได้รับคำสั่ง คุณจะตรวจสอบการรันโค้ดอีกครั้งเพื่อให้แน่ใจว่าไม่มีข้อผิดพลาดในการเอาต์พุต`;
-}
-} else if (systemPreset === 'multimodal') {
-systemPrompt = `คุณคือผู้ช่วย Multi-Modal AI ที่เชี่ยวชาญในการวิเคราะห์รูปภาพ, ข้อความ, และเอกสาร. คุณสามารถตอบคำถามเกี่ยวกับเนื้อหาในไฟล์ที่แนบมา, สรุปข้อมูล, หรือดึงข้อความจากรูปภาพโดยใช้ OCR. เมื่อมีการร้องขอวิเคราะห์รูปภาพหรือ OCR ให้ใช้ความสามารถเหล่านั้นเพื่อตอบคำถาม. หากมีข้อมูลจากไฟล์แนบหลายประเภท ให้พยายามผสานรวมข้อมูลเหล่านั้นเพื่อการตอบสนองที่ครอบคลุม.`;
-} else { // general
-systemPrompt = `คุณคือ AI ผู้ช่วยที่สุภาพและให้ข้อมูลที่เป็นประโยชน์`;
-}
-
-// Clear previous error
-errorMessageDiv.style.display = 'none';
-
-if (!apiKey && selectedProvider !== 'ollama') {
-displayError("กรุณาใส่ API Key ของคุณ");
-return;
-}
-
-let userContent = [];
-if (userMessage) {
-userContent.push({ type: 'text', text: userMessage });
-}
-
-for (const fileObj of attachedFiles) {
-const file = fileObj.file;
-if (fileObj.type === 'image') {
-try {
-const base64 = await readFileAsBase64(file);
-userContent.push({ type: 'image_url', image_url: { url: `data:${file.type};base64,${base64}` } });
-addMessage(`แนบรูปภาพ: ${file.name}`, 'user', `data:${file.type};base64,${base64}`);
-} catch (error) {
-displayError(`ไม่สามารถอ่านไฟล์รูปภาพได้: ${error.message}`);
-return;
-}
-} else if (fileObj.type === 'text') {
-try {
-const textContent = await readFileAsText(file);
-userContent.push({ type: 'text', text: `เนื้อหาจากไฟล์ ${file.name}:\n\`\`\`\n${textContent}\n\`\`\`` });
-addMessage(`แนบไฟล์ข้อความ: ${file.name}`, 'user');
-} catch (error) {
-displayError(`ไม่สามารถอ่านไฟล์ข้อความได้: ${error.message}`);
-return;
-}
-} else if (fileObj.type === 'pdf' || fileObj.type === 'docx' || fileObj.type === 'xlsx') {
-// For PDF, DOCX, XLSX, we need to inform the model that we have these files
-// Actual content extraction would require a backend or a dedicated library
-userContent.push({ type: 'text', text: `(มีไฟล์ ${fileObj.type.toUpperCase()} แนบมา: ${file.name}. โปรดพิจารณาข้อมูลจากไฟล์นี้ในการตอบกลับ หากต้องการวิเคราะห์เนื้อหา ให้แจ้งฉันเพื่อประมวลผล)` });
-addMessage(`แนบไฟล์ ${fileObj.type.toUpperCase()}: ${file.name}`, 'user');
-}
-}
-
-if (userContent.length === 0) {
-displayError("กรุณาพิมพ์ข้อความหรือแนบไฟล์");
-return;
-}
-
-addMessage(userMessage, 'user');
-userInput.value = '';
-fileNameSpan.textContent = '';
-attachedFiles = []; // Clear attached files after sending
-
-// Prepare message for API
-let messages = [];
-if (systemPrompt) {
-messages.push({ role: 'system', content: systemPrompt });
-}
-
-// Add previous conversation history
-messages = messages.concat(conversationHistory);
-
-// Add current user message
-if (selectedProvider === 'anthropic' || selectedProvider === 'openai' || selectedProvider === 'xai' || selectedProvider === 'google') {
-messages.push({ role: 'user', content: userContent });
-} else if (selectedProvider === 'ollama') {
-// Ollama's chat endpoint expects content as string for text models
-// For vision models, it can handle image_data
-let contentForOllama = [];
-for (const part of userContent) {
-if (part.type === 'text') {
-contentForOllama.push(part.text);
-} else if (part.type === 'image_url') {
-const base64 = part.image_url.url.split(',')[1];
-contentForOllama.push({ type: 'image', image_data: base64 });
-}
-}
-messages.push({ role: 'user', content: contentForOllama.join('\n') }); // Concatenate text parts
-} else if (selectedProvider === 'huggingface') {
-// HuggingFace has various model types and endpoints.
-// This is a simplified approach. For actual multi-modal, we'd need separate calls.
-// For general text models, it expects a single string input.
-// For vision models, it expects image bytes.
-// We will handle multi-modal specifically below.
-messages.push({ role: 'user', content: userContent.filter(p => p.type === 'text').map(p => p.text).join('\n') });
-}
-
-let requestBody = {};
-let url = providerInfo.endpoint;
-let headers = {
-'Content-Type': 'application/json',
-};
-
-if (selectedProvider !== 'ollama') { // Ollama doesn't need API key in header
-headers['Authorization'] = `Bearer ${apiKey}`;
-}
-
-// Logic for Multi-Modal HuggingFace calls
-if (selectedProvider === 'huggingface' && (attachedFiles.some(f => f.type === 'image') || systemPreset === 'multimodal')) {
-const visionModel = visionModelSelect.value;
-const ocrModel = ocrModelSelect.value;
-let imageFile = attachedFiles.find(f => f.type === 'image')?.file;
-
-if (!imageFile && systemPreset === 'multimodal') {
-displayError("โปรดแนบรูปภาพสำหรับการวิเคราะห์ Multi-Modal.");
-return;
-}
-
-let imageBase64 = null;
-if (imageFile) {
-try {
-imageBase64 = await readFileAsBase64(imageFile);
-} catch (error) {
-displayError(`ไม่สามารถอ่านไฟล์รูปภาพได้: ${error.message}`);
-return;
-}
-}
-
-let visionAnalysis = '';
-let ocrText = '';
-
-// Step 1: Call Vision Model (if applicable)
-if (visionModel && visionModel !== 'none' && imageBase64) {
-const visionEndpoint = PROVIDERS.huggingface.specificModelEndpoints[visionModel] || `${providerInfo.endpoint}${visionModel}`;
-try {
-const visionResponse = await fetch(visionEndpoint, {
-method: 'POST',
-headers: {
-'Authorization': `Bearer ${apiKey}`,
-'Content-Type': 'application/json'
-},
-body: JSON.stringify({
-inputs: `data:image/jpeg;base64,${imageBase64}`,
-parameters: { return_full_text: false } // Adjust parameters as needed by model
-})
-});
-if (!visionResponse.ok) {
-const errorData = await visionResponse.json();
-throw new Error(`Vision API error: ${visionResponse.statusText} - ${JSON.stringify(errorData)}`);
-}
-const visionData = await visionResponse.json();
-// Parse visionData based on model output format (this is generic)
-visionAnalysis = `\n[ข้อมูลจาก Vision Model (${visionModel})]:\n${JSON.stringify(visionData, null, 2)}\n`;
-} catch (error) {
-displayError(`ข้อผิดพลาดในการวิเคราะห์รูปภาพด้วย ${visionModel}: ${error.message}`);
-return;
-}
-}
-
-// Step 2: Call OCR Model (if applicable)
-if (ocrModel && ocrModel !== 'none' && imageBase64) {
-const ocrEndpoint = PROVIDERS.huggingface.specificModelEndpoints[ocrModel] || `${providerInfo.endpoint}${ocrModel}`;
-try {
-const ocrResponse = await fetch(ocrEndpoint, {
-method: 'POST',
-headers: {
-'Authorization': `Bearer ${apiKey}`,
-'Content-Type': 'application/json'
-},
-body: JSON.stringify({
-inputs: `data:image/jpeg;base64,${imageBase64}`
-})
-});
-if (!ocrResponse.ok) {
-const errorData = await ocrResponse.json();
-throw new Error(`OCR API error: ${ocrResponse.statusText} - ${JSON.stringify(errorData)}`);
-}
-const ocrData = await ocrResponse.json();
-// Parse OCR data based on model output format (this is generic)
-ocrText = `\n[ข้อมูลจาก OCR Model (${ocrModel})]:\n${ocrData.extracted_text || JSON.stringify(ocrData, null, 2)}\n`;
-} catch (error) {
-displayError(`ข้อผิดพลาดในการทำ OCR ด้วย ${ocrModel}: ${error.message}`);
-return;
-}
-}
-
-// Combine vision/OCR results with user's text message
-let combinedPrompt = userMessage;
-if (visionAnalysis) combinedPrompt += visionAnalysis;
-if (ocrText) combinedPrompt += ocrText;
-
-// Now send the combined prompt to a text-based model if needed, or just display results
-// For HuggingFace, if a primary text model is chosen, we'd send to it.
-// Assuming the main model select should be a text model for combined analysis
-url = `${providerInfo.endpoint}${selectedModel}`; // Use the primary selected model for text processing
-requestBody = {
-inputs: combinedPrompt,
-parameters: {
-max_new_tokens: 1000,
-temperature: 0.7,
-top_p: 0.9,
-},
-options: {
-use_cache: false // Prevent caching for dynamic inputs
-}
-};
-headers['Content-Type'] = 'application/json'; // Ensure correct content type for JSON body
-
-} else if (selectedProvider === 'huggingface') {
-// Normal text-only HuggingFace request
-url = `${providerInfo.endpoint}${selectedModel}`;
-requestBody = {
-inputs: messages[messages.length - 1].content.filter(p => p.type === 'text').map(p => p.text).join('\n'),
-parameters: {
-max_new_tokens: 1000,
-temperature: 0.7,
-top_p: 0.9,
-},
-options: {
-use_cache: false
-}
-};
-headers['Content-Type'] = 'application/json';
-
-} else if (selectedProvider === 'google') {
-url += `${selectedModel}:generateContent?key=${apiKey}`;
-let contents = [];
-let currentContent = {
-role: "user",
-parts: []
-};
-
-for (const part of userContent) {
-if (part.type === 'text') {
-currentContent.parts.push({ text: part.text });
-} else if (part.type === 'image_url') {
-currentContent.parts.push({
-inline_data: {
-mime_type: part.image_url.url.split(';')[0].split(':')[1],
-data: part.image_url.url.split(',')[1]
-}
-});
-}
-}
-contents.push(currentContent);
-
-// Convert history to Gemini format
-for (let i = 0; i < conversationHistory.length; i++) {
-const item = conversationHistory[i];
-if (item.role === 'system') {
-// Gemini doesn't have a direct 'system' role in chat, often absorbed into initial user prompt or ignored.
-// For simplicity, we can prepend to the first user message or ignore.
-// For now, we'll try to add it as a user message that guides the AI.
-if (i === 0 && item.content) { // Only apply if it's the very first system prompt
-contents.unshift({
-role: "user",
-parts: [{ text: item.content }]
-});
-contents.unshift({
-role: "model",
-parts: [{ text: "เข้าใจแล้ว." }]
-});
-}
-} else if (item.role === 'user') {
-contents.push({ role: "user", parts: [{ text: item.content.text || item.content }] });
-} else if (item.role === 'assistant') {
-contents.push({ role: "model", parts: [{ text: item.content }] });
-}
-}
-
-requestBody = {
-contents: contents,
-generationConfig: {
-maxOutputTokens: 2000,
-temperature: 0.7,
-topP: 0.9
-}
-};
-headers['Content-Type'] = 'application/json';
-
-
-} else if (selectedProvider === 'ollama') {
-requestBody = {
-model: selectedModel,
-messages: messages,
-stream: providerInfo.isStream
-};
-} else if (selectedProvider === 'anthropic') {
-requestBody = {
-model: selectedModel,
-messages: messages.map(msg => ({ // Anthropic expects 'user' and 'assistant' roles, 'system' handled by system_prompt
-role: msg.role === 'system' ? 'user' : msg.role, // Temporarily map system to user for Anthropic's initial context
-content: msg.content
-})),
-max_tokens: 4096,
-stream: providerInfo.isStream
-};
-if (systemPrompt) {
-requestBody.system = systemPrompt; // Anthropic's dedicated system_prompt field
-}
-} else { // OpenAI, Groq, xAI
-requestBody = {
-model: selectedModel,
-messages: messages.map(msg => {
-if (Array.isArray(msg.content) && selectedProvider === 'openai') {
-// OpenAI can take array of content parts
-return { role: msg.role, content: msg.content };
-} else if (msg.role === 'system') {
-return { role: 'system', content: msg.content };
-} else {
-// For other providers or if content is string, just use text
-return { role: msg.role, content: typeof msg.content === 'string' ? msg.content : (msg.content.text || msg.content.map(p => p.text).join('\n')) };
-}
-}),
-stream: providerInfo.isStream
-};
-}
-
-try {
-const response = await fetch(url, {
-method: 'POST',
-headers: headers,
-body: JSON.stringify(requestBody)
-});
-
-if (!response.ok) {
-const errorData = await response.json();
-throw new Error(`API Error: ${response.status} - ${JSON.stringify(errorData)}`);
-}
-
-if (providerInfo.isStream && selectedProvider !== 'google') { // Handle streaming for OpenAI, Groq, xAI, Ollama
-const reader = response.body.getReader();
-const decoder = new TextDecoder('utf-8');
-let fullResponse = '';
-let assistantMessageDiv = null;
-
-while (true) {
-const { done, value } = await reader.read();
-if (done) break;
-
-const chunk = decoder.decode(value);
-if (selectedProvider === 'ollama') {
-// Ollama can send multiple JSON objects in one chunk
-chunk.split('\n').filter(Boolean).forEach(line => {
-try {
-const data = JSON.parse(line);
-if (data.done === false && data.message && data.message.content) {
-fullResponse += data.message.content;
-if (!assistantMessageDiv) {
-assistantMessageDiv = document.createElement('div');
-assistantMessageDiv.classList.add('message95', 'ai');
-messagesDiv.appendChild(assistantMessageDiv);
-messagesDiv.scrollTop = messagesDiv.scrollHeight;
-}
-assistantMessageDiv.innerHTML = formatText(fullResponse); // Update HTML content
-} else if (data.done === true) {
-// Done, finalize the message
-}
-} catch (e) {
-console.error("Error parsing Ollama stream chunk:", e, line);
-}
-});
-} else if (selectedProvider === 'huggingface') {
-// HuggingFace typically does not stream, so this block might not be hit for HF models
-// If a streaming HF endpoint is used, this logic would need to be adapted
-fullResponse += chunk;
-if (!assistantMessageDiv) {
-assistantMessageDiv = document.createElement('div');
-assistantMessageDiv.classList.add('message95', 'ai');
-messagesDiv.appendChild(assistantMessageDiv);
-messagesDiv.scrollTop = messagesDiv.scrollHeight;
-}
-assistantMessageDiv.innerHTML = formatText(fullResponse); // Update HTML content
-} else { // OpenAI, Groq, xAI
-chunk.split('\n').filter(Boolean).forEach(line => {
-if (line.startsWith('data: ')) {
-const data = JSON.parse(line.substring(6));
-if (data.choices && data.choices.length > 0) {
-const delta = data.choices[0].delta;
-if (delta && delta.content) {
-fullResponse += delta.content;
-if (!assistantMessageDiv) {
-assistantMessageDiv = document.createElement('div');
-assistantMessageDiv.classList.add('message95', 'ai');
-messagesDiv.appendChild(assistantMessageDiv);
-messagesDiv.scrollTop = messagesDiv.scrollHeight;
-}
-assistantMessageDiv.innerHTML = formatText(fullResponse); // Update HTML content
-}
-}
-}
-});
-}
-}
-if (assistantMessageDiv) {
-hljs.highlightAll(); // Re-highlight all code blocks after stream finishes
-conversationHistory.push({ role: 'assistant', content: fullResponse });
-const codeMatch = fullResponse.match(/```(\w+)?\n([\s\S]+?)\n```/);
-if (codeMatch) {
-lastDetectedCode = codeMatch[2];
-savedCodeDisplay.textContent = lastDetectedCode;
-contextSaveArea.style.display = 'block';
-messagesDiv.scrollTop = messagesDiv.scrollHeight;
-}
-}
-} else if (selectedProvider === 'google') {
-const result = await response.json();
-let fullResponse = '';
-if (result.candidates && result.candidates[0] && result.candidates[0].content && result.candidates[0].content.parts) {
-fullResponse = result.candidates[0].content.parts.map(part => part.text).join('');
-}
-addMessage(fullResponse, 'ai');
-conversationHistory.push({ role: 'assistant', content: fullResponse });
-
-const codeMatch = fullResponse.match(/```(\w+)?\n([\s\S]+?)\n```/);
-if (codeMatch) {
-lastDetectedCode = codeMatch[2];
-savedCodeDisplay.textContent = lastDetectedCode;
-contextSaveArea.style.display = 'block';
-messagesDiv.scrollTop = messagesDiv.scrollHeight;
-}
-hljs.highlightAll();
-
-} else { // Handle non-streaming responses (e.g., HuggingFace Inference API, Anthropic)
-const data = await response.json();
-let assistantResponseText = '';
-if (selectedProvider === 'anthropic') {
-assistantResponseText = data.content[0].text;
-} else if (selectedProvider === 'huggingface') {
-// HuggingFace response varies greatly by model
-// This is a generic handling for text generation models
-if (Array.isArray(data) && data[0] && data[0].generated_text) {
-assistantResponseText = data[0].generated_text;
-} else {
-assistantResponseText = JSON.stringify(data, null, 2); // Fallback for unexpected formats
-}
-} else { // Fallback for other non-streaming or new providers
-assistantResponseText = data.choices[0].message.content;
-}
-
-addMessage(assistantResponseText, 'ai');
-conversationHistory.push({ role: 'assistant', content: assistantResponseText });
-
-const codeMatch = assistantResponseText.match(/```(\w+)?\n([\s\S]+?)\n```/);
-if (codeMatch) {
-lastDetectedCode = codeMatch[2];
-savedCodeDisplay.textContent = lastDetectedCode;
-contextSaveArea.style.display = 'block';
-messagesDiv.scrollTop = messagesDiv.scrollHeight;
-}
-hljs.highlightAll();
-}
-
-} catch (error) {
-displayError(error.message);
-console.error("Fetch error:", error);
-} finally {
-chatForm.querySelector('button[type="submit"]').disabled = false;
-userInput.focus();
-}
-});
-</script>
-</body>
-</html>
+systemPrompt = `คุณคือผู้ช่วยพัฒนาเว็บที่มีความสามารถสูงในการเขียนโค้ดและสร้าง artifact ได้อย่างสมบูรณ์แบบตามคำสั่งของผู้ใช้ คุณจะทำงานร่วมกับผู้ใช้เพื่อแก้ไขและปรับปรุง artifact ที่มีอยู่ โค้ดทั้งหมดจะต้องถูกเขียนใน code block เดียว เพื่อเป็นไฟล์โค้ดที่สมบูรณ์และพร้อมใช้งาน โดยไม่มีการแยกโค้ด HTML และ JavaScript ในการตอบกลับของคุณ ให้เอาต์พุตเฉพาะโค้ด HTML โดยไม่มีข้อความอธิบายใดๆ เพิ่มเติม เมื่อใดก็ตามที่ได้รับคำสั่ง คุณจะตรวจสอบการรันโค้ดอีกครั้งเพื่อให้แน่ใจว่าไม่มีข้อผิดพลาดในการเอาต์พุต ผู้ใช้ได้ให้โค้ด HTML/CSS/JS ล่าสุดแก่คุณแล้ว และต้องการให้คุณช่วยเหลือในการพัฒนาต่อ. โค้ดปัจจุบันคือ: \n\`\`\`html\n${lastDetectedCode}\n\`\`\`\nเป้าหมายของโค้ดนี้คือ: ${saved
+</html>
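Note: the committed file ends at new line 737, cut off inside the `code-full` template literal (the final added line stops at `${saved`) just before the closing `</html>`, so the HuggingFace request logic that would replace the removed multi-provider code is not present in this commit. Below is a minimal sketch of what such a call could look like, reusing the endpoint pattern, payload shape (`inputs` / `parameters` / `options`), and Bearer-token header from the deleted PROVIDERS.huggingface branch; the helper name `queryHuggingFace` and its parameter defaults are illustrative assumptions, not code from the commit.

// Sketch only: a HuggingFace-only stand-in for the removed multi-provider
// request logic. Endpoint and payload shape follow the deleted
// PROVIDERS.huggingface entry; function name and defaults are assumptions.
async function queryHuggingFace(model, prompt, apiKey) {
  const url = `https://api-inference.huggingface.co/models/${model}`;
  const response = await fetch(url, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      inputs: prompt,
      // Same generation parameters the removed code sent.
      parameters: { max_new_tokens: 1000, temperature: 0.7, top_p: 0.9 },
      options: { use_cache: false }
    })
  });
  if (!response.ok) {
    const errorData = await response.json();
    throw new Error(`API Error: ${response.status} - ${JSON.stringify(errorData)}`);
  }
  // Text-generation models typically return [{ generated_text: "..." }];
  // fall back to raw JSON for other shapes, as the removed handler did.
  const data = await response.json();
  return (Array.isArray(data) && data[0] && data[0].generated_text)
    ? data[0].generated_text
    : JSON.stringify(data, null, 2);
}

In the trimmed-down submit handler it would be invoked with the current selections, e.g. `const reply = await queryHuggingFace(modelSelect.value, userMessage, currentApiKey);`.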