Update app.py
app.py CHANGED
@@ -1,64 +1,467 @@
-from huggingface_hub import InferenceClient
-""
-)

#!/usr/bin/env python3

import tkinter as tk
from tkinter import ttk, scrolledtext, messagebox
import threading
import queue
import os
from datetime import datetime
from typing import List, Dict, Generator
import warnings
warnings.filterwarnings("ignore")
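# Note: this silences *all* warnings, including torch/transformers notices;
# comment it out when debugging.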

# Try to import required libraries
try:
    import torch
    from transformers import (
        AutoModelForCausalLM,
        AutoTokenizer,
        TextIteratorStreamer,
        pipeline
    )
    TRANSFORMERS_AVAILABLE = True
except ImportError:
    TRANSFORMERS_AVAILABLE = False
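# This flag lets the window open even when torch/transformers are missing;
# check_dependencies() disables the buttons that need them.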

class CPULLMChatApp:
    def __init__(self, root):
        self.root = root
        self.root.title("CPU LLM Chat Application")
        self.root.geometry("1000x700")

        # Chat history
        self.chat_history: List[Dict[str, str]] = []

        # Model variables
        self.model = None
        self.tokenizer = None
        self.generator = None
        self.model_loaded = False

        # Threading
        self.generation_thread = None
        self.stop_generation = False
        self.response_queue = queue.Queue()
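        # Worker threads never touch Tk widgets directly: they put
        # (action, data) tuples on this queue, and check_response_queue()
        # drains it on the main loop.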

        # Configuration
        self.max_input_length = 2048
        self.max_new_tokens = tk.IntVar(value=256)  # Reduced for CPU
        self.temperature = tk.DoubleVar(value=0.7)
        self.top_p = tk.DoubleVar(value=0.9)
        self.top_k = tk.IntVar(value=50)
        self.repetition_penalty = tk.DoubleVar(value=1.1)
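        # Conservative sampling defaults for small models: higher temperature
        # or top_p adds variety at the cost of coherence, and a repetition
        # penalty above 1.0 discourages the loops small models tend to produce.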

        self.setup_ui()
        self.check_dependencies()

    def setup_ui(self):
        # Create main frame
        main_frame = ttk.Frame(self.root, padding="10")
        main_frame.grid(row=0, column=0, sticky=(tk.W, tk.E, tk.N, tk.S))

        # Configure grid weights
        self.root.columnconfigure(0, weight=1)
        self.root.rowconfigure(0, weight=1)
        main_frame.columnconfigure(0, weight=1)
        main_frame.rowconfigure(1, weight=1)

        # Title and model selection
        title_frame = ttk.Frame(main_frame)
        title_frame.grid(row=0, column=0, sticky=(tk.W, tk.E), pady=(0, 10))
        title_frame.columnconfigure(1, weight=1)

        ttk.Label(title_frame, text="CPU LLM Chat", font=("Arial", 16, "bold")).grid(row=0, column=0, sticky=tk.W)

        # Model selection
        ttk.Label(title_frame, text="Model:").grid(row=0, column=2, padx=(20, 5))
        self.model_var = tk.StringVar(value="microsoft/DialoGPT-medium")
        model_combo = ttk.Combobox(title_frame, textvariable=self.model_var, width=30)
        model_combo['values'] = [
            "microsoft/DialoGPT-medium",
            "microsoft/DialoGPT-small",
            "distilgpt2",
            "gpt2",
            "facebook/blenderbot-400M-distill"
        ]
        model_combo.grid(row=0, column=3, padx=(0, 10))
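        # Note: facebook/blenderbot-400M-distill is an encoder-decoder model,
        # so AutoModelForCausalLM.from_pretrained() in _load_model_thread
        # rejects it; in practice only the GPT-style entries load successfully.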

        self.load_model_btn = ttk.Button(title_frame, text="Load Model", command=self.load_model)
        self.load_model_btn.grid(row=0, column=4)

        # Chat area
        chat_frame = ttk.Frame(main_frame)
        chat_frame.grid(row=1, column=0, sticky=(tk.W, tk.E, tk.N, tk.S), pady=(0, 10))
        chat_frame.columnconfigure(0, weight=1)
        chat_frame.rowconfigure(0, weight=1)

        # Chat history display
        self.chat_display = scrolledtext.ScrolledText(
            chat_frame,
            wrap=tk.WORD,
            state=tk.DISABLED,
            font=("Arial", 10)
        )
        self.chat_display.grid(row=0, column=0, sticky=(tk.W, tk.E, tk.N, tk.S))

        # Configure tags for styling
        self.chat_display.tag_configure("user", foreground="blue", font=("Arial", 10, "bold"))
        self.chat_display.tag_configure("assistant", foreground="green", font=("Arial", 10))
        self.chat_display.tag_configure("system", foreground="gray", font=("Arial", 9, "italic"))

        # Input area
        input_frame = ttk.Frame(main_frame)
        input_frame.grid(row=2, column=0, sticky=(tk.W, tk.E), pady=(0, 10))
        input_frame.columnconfigure(0, weight=1)

        # Input text (Ctrl+Enter sends; returning "break" stops Tk's default
        # binding from also inserting a newline into the widget)
        self.input_text = scrolledtext.ScrolledText(input_frame, height=3, wrap=tk.WORD)
        self.input_text.grid(row=0, column=0, sticky=(tk.W, tk.E), padx=(0, 10))
        self.input_text.bind("<Control-Return>", lambda e: self.send_message() or "break")

        # Send button
        button_frame = ttk.Frame(input_frame)
        button_frame.grid(row=0, column=1, sticky=(tk.N, tk.S))

        self.send_btn = ttk.Button(button_frame, text="Send", command=self.send_message)
        self.send_btn.pack(pady=(0, 5))

        self.stop_btn = ttk.Button(button_frame, text="Stop", command=self.stop_generation_func, state=tk.DISABLED)
        self.stop_btn.pack(pady=(0, 5))

        self.clear_btn = ttk.Button(button_frame, text="Clear", command=self.clear_chat)
        self.clear_btn.pack()

        # Parameters panel
        params_frame = ttk.LabelFrame(main_frame, text="Generation Parameters", padding="5")
        params_frame.grid(row=3, column=0, sticky=(tk.W, tk.E), pady=(0, 10))
        params_frame.columnconfigure(1, weight=1)
        params_frame.columnconfigure(3, weight=1)

        # Max tokens (ttk.Scale reports floats, so snap the IntVar to whole numbers)
        ttk.Label(params_frame, text="Max Tokens:").grid(row=0, column=0, sticky=tk.W, padx=(0, 5))
        ttk.Scale(params_frame, from_=50, to=512, variable=self.max_new_tokens, orient=tk.HORIZONTAL,
                  command=lambda v: self.max_new_tokens.set(round(float(v)))).grid(row=0, column=1, sticky=(tk.W, tk.E), padx=(0, 10))
        ttk.Label(params_frame, textvariable=self.max_new_tokens).grid(row=0, column=2, padx=(0, 20))

        # Temperature
        ttk.Label(params_frame, text="Temperature:").grid(row=1, column=0, sticky=tk.W, padx=(0, 5))
        ttk.Scale(params_frame, from_=0.1, to=2.0, variable=self.temperature, orient=tk.HORIZONTAL).grid(row=1, column=1, sticky=(tk.W, tk.E), padx=(0, 10))
        temp_label = ttk.Label(params_frame, text="")
        temp_label.grid(row=1, column=2, padx=(0, 20))

        # Top-p
        ttk.Label(params_frame, text="Top-p:").grid(row=0, column=3, sticky=tk.W, padx=(0, 5))
        ttk.Scale(params_frame, from_=0.1, to=1.0, variable=self.top_p, orient=tk.HORIZONTAL).grid(row=0, column=4, sticky=(tk.W, tk.E), padx=(0, 10))
        top_p_label = ttk.Label(params_frame, text="")
        top_p_label.grid(row=0, column=5)

        # Top-k (snapped to whole numbers as above)
        ttk.Label(params_frame, text="Top-k:").grid(row=1, column=3, sticky=tk.W, padx=(0, 5))
        ttk.Scale(params_frame, from_=1, to=100, variable=self.top_k, orient=tk.HORIZONTAL,
                  command=lambda v: self.top_k.set(round(float(v)))).grid(row=1, column=4, sticky=(tk.W, tk.E), padx=(0, 10))
        ttk.Label(params_frame, textvariable=self.top_k).grid(row=1, column=5)

        # Update parameter labels
        def update_temp_label(*args):
            temp_label.config(text=f"{self.temperature.get():.2f}")

        def update_top_p_label(*args):
            top_p_label.config(text=f"{self.top_p.get():.2f}")

        self.temperature.trace_add("write", update_temp_label)
        self.top_p.trace_add("write", update_top_p_label)
        update_temp_label()
        update_top_p_label()

        # Status bar
        self.status_var = tk.StringVar(value="Ready - Please load a model first")
        status_bar = ttk.Label(main_frame, textvariable=self.status_var, relief=tk.SUNKEN, anchor=tk.W)
        status_bar.grid(row=4, column=0, sticky=(tk.W, tk.E))

        # Example messages
        examples_frame = ttk.LabelFrame(main_frame, text="Example Messages", padding="5")
        examples_frame.grid(row=5, column=0, sticky=(tk.W, tk.E), pady=(10, 0))

        examples = [
            "Hello! How are you today?",
            "Tell me a short joke.",
            "What's the weather like?",
            "Explain quantum computing in simple terms."
        ]

        for i, example in enumerate(examples):
            btn = ttk.Button(examples_frame, text=example,
                             command=lambda e=example: self.set_input_text(e))
            btn.grid(row=i // 2, column=i % 2, sticky=(tk.W, tk.E), padx=5, pady=2)

        examples_frame.columnconfigure(0, weight=1)
        examples_frame.columnconfigure(1, weight=1)

    def check_dependencies(self):
        if not TRANSFORMERS_AVAILABLE:
            self.add_system_message("❌ Transformers library not found. Please install: pip install torch transformers")
            self.send_btn.config(state=tk.DISABLED)
            self.load_model_btn.config(state=tk.DISABLED)
        else:
            self.add_system_message("✅ Dependencies loaded. Please select and load a model.")

    def set_input_text(self, text):
        self.input_text.delete("1.0", tk.END)
        self.input_text.insert("1.0", text)
        self.input_text.focus()

    def add_system_message(self, message):
        self.chat_display.config(state=tk.NORMAL)
        self.chat_display.insert(tk.END, f"[{datetime.now().strftime('%H:%M:%S')}] {message}\n", "system")
        self.chat_display.config(state=tk.DISABLED)
        self.chat_display.see(tk.END)

    def add_user_message(self, message):
        self.chat_display.config(state=tk.NORMAL)
        self.chat_display.insert(tk.END, "\n👤 You: ", "user")
        self.chat_display.insert(tk.END, f"{message}\n", "user")
        self.chat_display.config(state=tk.DISABLED)
        self.chat_display.see(tk.END)

    def add_assistant_message(self, message):
        self.chat_display.config(state=tk.NORMAL)
        self.chat_display.insert(tk.END, "🤖 Assistant: ", "assistant")
        self.chat_display.insert(tk.END, f"{message}\n", "assistant")
        self.chat_display.config(state=tk.DISABLED)
        self.chat_display.see(tk.END)

    def update_assistant_message(self, additional_text):
        self.chat_display.config(state=tk.NORMAL)
        self.chat_display.insert(tk.END, additional_text, "assistant")
        self.chat_display.config(state=tk.DISABLED)
        self.chat_display.see(tk.END)

    def load_model(self):
        if not TRANSFORMERS_AVAILABLE:
            messagebox.showerror("Error", "Transformers library not available")
            return

        model_name = self.model_var.get()
        if not model_name:
            messagebox.showwarning("Warning", "Please select a model")
            return

        # Disable buttons during loading
        self.load_model_btn.config(state=tk.DISABLED)
        self.send_btn.config(state=tk.DISABLED)
        self.status_var.set(f"Loading model: {model_name}...")

        # Load model in separate thread
        thread = threading.Thread(target=self._load_model_thread, args=(model_name,))
        thread.daemon = True
        thread.start()

    def _load_model_thread(self, model_name):
        try:
            # Marshal UI updates through root.after(): Tk widgets are not
            # thread-safe, so worker threads must not touch them directly.
            self.root.after(0, self.add_system_message, f"Loading model: {model_name}")

            # Force CPU usage and optimize for CPU
            device = "cpu"
            torch_dtype = torch.float32  # Use float32 for CPU

            # Load tokenizer
            self.tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left")
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token

            # Load model with CPU optimizations
            self.model = AutoModelForCausalLM.from_pretrained(
                model_name,
                torch_dtype=torch_dtype,
                device_map={"": device},
                low_cpu_mem_usage=True
            )
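            # low_cpu_mem_usage (above) loads weights without first
            # materializing a full randomly-initialized copy, lowering peak RAM.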

            # Set model to evaluation mode
            self.model.eval()

            self.model_loaded = True

            # Update UI on main thread
            self.root.after(0, self._model_loaded_callback, model_name)

        except Exception as e:
            error_msg = f"Failed to load model: {str(e)}"
            self.root.after(0, self._model_load_error_callback, error_msg)

    def _model_loaded_callback(self, model_name):
        self.add_system_message(f"✅ Model loaded successfully: {model_name}")
        self.status_var.set(f"Model loaded: {model_name}")
        self.load_model_btn.config(state=tk.NORMAL)
        self.send_btn.config(state=tk.NORMAL)

    def _model_load_error_callback(self, error_msg):
        self.add_system_message(f"❌ {error_msg}")
        self.status_var.set("Model loading failed")
        self.load_model_btn.config(state=tk.NORMAL)
        messagebox.showerror("Model Loading Error", error_msg)

    def send_message(self):
        if not self.model_loaded:
            messagebox.showwarning("Warning", "Please load a model first")
            return

        message = self.input_text.get("1.0", tk.END).strip()
        if not message:
            return

        # Add user message to chat
        self.add_user_message(message)
        self.input_text.delete("1.0", tk.END)

        # Disable send button and enable stop button
        self.send_btn.config(state=tk.DISABLED)
        self.stop_btn.config(state=tk.NORMAL)
        self.stop_generation = False

        # Add to chat history
        self.chat_history.append({"role": "user", "content": message})

        # Start generation thread
        self.generation_thread = threading.Thread(target=self._generate_response, args=(message,))
        self.generation_thread.daemon = True
        self.generation_thread.start()

        # Start checking for responses
        self.check_response_queue()

    def _generate_response(self, message):
        try:
            # status_var is Tk state, so update it via the main loop
            self.root.after(0, self.status_var.set, "Generating response...")

            # Prepare input
            if "DialoGPT" in self.model_var.get():
                # For DialoGPT, use conversation history
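                # DialoGPT was trained on turns joined by its EOS token, so
                # the prompt is the recent turns (user and assistant alike)
                # encoded as "<turn><eos><turn><eos>...".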
                chat_history_ids = None
                for turn in self.chat_history[-5:]:  # Use last 5 turns
                    new_user_input_ids = self.tokenizer.encode(
                        turn["content"] + self.tokenizer.eos_token,
                        return_tensors='pt'
                    )

                    if chat_history_ids is not None:
                        bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1)
                    else:
                        bot_input_ids = new_user_input_ids

                    chat_history_ids = bot_input_ids

                input_ids = chat_history_ids
            else:
                # For other models, use simple encoding
                input_ids = self.tokenizer.encode(message, return_tensors='pt')

            # Limit input length
            if input_ids.shape[1] > self.max_input_length:
                input_ids = input_ids[:, -self.max_input_length:]

            # Generation parameters
            generation_kwargs = {
                'input_ids': input_ids,
                'max_new_tokens': self.max_new_tokens.get(),
                'temperature': self.temperature.get(),
                'top_p': self.top_p.get(),
                'top_k': self.top_k.get(),
                'repetition_penalty': self.repetition_penalty.get(),
                'do_sample': True,
                'pad_token_id': self.tokenizer.pad_token_id,
                'eos_token_id': self.tokenizer.eos_token_id,
                'no_repeat_ngram_size': 2,
            }
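            # GPT-2-family tokenizers ship without a pad token; the loader
            # aliased pad to EOS above, which keeps generate() from warning.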

            # Create streamer for real-time output
            streamer = TextIteratorStreamer(
                self.tokenizer,
                skip_prompt=True,
                skip_special_tokens=True,
                timeout=30.0
            )
            generation_kwargs['streamer'] = streamer
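            # The streamer yields decoded text fragments as generate()
            # produces tokens; skip_prompt drops the echoed input, and the
            # timeout makes iteration fail instead of blocking forever.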

            # Start generation in a separate thread
            generation_thread = threading.Thread(
                target=self.model.generate,
                kwargs=generation_kwargs,
                daemon=True  # don't keep the process alive if the app closes mid-generation
            )
            generation_thread.start()

            # Stream the response
            self.response_queue.put(("start", ""))

            generated_text = ""
            for new_text in streamer:
                if self.stop_generation:
                    break
                generated_text += new_text
                self.response_queue.put(("update", new_text))

            if not self.stop_generation:
                # Add to chat history
                self.chat_history.append({"role": "assistant", "content": generated_text})
                self.response_queue.put(("complete", generated_text))
            else:
                self.response_queue.put(("stopped", ""))

        except Exception as e:
            self.response_queue.put(("error", str(e)))

    def check_response_queue(self):
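        # Runs on the Tk main loop: drain every queued event, update the UI,
        # and re-arm with after() unless a terminal event
        # (complete/stopped/error) has arrived.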
        try:
            while True:
                action, data = self.response_queue.get_nowait()

                if action == "start":
                    self.add_assistant_message("")
                elif action == "update":
                    self.update_assistant_message(data)
                elif action == "complete":
                    self.status_var.set("Response complete")
                    self.send_btn.config(state=tk.NORMAL)
                    self.stop_btn.config(state=tk.DISABLED)
                    return
                elif action == "stopped":
                    self.update_assistant_message(" [Generation stopped]")
                    self.status_var.set("Generation stopped")
                    self.send_btn.config(state=tk.NORMAL)
                    self.stop_btn.config(state=tk.DISABLED)
                    return
                elif action == "error":
                    self.add_system_message(f"❌ Generation error: {data}")
                    self.status_var.set("Generation failed")
                    self.send_btn.config(state=tk.NORMAL)
                    self.stop_btn.config(state=tk.DISABLED)
                    return

        except queue.Empty:
            pass

        # Schedule next check
        self.root.after(100, self.check_response_queue)

    def stop_generation_func(self):
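        # Cooperative stop: the streaming loop checks this flag between
        # tokens. model.generate() itself may keep running in its daemon
        # thread until finished; its remaining output is simply discarded.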
        self.stop_generation = True
        self.status_var.set("Stopping generation...")

    def clear_chat(self):
        self.chat_history = []
        self.chat_display.config(state=tk.NORMAL)
        self.chat_display.delete("1.0", tk.END)
        self.chat_display.config(state=tk.DISABLED)
        self.add_system_message("Chat cleared")

def main():
    root = tk.Tk()
    app = CPULLMChatApp(root)

    # Center the window
    root.update_idletasks()
    x = (root.winfo_screenwidth() - root.winfo_width()) // 2
    y = (root.winfo_screenheight() - root.winfo_height()) // 2
    root.geometry(f"+{x}+{y}")

    root.mainloop()

if __name__ == "__main__":
    main()
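
For reference, the streaming pattern the app is built around, reduced to a minimal standalone sketch (assuming the distilgpt2 weights can be downloaded; any small causal LM would do):

import threading

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilgpt2", torch_dtype=torch.float32)
model.eval()

inputs = tokenizer("Running a language model on a CPU", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() blocks until done, so it runs in a worker thread while the
# caller consumes decoded text pieces from the streamer as they arrive.
thread = threading.Thread(
    target=model.generate,
    kwargs={**inputs, "max_new_tokens": 40, "do_sample": True, "streamer": streamer},
    daemon=True,
)
thread.start()
for piece in streamer:
    print(piece, end="", flush=True)
thread.join()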