Fraser committed
Commit 4164789 · 1 Parent(s): 60e0688

big client swap

src/App.svelte CHANGED
@@ -18,8 +18,13 @@
   // Gradio client instances
   let fluxClient: GradioClient | null = $state(null);
   let joyCaptionClient: GradioClient | null = $state(null);
-  let zephyrClient: GradioClient | null = $state(null);
-  let qwenClient: GradioClient | null = $state(null);
+  let hunyuanClient: GradioClient | null = $state(null);
+
+  // Unused clients (kept for future use but not initialized)
+  // let zephyrClient: GradioClient | null = $state(null);
+  // let qwenClient: GradioClient | null = $state(null);
+  // let commandClient: GradioClient | null = $state(null);
+  // let dotsClient: GradioClient | null = $state(null);
 
   // Navigation state
   let activeTab: TabId = $state('scanner');
@@ -97,7 +102,7 @@
     try {
       const opts = hfToken ? { hf_token: hfToken } : {};
 
-      // Connect to all three spaces
+      // Connect to essential spaces only
       fluxClient = await gradioClient.Client.connect(
         "black-forest-labs/FLUX.1-schnell",
         opts
@@ -108,27 +113,43 @@
         opts
       );
 
-      zephyrClient = await gradioClient.Client.connect(
-        "Fraser/zephyr-7b",
+      hunyuanClient = await gradioClient.Client.connect(
+        "tencent/hunyuan-turbos",
         opts
       );
 
-      qwenClient = await gradioClient.Client.connect(
-        "Qwen/Qwen3-Demo",
-        opts
-      );
+      // Unused clients (commented out to save resources)
+      // zephyrClient = await gradioClient.Client.connect(
+      //   "Fraser/zephyr-7b",
+      //   opts
+      // );
+
+      // qwenClient = await gradioClient.Client.connect(
+      //   "Qwen/Qwen3-Demo",
+      //   opts
+      // );
+
+      // commandClient = await gradioClient.Client.connect(
+      //   "Fraser/command-a-vision",
+      //   opts
+      // );
+
+      // dotsClient = await gradioClient.Client.connect(
+      //   "Fraser/dots-demo",
+      //   opts
+      // );
 
       authStore.setBannerMessage("");
 
-      // Set up qwen client reset function
-      setQwenClientResetter(async () => {
-        console.log('🔄 Resetting qwen client connection...');
-        const opts = hfToken ? { hf_token: hfToken } : {};
-        qwenClient = await gradioClient.Client.connect(
-          "Qwen/Qwen3-Demo",
-          opts
-        );
-      });
+      // Qwen client reset function (commented out since qwen is not used)
+      // setQwenClientResetter(async () => {
+      //   console.log('🔄 Resetting qwen client connection...');
+      //   const opts = hfToken ? { hf_token: hfToken } : {};
+      //   qwenClient = await gradioClient.Client.connect(
+      //     "Qwen/Qwen3-Demo",
+      //     opts
+      //   );
+      // });
 
     } catch (err) {
       console.error(err);
@@ -152,8 +173,7 @@
       <Scanner
         {fluxClient}
         {joyCaptionClient}
-        {zephyrClient}
-        {qwenClient}
+        {hunyuanClient}
       />
     {:else if activeTab === 'encounters'}
       <Encounters />
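
Every connection above uses the same @gradio/client pattern; for reference, a minimal standalone sketch (the Space IDs are the ones in this commit, and the optional-token handling mirrors the opts construction shown in the hunk):

    import { Client } from "@gradio/client";

    // Sketch only: connect to the two Spaces this commit keeps active.
    // hf_token is forwarded only when the user has supplied one.
    async function connectClients(hfToken?: `hf_${string}`) {
      const opts = hfToken ? { hf_token: hfToken } : {};
      const fluxClient = await Client.connect("black-forest-labs/FLUX.1-schnell", opts);
      const hunyuanClient = await Client.connect("tencent/hunyuan-turbos", opts);
      return { fluxClient, hunyuanClient };
    }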
src/lib/components/AutoTrainerScanner/AutoTrainerScanner.svelte CHANGED
@@ -11,12 +11,17 @@
 
   interface Props {
     joyCaptionClient: GradioClient;
-    zephyrClient: GradioClient;
     fluxClient: GradioClient;
-    qwenClient: GradioClient;
+    hunyuanClient: GradioClient;
+
+    // Unused clients (kept for future use)
+    // zephyrClient: GradioClient;
+    // qwenClient: GradioClient;
+    // commandClient: GradioClient;
+    // dotsClient: GradioClient;
   }
 
-  let { joyCaptionClient, zephyrClient, fluxClient, qwenClient }: Props = $props();
+  let { joyCaptionClient, fluxClient, hunyuanClient }: Props = $props();
 
   // Scanner state
   let scanState = $state({
@@ -47,7 +52,7 @@
 
   // Initialize trainer paths on component mount
   $effect(() => {
-    if (joyCaptionClient && zephyrClient && fluxClient) {
+    if (joyCaptionClient && fluxClient && hunyuanClient) {
       loadInitialState();
     }
   });
@@ -332,9 +337,8 @@
   <PicletGenerator
     bind:this={picletGenerator}
     {joyCaptionClient}
-    {zephyrClient}
     {fluxClient}
-    {qwenClient}
+    {hunyuanClient}
     isTrainerMode={true}
     onTrainerImageCompleted={onTrainerImageCompleted}
     onTrainerImageFailed={onTrainerImageFailed}
src/lib/components/Pages/Scanner.svelte CHANGED
@@ -6,29 +6,32 @@
   interface Props {
     fluxClient: GradioClient | null;
     joyCaptionClient: GradioClient | null;
-    zephyrClient: GradioClient | null;
-    qwenClient: GradioClient | null;
+    hunyuanClient: GradioClient | null;
+
+    // Unused clients (kept for future use)
+    // zephyrClient: GradioClient | null;
+    // qwenClient: GradioClient | null;
+    // commandClient: GradioClient | null;
+    // dotsClient: GradioClient | null;
   }
 
-  let { fluxClient, joyCaptionClient, zephyrClient, qwenClient }: Props = $props();
+  let { fluxClient, joyCaptionClient, hunyuanClient }: Props = $props();
 </script>
 
 <div class="scanner-page">
-  {#if fluxClient && joyCaptionClient && zephyrClient && qwenClient}
+  {#if fluxClient && joyCaptionClient && hunyuanClient}
     <!-- Auto Trainer Scanner -->
     <AutoTrainerScanner
       {joyCaptionClient}
-      {zephyrClient}
       {fluxClient}
-      {qwenClient}
+      {hunyuanClient}
     />
 
     <!-- Manual Piclet Generator -->
     <PicletGenerator
       {fluxClient}
       {joyCaptionClient}
-      {zephyrClient}
-      {qwenClient}
+      {hunyuanClient}
     />
   {:else}
     <div class="loading-state">
src/lib/components/PicletGenerator/PicletGenerator.svelte CHANGED
@@ -21,15 +21,106 @@
 
   let {
     joyCaptionClient,
-    zephyrClient,
     fluxClient,
-    qwenClient,
+    hunyuanClient,
+
+    // Unused clients (kept for future use)
+    // zephyrClient,
+    // qwenClient,
+    // commandClient,
+    // dotsClient,
+
     isTrainerMode = false,
     onTrainerImageCompleted,
     onTrainerImageFailed
   }: Props = $props();
 
-  let state: PicletWorkflowState = $state({
+  // Text generation client management - simplified to only use HunyuanTurbos
+  type TextGenerationClient = 'hunyuan' | 'command' | 'zephyr' | 'qwen' | 'dots';
+  let currentTextClient: TextGenerationClient = $state('hunyuan'); // Fixed to HunyuanTurbos only
+
+  // Get the active text generation client (now only returns HunyuanTurbos)
+  const getActiveTextClient = () => {
+    return hunyuanClient; // Only HunyuanTurbos is active
+
+    // Unused client switching (kept for future use)
+    // switch (currentTextClient) {
+    //   case 'hunyuan': return hunyuanClient;
+    //   case 'command': return commandClient;
+    //   case 'zephyr': return zephyrClient;
+    //   case 'qwen': return qwenClient;
+    //   case 'dots': return dotsClient;
+    //   default: return hunyuanClient;
+    // }
+  };
+
+  // Unified text generation function (simplified to only use HunyuanTurbos)
+  const generateText = async (prompt: string): Promise<string> => {
+    const client = getActiveTextClient();
+    if (!client) {
+      throw new Error(`HunyuanTurbos client is not available`);
+    }
+
+    console.log(`Generating text with HunyuanTurbos...`);
+
+    // Use HunyuanTurbos client only
+    const hunyuanResult = await client.predict("/chat", [prompt]);
+    return hunyuanResult.data[0] || '';
+
+    // Unused client handling (kept for future use)
+    // switch (currentTextClient) {
+    //   case 'hunyuan':
+    //     // HunyuanTurbos client (assuming similar API to other simple clients)
+    //     const hunyuanResult = await client.predict("/chat", [prompt]);
+    //     return hunyuanResult.data[0] || '';
+    //
+    //   case 'command':
+    //   case 'dots':
+    //     // Command and dots clients use direct predict call (assuming similar to zephyr)
+    //     const result = await client.predict("/chat", [prompt]);
+    //     return result.data[0] || '';
+    //
+    //   case 'zephyr':
+    //     // Zephyr client uses direct predict call
+    //     const zephyrResult = await client.predict("/chat", [prompt]);
+    //     return zephyrResult.data[0] || '';
+    //
+    //   case 'qwen':
+    //     // Qwen client uses the complex API with settings and state
+    //     const defaultSettings = {
+    //       "system_prompt": "",
+    //       "max_new_tokens": 2048,
+    //       "temperature": 0.7,
+    //       "top_p": 0.9,
+    //       "top_k": 50,
+    //       "repetition_penalty": 1.05
+    //     };
+    //
+    //     const defaultState = {
+    //       "messages": [],
+    //       "system": "",
+    //       "tools": null
+    //     };
+    //
+    //     const thinkingBtnState = {
+    //       "enable_thinking": true
+    //     };
+    //
+    //     const output = await withQwenTimeout(() => client.predict(13, [
+    //       prompt,
+    //       defaultSettings,
+    //       thinkingBtnState,
+    //       defaultState
+    //     ]));
+    //
+    //     return output.data[0] || '';
+    //
+    //   default:
+    //     throw new Error(`Unsupported text client: ${currentTextClient}`);
+    // }
+  };
+
+  let workflowState: PicletWorkflowState = $state({
     currentStep: 'upload',
     userImage: null,
     imageCaption: null,
@@ -55,15 +146,15 @@ Focus on: colors, body shape, eyes, limbs, mouth, and key visual features. Omit
 
 
   async function importPiclet(picletData: PicletInstance) {
-    state.isProcessing = true;
-    state.currentStep = 'complete';
+    workflowState.isProcessing = true;
+    workflowState.currentStep = 'complete';
 
     try {
       // Save the imported piclet
       const savedId = await savePicletInstance(picletData);
 
-      // Create a success state similar to generation
-      state.picletImage = {
+      // Create a success workflowState similar to generation
+      workflowState.picletImage = {
        imageUrl: picletData.imageUrl,
        imageData: picletData.imageData,
        seed: 0,
@@ -71,20 +162,20 @@ Focus on: colors, body shape, eyes, limbs, mouth, and key visual features. Omit
      };
 
      // Show import success
-      state.isProcessing = false;
+      workflowState.isProcessing = false;
      alert(`Successfully imported ${picletData.nickname || picletData.typeId}!`);
 
      // Reset to allow another import/generation
      setTimeout(() => reset(), 2000);
    } catch (error) {
-      state.error = `Failed to import piclet: ${error}`;
-      state.isProcessing = false;
+      workflowState.error = `Failed to import piclet: ${error}`;
+      workflowState.isProcessing = false;
    }
  }
 
  async function handleImageSelected(file: File) {
    if (!joyCaptionClient || !fluxClient) {
-      state.error = "Services not connected. Please wait...";
+      workflowState.error = "Services not connected. Please wait...";
      return;
    }
 
@@ -92,8 +183,8 @@ Focus on: colors, body shape, eyes, limbs, mouth, and key visual features. Omit
    imageQueue = [];
    currentImageIndex = 0;
 
-    state.userImage = file;
-    state.error = null;
+    workflowState.userImage = file;
+    workflowState.error = null;
 
    // Check if this is a piclet card with metadata
    const picletData = await extractPicletMetadata(file);
@@ -108,7 +199,7 @@ Focus on: colors, body shape, eyes, limbs, mouth, and key visual features. Omit
 
  async function handleImagesSelected(files: File[]) {
    if (!joyCaptionClient || !fluxClient) {
-      state.error = "Services not connected. Please wait...";
+      workflowState.error = "Services not connected. Please wait...";
      return;
    }
 
@@ -127,8 +218,8 @@ Focus on: colors, body shape, eyes, limbs, mouth, and key visual features. Omit
    }
 
    const currentFile = imageQueue[currentImageIndex];
-    state.userImage = currentFile;
-    state.error = null;
+    workflowState.userImage = currentFile;
+    workflowState.error = null;
 
    // Check if this is a piclet card with metadata
    const picletData = await extractPicletMetadata(currentFile);
@@ -165,24 +256,24 @@ Focus on: colors, body shape, eyes, limbs, mouth, and key visual features. Omit
  }
 
  async function startWorkflow() {
-    state.isProcessing = true;
+    workflowState.isProcessing = true;
 
    try {
      // Step 1: Generate detailed object description with joy-caption
      await captionImage();
-      await new Promise(resolve => setTimeout(resolve, 100)); // Small delay for state update
+      await new Promise(resolve => setTimeout(resolve, 100)); // Small delay for workflowState update
 
      // Step 2: Generate free-form monster concept with qwen3
      await generateConcept();
-      await new Promise(resolve => setTimeout(resolve, 100)); // Small delay for state update
+      await new Promise(resolve => setTimeout(resolve, 100)); // Small delay for workflowState update
 
      // Step 3: Generate structured monster stats based on both caption and concept
      await generateStats();
-      await new Promise(resolve => setTimeout(resolve, 100)); // Small delay for state update
+      await new Promise(resolve => setTimeout(resolve, 100)); // Small delay for workflowState update
 
      // Step 4: Generate image prompt with qwen3
      await generateImagePrompt();
-      await new Promise(resolve => setTimeout(resolve, 100)); // Small delay for state update
+      await new Promise(resolve => setTimeout(resolve, 100)); // Small delay for workflowState update
 
      // Step 5: Generate monster image
      await generateMonsterImage();
@@ -190,7 +281,7 @@ Focus on: colors, body shape, eyes, limbs, mouth, and key visual features. Omit
      // Step 6: Auto-save the piclet as uncaught
      await autoSavePiclet();
 
-      state.currentStep = 'complete';
+      workflowState.currentStep = 'complete';
 
      // If processing a queue or in trainer mode, auto-advance to next image after a short delay
      if (imageQueue.length > 1 || isTrainerMode) {
@@ -203,17 +294,17 @@ Focus on: colors, body shape, eyes, limbs, mouth, and key visual features. Omit
      if (err && typeof err === 'object' && 'message' in err) {
        const errorMessage = String(err.message);
        if (errorMessage.includes('exceeded your GPU quota') || errorMessage.includes('GPU quota')) {
-          state.error = 'GPU quota exceeded! You need to sign in with Hugging Face for free GPU time, or upgrade to Hugging Face Pro for more quota.';
+          workflowState.error = 'GPU quota exceeded! You need to sign in with Hugging Face for free GPU time, or upgrade to Hugging Face Pro for more quota.';
        } else {
-          state.error = errorMessage;
+          workflowState.error = errorMessage;
        }
      } else if (err instanceof Error) {
-        state.error = err.message;
+        workflowState.error = err.message;
      } else {
-        state.error = 'An unknown error occurred';
+        workflowState.error = 'An unknown error occurred';
      }
    } finally {
-      state.isProcessing = false;
+      workflowState.isProcessing = false;
    }
  }
 
@@ -242,15 +333,15 @@ Focus on: colors, body shape, eyes, limbs, mouth, and key visual features. Omit
  }
 
  async function captionImage() {
-    state.currentStep = 'captioning';
+    workflowState.currentStep = 'captioning';
 
-    if (!joyCaptionClient || !state.userImage) {
+    if (!joyCaptionClient || !workflowState.userImage) {
      throw new Error('Caption service not available or no image provided');
    }
 
    try {
      const output = await joyCaptionClient.predict("/stream_chat", [
-        state.userImage, // input_image
+        workflowState.userImage, // input_image
        "Descriptive", // caption_type
        "long", // caption_length
        [], // extra_options
@@ -260,7 +351,7 @@ Focus on: colors, body shape, eyes, limbs, mouth, and key visual features. Omit
 
      const [, caption] = output.data;
      // Store the detailed object description
-      state.imageCaption = caption;
+      workflowState.imageCaption = caption;
      console.log('Detailed object description generated:', caption);
    } catch (error) {
      handleAPIError(error);
@@ -268,15 +359,16 @@ Focus on: colors, body shape, eyes, limbs, mouth, and key visual features. Omit
  }
 
  async function generateConcept() {
-    state.currentStep = 'conceptualizing';
+    workflowState.currentStep = 'conceptualizing';
 
-    if (!qwenClient || !state.imageCaption) {
-      throw new Error('Qwen service not available or no image caption provided');
+    const activeClient = getActiveTextClient();
+    if (!activeClient || !workflowState.imageCaption) {
+      throw new Error(`${currentTextClient} service not available or no image caption provided`);
    }
 
    const conceptPrompt = `Based on this detailed object description, create a Pokémon-style monster that transforms the object into an imaginative creature. The monster should clearly be inspired by the object's appearance but reimagined as a living monster.
 
-  Object description: "${state.imageCaption}"
+  Object description: "${workflowState.imageCaption}"
 
  Guidelines:
  - Take the object's key visual elements (colors, shapes, materials) incorporating all of them into a single creature design
@@ -297,7 +389,7 @@ Format your response exactly as follows:
  {Detailed physical description showing how the object becomes a creature. Ensure the creature uses all the unique attributes of the object. Include colors, shapes, materials, eyes, limbs, mouth, and distinctive features. This section should be comprehensive as it will be used for both stats generation and image creation.}`;
 
    try {
-      // Create the required state structure based on qwen.html
+      // Create the required workflowState structure based on qwen.html
      const defaultState = {
        "conversation_contexts": {},
        "conversations": [],
@@ -311,56 +403,18 @@ Format your response exactly as follows:
        "thinking_budget": 1
      };
 
-      // Create thinking button state
+      // Create thinking button workflowState
      const thinkingBtnState = {
        "enable_thinking": true
      };
 
-      console.log('Generating monster concept with qwen3...');
-
-      // Call the add_message function (fn_index 13)
-      const output = await withQwenTimeout(() => qwenClient.predict(13, [
-        conceptPrompt, // input_value
-        defaultSettings, // settings_form_value
-        thinkingBtnState, // thinking_btn_state_value
-        defaultState // state_value
-      ]));
-
-      console.log('Qwen3 concept response:', output);
-
-      // Extract the response text from the output
-      let responseText = "";
-      if (output && output.data && Array.isArray(output.data)) {
-        // The chatbot response is at index 5 in the outputs array
-        const chatbotUpdate = output.data[5];
-
-        if (chatbotUpdate && chatbotUpdate.value && Array.isArray(chatbotUpdate.value)) {
-          const chatHistory = chatbotUpdate.value;
-
-          if (chatHistory.length > 0) {
-            // Get the last message (assistant's response)
-            const lastMessage = chatHistory[chatHistory.length - 1];
-
-            if (lastMessage && lastMessage.content && Array.isArray(lastMessage.content)) {
-              // Extract text content from the message
-              const textContents = lastMessage.content
-                .filter((item: any) => item.type === "text")
-                .map((item: any) => item.content)
-                .join("\n");
-              responseText = textContents || "Response received but no text content found";
-            } else if (lastMessage && lastMessage.role === "assistant") {
-              // Fallback - if content structure is different
-              responseText = JSON.stringify(lastMessage, null, 2);
-            }
-          }
-        }
-      }
+      const responseText = await generateText(conceptPrompt);
 
      if (!responseText || responseText.trim() === '') {
        throw new Error('Failed to generate monster concept');
      }
 
-      state.picletConcept = responseText;
+      workflowState.picletConcept = responseText;
      console.log('Monster concept generated:', responseText);
    } catch (error) {
      handleAPIError(error);
@@ -368,18 +422,19 @@ Format your response exactly as follows:
  }
 
  async function generateImagePrompt() {
-    state.currentStep = 'promptCrafting';
+    workflowState.currentStep = 'promptCrafting';
 
-    if (!qwenClient || !state.picletConcept || !state.imageCaption) {
-      throw new Error('Qwen service not available or no concept/caption available for prompt generation');
+    const activeClient = getActiveTextClient();
+    if (!activeClient || !workflowState.picletConcept || !workflowState.imageCaption) {
+      throw new Error(`HunyuanTurbos service not available or no concept/caption available for prompt generation`);
    }
 
    // Extract the Monster Visual Description from the structured concept
-    const visualDescMatch = state.picletConcept.match(/## Monster Visual Description\s*\n([\s\S]*?)(?=##|$)/);
+    const visualDescMatch = workflowState.picletConcept.match(/## Monster Visual Description\s*\n([\s\S]*?)(?=##|$)/);
 
    if (visualDescMatch && visualDescMatch[1]) {
-      state.imagePrompt = visualDescMatch[1].trim();
-      console.log('Extracted visual description for image generation:', state.imagePrompt);
+      workflowState.imagePrompt = visualDescMatch[1].trim();
+      console.log('Extracted visual description for image generation:', workflowState.imagePrompt);
      return; // Skip qwen3 call since we have the description
    }
 
@@ -388,13 +443,13 @@ Format your response exactly as follows:
 
  MONSTER CONCEPT:
  """
-  ${state.picletConcept}
+  ${workflowState.picletConcept}
  """
 
  Create a concise visual description (1-3 sentences, max 100 words). Focus only on colors, shapes, materials, eyes, limbs, mouth, and distinctive features. Omit all non-visual information like abilities and backstory.`;
 
    try {
-      // Create the required state structure based on qwen.html
+      // Create the required workflowState structure based on qwen.html
      const defaultState = {
        "conversation_contexts": {},
        "conversations": [],
@@ -408,73 +463,35 @@ Create a concise visual description (1-3 sentences, max 100 words). Focus only o
        "thinking_budget": 1
      };
 
-      // Create thinking button state
+      // Create thinking button workflowState
      const thinkingBtnState = {
        "enable_thinking": true
      };
 
-      console.log('Generating image prompt with qwen3...');
-
-      // Call the add_message function (fn_index 13)
-      const output = await withQwenTimeout(() => qwenClient.predict(13, [
-        imagePromptPrompt, // input_value
-        defaultSettings, // settings_form_value
-        thinkingBtnState, // thinking_btn_state_value
-        defaultState // state_value
-      ]));
-
-      console.log('Qwen3 image prompt response:', output);
-
-      // Extract the response text from the output using the same pattern as generateConcept
-      let responseText = "";
-      if (output && output.data && Array.isArray(output.data)) {
-        // The chatbot response is at index 5 in the outputs array
-        const chatbotUpdate = output.data[5];
-
-        if (chatbotUpdate && chatbotUpdate.value && Array.isArray(chatbotUpdate.value)) {
-          const chatHistory = chatbotUpdate.value;
-
-          if (chatHistory.length > 0) {
-            // Get the last message (assistant's response)
-            const lastMessage = chatHistory[chatHistory.length - 1];
-
-            if (lastMessage && lastMessage.content && Array.isArray(lastMessage.content)) {
-              // Extract text content from the message
-              const textContents = lastMessage.content
-                .filter((item: any) => item.type === "text")
-                .map((item: any) => item.content)
-                .join("\n");
-              responseText = textContents || "Response received but no text content found";
-            } else if (lastMessage && lastMessage.role === "assistant") {
-              // Fallback - if content structure is different
-              responseText = JSON.stringify(lastMessage, null, 2);
-            }
-          }
-        }
-      }
+      const responseText = await generateText(imagePromptPrompt);
 
      if (!responseText || responseText.trim() === '') {
        throw new Error('Failed to generate image prompt');
      }
 
-      state.imagePrompt = responseText.trim();
-      console.log('Image prompt generated:', state.imagePrompt);
+      workflowState.imagePrompt = responseText.trim();
+      console.log('Image prompt generated:', workflowState.imagePrompt);
    } catch (error) {
      handleAPIError(error);
    }
  }
 
  async function generateMonsterImage() {
-    state.currentStep = 'generating';
+    workflowState.currentStep = 'generating';
 
-    if (!fluxClient || !state.imagePrompt || !state.picletStats) {
+    if (!fluxClient || !workflowState.imagePrompt || !workflowState.picletStats) {
      throw new Error('Image generation service not available or no prompt/stats');
    }
 
    // The image prompt should already be generated by generateImagePrompt() in the workflow
 
    // Get tier for image quality enhancement
-    const tier = state.picletStats.tier || 'medium';
+    const tier = workflowState.picletStats.tier || 'medium';
    const tierDescriptions = {
      low: 'simple and basic design',
      medium: 'detailed and well-crafted design',
@@ -484,7 +501,7 @@ Create a concise visual description (1-3 sentences, max 100 words). Focus only o
 
    try {
      const output = await fluxClient.predict("/infer", [
-        `${state.imagePrompt}\nNow generate a Pokémon-Anime-style image of the monster in an idle pose with a plain white background. This is a ${tier} tier monster with ${tierDescriptions[tier as keyof typeof tierDescriptions]}. The monster should not be attacking or in motion. The full monster must be visible within the frame.`,
+        `${workflowState.imagePrompt}\nNow generate a Pokémon-Anime-style image of the monster in an idle pose with a plain white background. This is a ${tier} tier monster with ${tierDescriptions[tier as keyof typeof tierDescriptions]}. The monster should not be attacking or in motion. The full monster must be visible within the frame.`,
        0, // seed
        true, // randomizeSeed
        1024, // width
@@ -504,20 +521,20 @@ Create a concise visual description (1-3 sentences, max 100 words). Focus only o
        console.log('Processing image for background removal...');
        try {
          const transparentBase64 = await removeBackground(url);
-          state.picletImage = {
+          workflowState.picletImage = {
            imageUrl: url,
            imageData: transparentBase64,
            seed: usedSeed,
-            prompt: state.imagePrompt
+            prompt: workflowState.imagePrompt
          };
          console.log('Background removal completed successfully');
        } catch (processError) {
          console.error('Failed to process image for background removal:', processError);
          // Fallback to original image
-          state.picletImage = {
+          workflowState.picletImage = {
            imageUrl: url,
            seed: usedSeed,
-            prompt: state.imagePrompt
+            prompt: workflowState.imagePrompt
          };
        }
      } else {
@@ -529,30 +546,31 @@ Create a concise visual description (1-3 sentences, max 100 words). Focus only o
  }
 
  async function generateStats() {
-    state.currentStep = 'statsGenerating';
+    workflowState.currentStep = 'statsGenerating';
 
-    if (!qwenClient || !state.picletConcept || !state.imageCaption) {
-      throw new Error('Qwen service not available or no concept/caption available for stats generation');
+    const activeClient = getActiveTextClient();
+    if (!activeClient || !workflowState.picletConcept || !workflowState.imageCaption) {
+      throw new Error(`${currentTextClient} service not available or no concept/caption available for stats generation`);
    }
 
    // Default tier (will be set from the generated stats)
    let tier: 'low' | 'medium' | 'high' | 'legendary' = 'medium';
 
    // Extract monster name and rarity from the structured concept
-    const monsterNameMatch = state.picletConcept.match(/# Monster Name\s*\n([\s\S]*?)(?=^##|$)/m);
+    const monsterNameMatch = workflowState.picletConcept.match(/# Monster Name\s*\n([\s\S]*?)(?=^##|$)/m);
    const monsterName = monsterNameMatch ? monsterNameMatch[1].trim() : 'Unknown Monster';
 
-    const rarityMatch = state.picletConcept.match(/# Object Rarity\s*\n([\s\S]*?)(?=^#)/m);
+    const rarityMatch = workflowState.picletConcept.match(/# Object Rarity\s*\n([\s\S]*?)(?=^#)/m);
    const objectRarity = rarityMatch ? rarityMatch[1].trim().toLowerCase() : 'common';
 
    // Create comprehensive battle-ready monster prompt
    const statsPrompt = `Based on this detailed object description and monster concept, create a complete battle-ready monster for the Pictuary Battle System:
 
  ORIGINAL OBJECT DESCRIPTION:
-  "${state.imageCaption}"
+  "${workflowState.imageCaption}"
 
  MONSTER CONCEPT:
-  "${state.picletConcept}"
+  "${workflowState.picletConcept}"
 
  The object rarity has been assessed as: ${objectRarity}
 
@@ -758,7 +776,7 @@ Write your response within \`\`\`json\`\`\``;
    console.log('Generating monster stats with qwen3');
 
    try {
-      // Create the required state structure based on qwen.html
+      // Create the required workflowState structure based on qwen.html
      const defaultState = {
        "conversation_contexts": {},
        "conversations": [],
@@ -772,58 +790,12 @@ Write your response within \`\`\`json\`\`\``;
        "thinking_budget": 1
      };
 
-      // Create thinking button state
+      // Create thinking button workflowState
      const thinkingBtnState = {
        "enable_thinking": true
      };
 
-      // Call the add_message function (fn_index 13)
-      const output = await withQwenTimeout(() => qwenClient.predict(13, [
-        statsPrompt, // input_value
-        defaultSettings, // settings_form_value
-        thinkingBtnState, // thinking_btn_state_value
-        defaultState // state_value
-      ]));
-
-      console.log('Qwen3 stats response:', output);
-
-      // Extract the response text from the output using the same pattern as generateConcept
-      let responseText = "";
-      if (output && output.data && Array.isArray(output.data)) {
-        // The chatbot response is at index 5 in the outputs array
-        const chatbotUpdate = output.data[5];
-
-        if (chatbotUpdate && chatbotUpdate.value && Array.isArray(chatbotUpdate.value)) {
-          const chatHistory = chatbotUpdate.value;
-
-          if (chatHistory.length > 0) {
-            // Get the last message (assistant's response)
-            const lastMessage = chatHistory[chatHistory.length - 1];
-
-            console.log('Full message structure:', JSON.stringify(lastMessage, null, 2));
-
-            if (lastMessage && lastMessage.content && Array.isArray(lastMessage.content)) {
-              // Extract ALL text content from the message more robustly
-              const textContents = lastMessage.content
-                .filter((item: any) => item.type === "text")
-                .map((item: any) => {
-                  console.log('Content item:', item);
-                  return item.content || '';
-                })
-                .join(""); // Join without separator to avoid breaking JSON
-              responseText = textContents || "Response received but no text content found";
-              console.log('Extracted text length:', responseText.length);
-              console.log('Extracted text preview:', responseText.substring(0, 200) + '...');
-            } else if (lastMessage && typeof lastMessage === 'string') {
-              // Handle case where the message is a plain string
-              responseText = lastMessage;
-            } else if (lastMessage && lastMessage.role === "assistant") {
-              // Fallback - if content structure is different
-              responseText = JSON.stringify(lastMessage, null, 2);
-            }
-          }
-        }
-      }
+      const responseText = await generateText(statsPrompt);
 
      if (!responseText || responseText.trim() === '') {
        throw new Error('Failed to generate monster stats');
@@ -939,7 +911,7 @@ Write your response within \`\`\`json\`\`\``;
      }
 
      const stats: PicletStats = parsedStats;
-      state.picletStats = stats;
+      workflowState.picletStats = stats;
      console.log('Monster stats generated:', stats);
      console.log('Monster stats JSON:', JSON.stringify(stats, null, 2));
    } catch (parseError) {
@@ -952,22 +924,22 @@ Write your response within \`\`\`json\`\`\``;
  }
 
  async function autoSavePiclet() {
-    if (!state.picletImage || !state.imageCaption || !state.picletConcept || !state.imagePrompt || !state.picletStats) {
+    if (!workflowState.picletImage || !workflowState.imageCaption || !workflowState.picletConcept || !workflowState.imagePrompt || !workflowState.picletStats) {
      console.error('Cannot auto-save: missing required data');
      return;
    }
 
    try {
      // Create a clean copy of stats to ensure it's serializable
-      const cleanStats = JSON.parse(JSON.stringify(state.picletStats));
+      const cleanStats = JSON.parse(JSON.stringify(workflowState.picletStats));
 
      const picletData = {
-        name: state.picletStats.name,
-        imageUrl: state.picletImage.imageUrl,
-        imageData: state.picletImage.imageData,
-        imageCaption: state.imageCaption,
-        concept: state.picletConcept,
-        imagePrompt: state.imagePrompt,
+        name: workflowState.picletStats.name,
+        imageUrl: workflowState.picletImage.imageUrl,
+        imageData: workflowState.picletImage.imageData,
+        imageCaption: workflowState.imageCaption,
+        concept: workflowState.picletConcept,
+        imagePrompt: workflowState.imagePrompt,
        stats: cleanStats,
        createdAt: new Date()
      };
@@ -994,10 +966,10 @@ Write your response within \`\`\`json\`\`\``;
    } catch (err) {
      console.error('Failed to auto-save piclet:', err);
      console.error('Piclet data that failed to save:', {
-        name: state.picletStats?.name,
-        hasImageUrl: !!state.picletImage?.imageUrl,
-        hasImageData: !!state.picletImage?.imageData,
-        hasStats: !!state.picletStats
+        name: workflowState.picletStats?.name,
+        hasImageUrl: !!workflowState.picletImage?.imageUrl,
+        hasImageData: !!workflowState.picletImage?.imageData,
+        hasStats: !!workflowState.picletStats
      });
 
      // If in trainer mode, notify failure
@@ -1011,7 +983,7 @@ Write your response within \`\`\`json\`\`\``;
  }
 
  function reset() {
-    state = {
+    workflowState = {
      currentStep: 'upload',
      userImage: null,
      imageCaption: null,
@@ -1030,7 +1002,7 @@ Write your response within \`\`\`json\`\`\``;
    trainerImagePaths.push(imagePath);
 
    // If this is the first image and we're not processing, start processing
-    if (imageQueue.length === 1 && !state.isProcessing) {
+    if (imageQueue.length === 1 && !workflowState.isProcessing) {
      processCurrentImage();
    }
  }
@@ -1038,33 +1010,49 @@ Write your response within \`\`\`json\`\`\``;
 
 <div class="piclet-generator">
 
-  {#if state.currentStep !== 'upload'}
-    <WorkflowProgress currentStep={state.currentStep} error={state.error} />
+  <!-- Text Generation Client Selector (hidden since only HunyuanTurbos is active) -->
+  <!--
+  {#if !isTrainerMode}
+    <div class="client-selector">
+      <label for="text-client">Text Generator:</label>
+      <select id="text-client" bind:value={currentTextClient}>
+        <option value="hunyuan">HunyuanTurbos</option>
+        <option value="command">Command</option>
+        <option value="zephyr">Zephyr-7B</option>
+        <option value="qwen">Qwen3</option>
+        <option value="dots">Dots-Demo</option>
+      </select>
+    </div>
+  {/if}
+  -->
+
+  {#if workflowState.currentStep !== 'upload'}
+    <WorkflowProgress currentStep={workflowState.currentStep} error={workflowState.error} />
  {/if}
 
-  {#if state.currentStep === 'upload'}
+  {#if workflowState.currentStep === 'upload'}
    <UploadStep
      onImageSelected={handleImageSelected}
      onImagesSelected={handleImagesSelected}
-      isProcessing={state.isProcessing}
+      isProcessing={workflowState.isProcessing}
      imageQueue={imageQueue}
      currentImageIndex={currentImageIndex}
    />
-  {:else if state.currentStep === 'complete'}
-    <PicletResult workflowState={state} onReset={reset} />
+  {:else if workflowState.currentStep === 'complete'}
+    <PicletResult workflowState={workflowState} onReset={reset} />
  {:else}
    <div class="processing-container">
      <div class="spinner"></div>
      <p class="processing-text">
-        {#if state.currentStep === 'captioning'}
+        {#if workflowState.currentStep === 'captioning'}
          Analyzing your image...
-        {:else if state.currentStep === 'conceptualizing'}
+        {:else if workflowState.currentStep === 'conceptualizing'}
          Creating Piclet concept...
-        {:else if state.currentStep === 'statsGenerating'}
+        {:else if workflowState.currentStep === 'statsGenerating'}
          Generating battle stats...
-        {:else if state.currentStep === 'promptCrafting'}
+        {:else if workflowState.currentStep === 'promptCrafting'}
          Creating image prompt...
-        {:else if state.currentStep === 'generating'}
+        {:else if workflowState.currentStep === 'generating'}
          Generating your Piclet...
        {/if}
      </p>
@@ -1080,6 +1068,34 @@ Write your response within \`\`\`json\`\`\``;
    padding: 2rem;
  }
 
+  /* Client selector styles (hidden since only HunyuanTurbos is active) */
+  /*
+  .client-selector {
+    display: flex;
+    align-items: center;
+    gap: 0.5rem;
+    margin-bottom: 1rem;
+    padding: 0.75rem;
+    background: #f8f9fa;
+    border-radius: 8px;
+    border: 1px solid #dee2e6;
+  }
+
+  .client-selector label {
+    font-weight: 500;
+    color: #495057;
+  }
+
+  .client-selector select {
+    padding: 0.25rem 0.5rem;
+    border: 1px solid #ced4da;
+    border-radius: 4px;
+    background: white;
+    color: #495057;
+    font-size: 0.9rem;
+  }
+  */
+
 
  .processing-container {
    display: flex;
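
The net effect in this file: generateConcept, generateStats, and generateImagePrompt no longer carry qwen-specific fn_index and chat-history plumbing; each now calls the single generateText helper. Trimmed to its active path, the helper reduces to the sketch below (the "/chat" endpoint and single-string payload are the commit's own assumption about the HunyuanTurbos Space API, as its comments note):

    // Active path only; the commented-out multi-client switch is omitted.
    const generateText = async (prompt: string): Promise<string> => {
      if (!hunyuanClient) {
        throw new Error("HunyuanTurbos client is not available");
      }
      const result = await hunyuanClient.predict("/chat", [prompt]);
      return (result.data as string[])[0] ?? "";
    };

    // Each workflow step is then a one-liner, e.g. in generateConcept():
    // const responseText = await generateText(conceptPrompt);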
src/lib/types/index.ts CHANGED
@@ -98,9 +98,14 @@ export interface PicletWorkflowState {
 
 export interface PicletGeneratorProps {
   joyCaptionClient: GradioClient | null;
-  zephyrClient: GradioClient | null;
   fluxClient: GradioClient | null;
-  qwenClient: GradioClient | null;
+  hunyuanClient: GradioClient | null;
+
+  // Unused clients (kept for future use)
+  // zephyrClient: GradioClient | null;
+  // qwenClient: GradioClient | null;
+  // commandClient: GradioClient | null;
+  // dotsClient: GradioClient | null;
 }
 
 // Piclet Stats Types - now compatible with battle engine
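
With the interface narrowed this way, a consumer now supplies hunyuanClient in place of the removed fields; a hypothetical usage sketch (the $lib import alias is assumed, not shown in this commit):

    import type { PicletGeneratorProps } from "$lib/types";

    // hunyuanClient takes over the slots zephyrClient/qwenClient used to fill.
    const generatorProps: PicletGeneratorProps = {
      joyCaptionClient,
      fluxClient,
      hunyuanClient
    };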