Chris4K committed
Commit 2bd9c51 · verified · 1 Parent(s): b2abc25

Update graph.py

Files changed (1):
  1. graph.py +356 -1082
graph.py CHANGED
@@ -1,5 +1,3 @@
1
-
2
-
3
  import logging
4
  import os
5
  import uuid
@@ -18,12 +16,15 @@ from pydantic import BaseModel, Field
18
  from trafilatura import extract
19
 
20
  from huggingface_hub import InferenceClient
 
 
21
 
22
  from langchain_core.messages import AIMessage, HumanMessage, AnyMessage, ToolCall, SystemMessage, ToolMessage
23
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
24
  from langchain_core.tools import tool
25
 
26
  from langchain_community.tools import TavilySearchResults
 
27
 
28
  from langgraph.graph.state import CompiledStateGraph
29
  from langgraph.graph import StateGraph, START, END, add_messages
@@ -35,14 +36,6 @@ from langgraph.checkpoint.memory import MemorySaver
35
 
36
  from langgraph.types import Command, interrupt
37
 
38
- from langchain_anthropic import ChatAnthropic
39
- from langchain_openai import ChatOpenAI
40
-
41
- from mistralai import Mistral
42
- from langchain.chat_models import init_chat_model
43
- from langchain_core.messages.utils import convert_to_openai_messages
44
-
45
-
46
 
47
  class State(TypedDict):
48
  messages: Annotated[list, add_messages]
@@ -70,20 +63,14 @@ except Exception as e:
70
  def evaluate_idea_completion(response) -> bool:
71
  """
72
  Evaluates whether the assistant's response indicates a complete DIY project idea.
73
- You can customize the logic based on your specific criteria.
74
  """
75
- # Example logic: Check if the response contains certain keywords
76
  required_keywords = ["materials", "dimensions", "tools", "steps"]
77
 
78
- # Determine the type of response and extract text accordingly
79
  if isinstance(response, dict):
80
- # If response is a dictionary, extract values and join them into a single string
81
  response_text = ' '.join(str(value).lower() for value in response.values())
82
  elif isinstance(response, str):
83
- # If response is a string, convert it to lowercase
84
  response_text = response.lower()
85
  else:
86
- # If response is of an unexpected type, convert it to string and lowercase
87
  response_text = str(response).lower()
88
 
89
  return all(keyword in response_text for keyword in required_keywords)
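For reference, a quick illustration of how this keyword check behaves (the example inputs are illustrative, not from the commit):

# All four required keywords present -> True
evaluate_idea_completion("Materials: pine boards. Tools: saw, drill. Dimensions: 60x20 cm. Steps: cut, sand, assemble.")
# "steps" missing -> False
evaluate_idea_completion({"materials": "plywood", "tools": "screwdriver", "dimensions": "30 cm"})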
@@ -91,7 +78,7 @@ def evaluate_idea_completion(response) -> bool:
91
  @tool
92
  async def human_assistance(query: str) -> str:
93
  """Request assistance from a human."""
94
- human_response = await interrupt({"query": query}) # async wait
95
  return human_response["data"]
96
 
97
  @tool
@@ -113,10 +100,9 @@ async def finalize_idea() -> str:
113
  """Marks the brainstorming phase as complete. This function does nothing else."""
114
  return "Brainstorming finalized."
115
 
116
- tools = [download_website_text, human_assistance,finalize_idea]
117
  memory = MemorySaver()
118
 
119
-
120
  if search_enabled:
121
  tavily_search_tool = TavilySearchResults(
122
  max_results=5,
@@ -128,71 +114,198 @@ if search_enabled:
128
  else:
129
  print("TAVILY_API_KEY environment variable not found. Websearch disabled")
130
 
131
- weak_model = ChatOpenAI(
132
- model="gpt-4o",
133
- temperature=0,
134
- max_tokens=None,
135
- timeout=None,
136
- max_retries=2,
137
- # api_key="...", # if you prefer to pass api key in directly instaed of using env vars
138
- # base_url="...",
139
- # organization="...",
140
- # other params...
141
- )
142
 
143
- api_key = os.environ["MISTRAL_API_KEY"]
144
- model = "mistral-large-latest"
145
-
146
- client = Mistral(api_key=api_key)
147
-
148
-
149
- # ChatAnthropic(
150
- # model="claude-3-5-sonnet-20240620",
151
- # temperature=0,
152
- # max_tokens=1024,
153
- # timeout=None,
154
- # max_retries=2,
155
- # # other params...
156
- # )
157
- search_enabled = bool(os.environ.get("TAVILY_API_KEY"))
158
-
159
- if not os.environ.get("OPENAI_API_KEY"):
160
- print('Open API key not found')
161
-
162
- prompt_planning_model = ChatOpenAI(
163
- model="gpt-4o",
164
- temperature=0,
165
- max_tokens=None,
166
- timeout=None,
167
- max_retries=2,
168
- # api_key="...", # if you prefer to pass api key in directly instaed of using env vars
169
- # base_url="...",
170
- # organization="...",
171
- # other params...
172
  )
173
 
174
- threed_object_gen_model = ChatOpenAI(
175
- model="gpt-4o",
176
- temperature=0,
177
- max_tokens=None,
178
- timeout=None,
179
- max_retries=2,
180
- # api_key="...", # if you prefer to pass api key in directly instaed of using env vars
181
- # base_url="...",
182
- # organization="...",
183
- # other params...
184
- )
 
185
 
186
- huggingfaceclient = InferenceClient(
187
- provider="hf-inference",
188
- api_key=os.environ["HF_TOKEN"],
189
- )
 
 
190
 
191
- model = weak_model
192
- assistant_model = weak_model
 
193
 
194
  class GraphProcessingState(BaseModel):
195
- # user_input: str = Field(default_factory=str, description="The original user input")
196
  messages: Annotated[list[AnyMessage], add_messages] = Field(default_factory=list)
197
  prompt: str = Field(default_factory=str, description="The prompt to be used for the model")
198
  tools_enabled: dict = Field(default_factory=dict, description="The tools enabled for the assistant")
@@ -210,43 +323,13 @@ class GraphProcessingState(BaseModel):
210
  product_searching_complete: bool = Field(default=False)
211
  purchasing_complete: bool = Field(default=False)
212
 
213
-
214
  generated_image_url_from_dalle: str = Field(default="", description="The generated_image_url_from_dalle.")
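As a rough sketch of how this state model is used (the message content is illustrative; field names come from the class above):

state = GraphProcessingState(messages=[HumanMessage(content="I want to build a bookshelf")])
state.brainstorming_complete          # False: stage completion flags default to False
state.generated_image_url_from_dalle  # "" until prompt_planning_node fills it in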
215
 
216
-
217
-
218
  async def guidance_node(state: GraphProcessingState, config=None):
219
-
220
- # print(f"Prompt: {state.prompt}")
221
- # print(f"Prompt: {state.prompt}")
222
- # # print(f"Message: {state.messages}")
223
- # print(f"Tools Enabled: {state.tools_enabled}")
224
- # print(f"Search Enabled: {state.search_enabled}")
225
- # for message in state.messages:
226
- # print(f'\ncomplete message', message)
227
- # if isinstance(message, HumanMessage):
228
- # print(f"Human: {message.content}\n")
229
- # elif isinstance(message, AIMessage):
230
- # # Check if content is non-empty
231
- # if message.content:
232
- # # If content is a list (e.g., list of dicts), extract text
233
- # if isinstance(message.content, list):
234
- # texts = [item.get('text', '') for item in message.content if isinstance(item, dict) and 'text' in item]
235
- # if texts:
236
- # print(f"AI: {' '.join(texts)}\n")
237
- # elif isinstance(message.content, str):
238
- # print(f"AI: {message.content}")
239
- # elif isinstance(message, SystemMessage):
240
- # print(f"System: {message.content}\n")
241
- # elif isinstance(message, ToolMessage):
242
- # print(f"Tool: {message.content}\n")
243
- print("\n🕵️‍♀️🕵️‍♀️ | start | progress checking nodee \n") # Added a newline for clarity
244
-
245
- # print(f"Prompt: {state.prompt}\n")
246
 
247
  if state.messages:
248
  last_message = state.messages[-1]
249
-
250
  if isinstance(last_message, HumanMessage):
251
  print(f"🧑 Human: {last_message.content}\n")
252
  elif isinstance(last_message, AIMessage):
@@ -264,8 +347,6 @@ async def guidance_node(state: GraphProcessingState, config=None):
264
  else:
265
  print("\n(No messages found.)")
266
 
267
-
268
- # Log boolean completion flags
269
  # Define the order of stages
270
  stage_order = ["brainstorming", "planning", "drawing", "product_searching", "purchasing"]
271
 
@@ -273,21 +354,17 @@ async def guidance_node(state: GraphProcessingState, config=None):
273
  completed = [stage for stage in stage_order if getattr(state, f"{stage}_complete", False)]
274
  incomplete = [stage for stage in stage_order if not getattr(state, f"{stage}_complete", False)]
275
 
276
-
277
-
278
  # Determine the next stage
279
  if not incomplete:
280
- # All stages are complete
281
  return {
282
  "messages": [AIMessage(content="All DIY project stages are complete!")],
283
  "next_stage": "end_project",
284
  "pending_approval_stage": None,
285
  }
286
  else:
287
- # Set the next stage to the first incomplete stage
288
  next_stage = incomplete[0]
289
- print(f"Next Stage: {state.next_stage}")
290
- print("\n🕵️‍♀️🕵️‍♀️ | end | progress checking nodee \n") # Added a newline for clarity
291
  return {
292
  "messages": [],
293
  "next_stage": next_stage,
@@ -295,75 +372,37 @@ async def guidance_node(state: GraphProcessingState, config=None):
295
  }
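In short, the node walks stage_order and picks the first stage whose completion flag is still False; a minimal sketch of that selection (the flag values are illustrative):

stage_order = ["brainstorming", "planning", "drawing", "product_searching", "purchasing"]
flags = {"brainstorming_complete": True, "planning_complete": False}   # example state
incomplete = [s for s in stage_order if not flags.get(f"{s}_complete", False)]
next_stage = incomplete[0] if incomplete else "end_project"            # -> "planning"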
296
 
297
  def guidance_routing(state: GraphProcessingState) -> str:
298
-
299
  print("\n🔀🔀 Routing checkpoint 🔀🔀\n")
300
-
301
  print(f"Next Stage: {state.next_stage}\n")
302
-
303
  print(f"Brainstorming complete: {state.brainstorming_complete}")
304
- print(f"Prompt planing: {state.planning_complete}")
305
- print(f"Drwaing 3d model: {state.drawing_complete}")
306
- print(f"Finding products: {state.product_searching_complete}\n")
307
-
308
-
309
 
310
  next_stage = state.next_stage
311
  if next_stage == "brainstorming":
312
  return "brainstorming_node"
313
-
314
  elif next_stage == "planning":
315
- # return "generate_3d_node"
316
  return "prompt_planning_node"
317
  elif next_stage == "drawing":
318
  return "generate_3d_node"
319
  elif next_stage == "product_searching":
320
- print('\n may day may day may day may day may day')
321
-
322
- print(f"Prompt: {state.prompt}")
323
- print(f"Prompt: {state.prompt}")
324
- # print(f"Message: {state.messages}")
325
- print(f"Tools Enabled: {state.tools_enabled}")
326
- print(f"Search Enabled: {state.search_enabled}")
327
- for message in state.messages:
328
- print(f'\ncomplete message', message)
329
- if isinstance(message, HumanMessage):
330
- print(f"Human: {message.content}\n")
331
- elif isinstance(message, AIMessage):
332
- # Check if content is non-empty
333
- if message.content:
334
- # If content is a list (e.g., list of dicts), extract text
335
- if isinstance(message.content, list):
336
- texts = [item.get('text', '') for item in message.content if isinstance(item, dict) and 'text' in item]
337
- if texts:
338
- print(f"AI: {' '.join(texts)}\n")
339
- elif isinstance(message.content, str):
340
- print(f"AI: {message.content}")
341
- elif isinstance(message, SystemMessage):
342
- print(f"System: {message.content}\n")
343
- elif isinstance(message, ToolMessage):
344
- print(f"Tool: {message.content}\n")
345
- # return "drawing_node"
346
- # elif next_stage == "product_searching":
347
- # return "product_searching"
348
- # elif next_stage == "purchasing":
349
- # return "purchasing_node"
350
- return END
351
 
352
  async def brainstorming_node(state: GraphProcessingState, config=None):
353
- print("\n🧠🧠 | start | brainstorming Node \n") # Added a newline for clarity
354
-
355
 
356
- # Check if model is available
357
  if not model:
358
  return {"messages": [AIMessage(content="Model not available for brainstorming.")]}
359
 
360
- # Filter out messages with empty content
361
  filtered_messages = [
362
  message for message in state.messages
363
  if isinstance(message, (HumanMessage, AIMessage, SystemMessage, ToolMessage)) and message.content
364
  ]
365
 
366
- # Ensure there is at least one message with content
367
  if not filtered_messages:
368
  filtered_messages.append(AIMessage(content="No valid messages provided."))
369
 
@@ -373,615 +412,138 @@ async def brainstorming_node(state: GraphProcessingState, config=None):
373
 
374
  if not incomplete:
375
  print("All stages complete!")
376
- # Handle case where all stages are complete
377
- # You might want to return a message and end, or set proposed_next_stage to a special value
378
  ai_all_complete_msg = AIMessage(content="All DIY project stages are complete!")
379
  return {
380
- "messages": current_messages + [ai_all_complete_msg],
381
- "next_stage": "end_project", # Or None, or a final summary node
382
  "pending_approval_stage": None,
383
  }
384
- else:
385
- # THIS LINE DEFINES THE VARIABLE
386
- proposed_next_stage = incomplete[0]
387
-
388
- guidance_prompt_text = (
389
- """
390
- You are a warm, encouraging, and knowledgeable AI assistant, acting as a **Creative DIY Collaborator**. Your primary goal is to guide the user through a friendly and inspiring conversation to finalize **ONE specific, viable DIY project idea**. While we want to be efficient, the top priority is making the user feel heard, understood, and confident in their final choice.
391
-
392
- ⚠️ Your core directive remains speed and convergence: If you identify an idea that clearly meets ALL **Critical Criteria** and the user seems positive or neutral, you must suggest finalizing it **immediately**. Do NOT delay by offering too many alternatives once a solid candidate emerges. Your goal is to converge on a "good enough" idea the user is happy with, not to explore every possibility.
393
-
394
- **Your Conversational Style & Strategy:**
395
- 1. **Be an Active Listener:** Start by acknowledging and validating the user's input. Show you understand their core desire (e.g., "That sounds like a fun goal! Creating a custom piece for your living room is always rewarding.").
396
- 2. **Ask Inspiring, Open-Ended Questions:** Instead of generic questions, make them feel personal and insightful.
397
- * *Instead of:* "What do you want to build?"
398
- * *Try:* "What part of your home are you dreaming of improving?" or "Are you thinking of a gift for someone special, or a project just for you?"
399
- 3. **Act as a Knowledgeable Guide:** When a user is unsure, proactively suggest appealing ideas based on their subtle clues. Connect their interests to tangible projects.
400
- * *Example:* If the user mentions liking plants and having a small balcony, you could suggest: "That's great! We could think about a vertical herb garden to save space, or maybe some simple, stylish hanging macrame planters. Does either of those spark your interest?"
401
- 4. **Guide, Don't Just Gatekeep:** When an idea *almost* meets the criteria, don't just reject it. Gently guide it towards feasibility.
402
- * *Example:* "A full-sized dining table might require some specialized tools. How about we adapt that idea into a beautiful, buildable coffee table or a set of side tables using similar techniques?"
403
-
404
- **Critical Criteria for the Final DIY Project Idea (Your non-negotiable checklist):**
405
- 1. **Buildable:** Achievable by an average person with basic DIY skills.
406
- 2. **Common Materials/Tools:** Uses only materials (e.g., wood, screws, glue, paint, fabric, cardboard) and basic hand tools (e.g., screwdrivers, hammers, saws, drills) commonly available in general hardware stores, craft stores, or supermarkets worldwide.
407
- 3. **Avoid Specializations:** Explicitly AVOID projects requiring specialized electronic components, 3D printing, specific brand items not universally available, or complex machinery.
408
- 4. **Tangible Product:** The final result must be a physical, tangible item.
409
-
410
- **Your Internal Process (How you think on each turn):**
411
-
412
- 1. **THOUGHT:**
413
- * Clearly state your understanding of the user’s current input and conversational state.
414
- * Outline your plan: Engage with their latest input using your **Conversational Style**. Propose or refine an idea to meet the **Critical Criteria**.
415
- * **Tool Identification (`human_assistance`):** Decide if you need to ask a question. The question should be formulated according to the "Inspiring, Open-Ended Questions" principle. Clearly state your intention to use the `human_assistance` tool with the exact friendly and natural-sounding question as the `query`.
416
- * **Idea Finalization Check:** Check if the current idea satisfies ALL **Critical Criteria**. If yes, and the user shows no objection, move to finalize immediately. Remember: **good enough is final enough**.
417
-
418
- 2. **TOOL USE (`human_assistance` - If Needed):**
419
- * Invoke `human_assistance` with your well-formulated, friendly query.
420
-
421
- 3. **RESPONSE SYNTHESIS / IDEA FINALIZATION:**
422
- * **If an idea is finalized:** Respond *only* with the exact phrase:
423
- `IDEA FINALIZED: [Name of the Idea]`
424
- (e.g., `IDEA FINALIZED: Simple Wooden Spice Rack`)
425
- * **If brainstorming continues:**
426
- * Provide your engaging suggestions or refinements based on your **Conversational Style**.
427
- * Await the user response.
428
-
429
- **General Guidelines (Your core principles):**
430
- * **Empathy Over Pure Efficiency:** A positive, collaborative experience is the primary goal. Don't rush the user if they are still exploring.
431
- * **Criteria Focused:** Always gently guide ideas toward the **Critical Criteria**.
432
- * **One Main Idea at a Time:** Focus the conversation on a single project idea to avoid confusion.
433
- * **Rapid Convergence:** Despite the friendly tone, always be looking for the fastest path to a final, viable idea.
434
- """
435
- )
436
-
437
 
 
438
 
439
  if state.prompt:
440
- final_prompt = "\n".join([ guidance_prompt_text, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
441
  else:
442
- final_prompt = "\n".join([ guidance_prompt_text, ASSISTANT_SYSTEM_PROMPT_BASE])
443
-
444
- prompt = ChatPromptTemplate.from_messages(
445
- [
446
- ("system", final_prompt),
447
- MessagesPlaceholder(variable_name="messages"),
448
- ]
449
- )
450
-
451
- # Tools allowed for brainstorming
452
- node_tools = [human_assistance]
453
- if state.search_enabled and tavily_search_tool: # only add search tool if enabled and initialized
454
- node_tools.append(tavily_search_tool)
455
-
456
-
457
-
458
-
459
- mistraltools = [
460
- {
461
- "type": "function",
462
- "function": {
463
- "name": "human_assistance",
464
- "description": "Ask a question from the user",
465
- "parameters": {
466
- "type": "object",
467
- "properties": {
468
- "query": {
469
- "type": "string",
470
- "query": "The transaction id.",
471
- }
472
- },
473
- "required": ["query"],
474
- },
475
- },
476
- },
477
- {
478
- "type": "function",
479
- "function": {
480
- "name": "finalize_idea",
481
- "description": "Handles finalized ideas. Saves or dispatches the confirmed idea for the next steps. but make sure you give your response with key word IDEA FINALIZED",
482
- "parameters": {
483
- "type": "object",
484
- "properties": {
485
- "idea_name": {
486
- "type": "string",
487
- "description": "The name of the finalized DIY idea.",
488
- }
489
- },
490
- "required": ["idea_name"]
491
- }
492
- }
493
- }
494
- ]
495
- llm = init_chat_model("mistral-large-latest", model_provider="mistralai")
496
-
497
- llm_with_tools = llm.bind_tools(mistraltools)
498
- chain = prompt | llm_with_tools
499
-
500
- openai_messages = convert_to_openai_messages(state.messages)
501
-
502
- openai_messages_with_prompt = [
503
- {"role": "system", "content": final_prompt}, # your guidance prompt
504
- *openai_messages # history you’ve already converted
505
- ]
506
-
507
- print('open ai formatted', openai_messages_with_prompt[-1])
508
-
509
- for msg in openai_messages_with_prompt:
510
- print(msg)
511
-
512
- mistralmodel = "mistral-saba-2502"
513
 
514
- # Pass filtered messages to the chain
515
  try:
516
-
517
- # response = await chain.ainvoke({"messages": filtered_messages}, config=config)
518
- response = client.chat.complete(
519
- model = mistralmodel,
520
- messages = openai_messages_with_prompt,
521
- tools = mistraltools,
522
- tool_choice = "any",
523
- parallel_tool_calls = False,
524
- )
525
-
526
- mistral_message = response.choices[0].message
527
- tool_call = response.choices[0].message.tool_calls[0]
528
- function_name = tool_call.function.name
529
- function_params = json.loads(tool_call.function.arguments)
530
-
531
- ai_message = AIMessage(
532
- content=mistral_message.content or "", # Use empty string if blank
533
- additional_kwargs={
534
- "tool_calls": [
535
- {
536
- "id": tool_call.id,
537
- "function": {
538
- "name": tool_call.function.name,
539
- "arguments": tool_call.function.arguments,
540
- },
541
- "type": "function", # Add this if your chain expects it
542
- }
543
- ]
544
- }
545
- )
546
-
547
  updates = {
548
  "messages": [ai_message],
549
- "tool_calls": [
550
- {
551
- "name": function_name,
552
- "arguments": function_params,
553
- }
554
- ],
555
- "next": function_name,
556
  }
557
 
558
- print("\nfunction_name: ", function_name, "\nfunction_params: ", function_params)
559
- print('\n🔍 response from brainstorm\n', updates)
560
-
561
- if function_name == "finalize_idea":
562
- print('finalazing idea')
563
- state.brainstorming_complete = True
564
- updates["brainstorming_complete"] = True
565
-
566
-
567
- if isinstance(response, AIMessage) and response.content:
568
- print(' Identified last AI message', response)
569
- if isinstance(response.content, str):
570
- content = response.content.strip()
571
- elif isinstance(response.content, list):
572
- texts = [item.get("text", "") for item in response.content if isinstance(item, dict)]
573
- content = " ".join(texts).strip()
574
- else:
575
- content = str(response.content).strip()
576
-
577
- print('content for idea finalizing:', content)
578
- if "finalize_idea:" in content: # Use 'in' instead of 'startswith'
579
- print('✅ final idea')
580
- updates.update({
581
- "brainstorming_complete": True,
582
- "tool_call_required": False,
583
- "loop_brainstorming": False,
584
- })
585
- return updates
586
-
587
- else:
588
- # tool_calls = getattr(response, "tool_calls", None)
589
-
590
-
591
- if tool_call:
592
- print('🛠️ tool call requested at brainstorming node')
593
- updates.update({
594
- "tool_call_required": True,
595
- "loop_brainstorming": False,
596
- })
597
-
598
- if tool_call:
599
- tool_call = response.choices[0].message.tool_calls[0]
600
- function_name = tool_call.function.name
601
- function_params = json.loads(tool_call.function.arguments)
602
- print("\nfunction_name: ", function_name, "\nfunction_params: ", function_params)
603
- # for tool_call in response.tool_calls:
604
- # tool_name = tool_call['name']
605
- # if tool_name == "human_assistance":
606
- # query = tool_call['args']['query']
607
- # print(f"Human input needed: {query}")
608
-
609
- # for tool_call in tool_calls:
610
- # if isinstance(tool_call, dict) and 'name' in tool_call and 'args' in tool_call:
611
- # print(f"🔧 Tool Call (Dict): {tool_call.get('name')}, Args: {tool_call.get('args')}")
612
- # else:
613
- # print(f"🔧 Unknown tool_call format: {tool_call}")
614
- else:
615
- print('💬 decided tp keep brainstorming')
616
- updates.update({
617
- "tool_call_required": False,
618
- "loop_brainstorming": True,
619
- })
620
- print(f"Brainstorming continues: {content}")
621
-
622
  else:
623
- # If no proper response, keep looping brainstorming
624
- updates["tool_call_required"] = False
625
- updates["loop_brainstorming"] = True
 
 
626
 
627
  print("\n🧠🧠 | end | brainstorming Node \n")
628
  return updates
 
629
  except Exception as e:
630
  print(f"Error: {e}")
631
  return {
632
- "messages": [AIMessage(content="Error.")],
633
  "next_stage": "brainstorming"
634
  }
635
 
636
-
637
  async def prompt_planning_node(state: GraphProcessingState, config=None):
638
- print("\n🚩🚩 | start | prompt planing Node \n")
639
- # Ensure we have a model
640
  if not model:
641
  return {"messages": [AIMessage(content="Model not available for planning.")]}
642
 
643
-
644
  filtered_messages = state.messages
645
-
646
- # Filter out empty messages
647
- # filtered_messages = [
648
- # msg for msg in state.messages
649
- # if isinstance(msg, (HumanMessage, AIMessage, SystemMessage, ToolMessage)) and msg.content
650
- # ]
651
- # filtered_messages = []
652
-
653
- # for msg in state.messages:
654
- # if isinstance(msg, ToolMessage):
655
- # # 🛠️ ToolMessage needs to be paired with a prior assistant message that called the tool
656
- # tool_name = msg.name or "unknown_tool"
657
- # tool_call_id = msg.tool_call_id or "tool_call_id_missing"
658
-
659
- # # Simulated assistant message that initiated the tool call
660
- # fake_assistant_msg = AIMessage(
661
- # content="",
662
- # additional_kwargs={
663
- # "tool_calls": [
664
- # {
665
- # "id": tool_call_id,
666
- # "type": "function",
667
- # "function": {
668
- # "name": tool_name,
669
- # "arguments": json.dumps({"content": msg.content or ""}),
670
- # }
671
- # }
672
- # ]
673
- # }
674
- # )
675
-
676
- # # Append both in correct sequence
677
- # filtered_messages.append(fake_assistant_msg)
678
- # filtered_messages.append(msg)
679
-
680
- # elif isinstance(msg, (HumanMessage, AIMessage, SystemMessage)) and msg.content:
681
- # filtered_messages.append(msg)
682
-
683
- # Fallback if list ends up empty
684
  if not filtered_messages:
685
  filtered_messages.append(AIMessage(content="No valid messages provided."))
686
-
687
 
688
- # Define the system prompt for planning
689
  guidance_prompt_text = """
690
- You are a creative and helpful AI assistant acting as a **DIY Project Brainstorming & 3D-Prompt Generator**. Your mission is to collaborate with the user to:
691
 
692
  1. Brainstorm and refine one specific, viable DIY project idea.
693
  2. Identify the single key component from that idea that should be 3D-modeled.
694
- 3. Produce a final, precise text prompt for an OpenAI 3D-generation endpoint.
695
 
696
- ---
697
- **Critical Criteria for the DIY Project** (must be met):
698
- Buildable by an average person with only basic DIY skills.
699
- Uses common materials/tools (e.g., wood, screws, glue, paint; hammer, saw, drill).
700
- • No specialized electronics, 3D printers, or proprietary parts.
701
  • Results in a tangible, physical item.
702
 
703
- ---
704
- **Available Tools**
705
- • human_assistance – ask the user clarifying questions.
706
- • (optional) your project-specific search tool – look up inspiration or standard dimensions if needed.
707
-
708
- ---
709
- **When the DIY idea is fully detailed and meets all criteria, output exactly and only:**
710
-
711
  ACCURATE PROMPT FOR MODEL GENERATING: [Your final single-paragraph prompt here]
712
  """
713
 
714
- # Build final prompt
715
  if state.prompt:
716
  final_prompt = "\n".join([guidance_prompt_text, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
717
  else:
718
  final_prompt = "\n".join([guidance_prompt_text, ASSISTANT_SYSTEM_PROMPT_BASE])
719
 
720
- prompt = ChatPromptTemplate.from_messages([
721
- ("system", final_prompt),
722
- MessagesPlaceholder(variable_name="messages"),
723
- ])
724
-
725
- # Bind tools
726
- node_tools = [human_assistance]
727
- if state.search_enabled and tavily_search_tool:
728
- node_tools.append(tavily_search_tool)
729
-
730
- llm_with_tools = prompt_planning_model.bind_tools(node_tools)
731
- chain = prompt | llm_with_tools
732
-
733
- # print(' 👾👾👾👾Debugging the request going in to prompt planing model')
734
- # print("Prompt: ", prompt)
735
- # print("chain: ", chain)
736
-
737
- for msg in filtered_messages:
738
- print('✨msg : ',msg)
739
- print('\n')
740
-
741
  try:
742
- response = await chain.ainvoke({"messages": filtered_messages}, config=config)
743
-
744
- print('\nresponse ->: ', response)
745
-
746
- # Log any required human assistance query
747
- if hasattr(response, "tool_calls"):
748
- for call in response.tool_calls:
749
- if call.get("name") == "human_assistance":
750
- print(f"Human input needed: {call['args']['query']}")
751
-
752
 
753
-
 
754
  updates = {"messages": [response]}
755
 
756
- # Extract response text
757
- content = ""
758
- if isinstance(response.content, str):
759
- content = response.content.strip()
760
- elif isinstance(response.content, list):
761
- content = " ".join(item.get("text","") for item in response.content if isinstance(item, dict)).strip()
762
-
763
- # Check for finalization signalif "finalize_idea:" in content:
764
- if "ACCURATE PROMPT FOR MODEL GENERATING" in content:
765
- dalle_prompt_text = content.replace("ACCURATE PROMPT FOR MODEL GENERATING:", "").strip()
766
- print(f"\n🤖🤖🤖🤖Extracted DALL-E prompt: {dalle_prompt_text}")
767
-
768
- generated_image_url = None
769
- generated_3d_model_url = None # This will store the final 3D model URL
770
-
771
- # --- START: New code for DALL-E and Trellis API calls ---
772
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
773
- if not OPENAI_API_KEY:
774
- print("Error: OPENAI_API_KEY environment variable not set.")
775
- updates["messages"].append(AIMessage(content="OpenAI API key not configured. Cannot generate image."))
776
- else:
777
-
778
- # try:
779
- # --- Your existing client setup ---
780
-
781
- # prompt = dalle_prompt_text
782
- # model_id = "black-forest-labs/FLUX.1-dev" # or any other model
783
-
784
- # print(f"Generating image for prompt: '{prompt}' with model '{model_id}'...")
785
- # # output is a PIL.Image object
786
- # image = huggingfaceclient.text_to_image(
787
- # prompt,
788
- # model=model_id,
789
- # )
790
- # print("Image generated successfully.")
791
-
792
- # # --- Code to save the image ---
793
-
794
- # # 1. Define the directory name
795
- # output_directory = "files"
796
-
797
- # os.makedirs(output_directory, exist_ok=True)
798
- # print(f"Ensured directory '{output_directory}' exists.")
799
-
800
- # image_filename = "astronaut_horse.png"
801
-
802
- # full_save_path = os.path.join(output_directory, image_filename)
803
-
804
- # # 5. Save the PIL.Image object
805
- # # The image object (if it's a PIL.Image) has a .save() method
806
- # image.save(full_save_path)
807
-
808
- # print(f"Image saved successfully to: {full_save_path}")
809
-
810
- # if image:
811
- # print("\nAttempting to upload generated image to Supabase...")
812
-
813
- # # Define the filename for Supabase (can include a path prefix)
814
- # supabase_target_filename = f"hf_generated_{uuid}" # Example: put in a 'hf_generated' folder
815
-
816
- # # 1. Save the PIL image to a temporary in-memory buffer
817
- # img_byte_arr = io.BytesIO()
818
- # image.save(img_byte_arr, format='JPEG') # Match Dart's 'image/jpeg'
819
- # img_byte_arr.seek(0) # Reset buffer's position to the beginning
820
-
821
- # # Prepare the file for the multipart/form-data request
822
- # # The field name 'file' and 'filename' should match what your Edge Function expects.
823
- # files_payload = {
824
- # 'file': (supabase_target_filename, img_byte_arr, 'image/jpeg')
825
- # }
826
-
827
- # # Headers (Content-Type for multipart/form-data is set automatically by requests
828
- # # when using the `files` parameter, but you can set other headers if your edge function needs them)
829
- # upload_headers = {
830
- # # 'Authorization': 'Bearer YOUR_SUPABASE_ANON_KEY_OR_SERVICE_KEY_IF_EDGE_FUNCTION_NEEDS_IT'
831
- # }
832
-
833
- # print(f"Uploading image to Supabase Edge Function: as {supabase_target_filename}...")
834
- # supabase_public_url = None
835
- # try:
836
- # upload_response = requests.post(
837
- # 'https://yqewezudxihyadvmfovd.supabase.co/functions/v1/storage-upload',
838
- # files=files_payload,
839
- # headers=upload_headers
840
- # )
841
- # upload_response.raise_for_status() # Raise an HTTPError for bad responses (4XX or 5XX)
842
-
843
- # # 2. Parse the response from the Edge Function
844
- # # The Dart code expects: imgresponse.data['data']['path']
845
- # response_json = upload_response.json()
846
- # if 'data' in response_json and 'path' in response_json['data']:
847
- # raw_path = response_json['data']['path']
848
- # print(f"Edge function returned raw path: {raw_path}")
849
-
850
- # # 3. Construct the public URL
851
- # # The public URL format for Supabase Storage is:
852
- # # SUPABASE_URL/storage/v1/object/public/BUCKET_NAME/FILE_PATH
853
- # # The FILE_PATH needs to be URL encoded.
854
- # encoded_path = quote(raw_path)
855
- # # generated_image_url = f"{encoded_path}"'https://yqewezudxihyadvmfovd.supabase.co/storage/v1/object/public/product_images/$encodedPath';
856
- # generated_image_url =f"https://yqewezudxihyadvmfovd.supabase.co/storage/v1/object/public/product_images/{encoded_path}"
857
-
858
- # print(f"\nSuccessfully uploaded to Supabase!")
859
- # print(f"Public URL: {generated_image_url}")
860
- # else:
861
- # print(f"Error: Unexpected response format from Edge Function: {response_json}")
862
- # print("\nFailed to upload image to Supabase.")
863
-
864
- # except requests.exceptions.RequestException as e_upload:
865
- # print(f"Error uploading to Supabase: {e_upload}")
866
- # if hasattr(e_upload, 'response') and e_upload.response is not None:
867
- # print(f"Supabase Response status: {e_upload.response.status_code}")
868
- # print(f"Supabase Response text: {e_upload.response.text}")
869
- # print("\nFailed to upload image to Supabase.")
870
- # except Exception as e_upload_generic:
871
- # print(f"An unexpected error occurred during Supabase upload: {e_upload_generic}")
872
- # print("\nFailed to upload image to Supabase.")
873
- # else:
874
- # print("No image was generated, skipping Supabase upload.")
875
-
876
- # except KeyError:
877
- # print("Error: The HF_TOKEN environment variable is not set.")
878
- # print("Please set it before running the script. For example:")
879
- # print(" export HF_TOKEN='your_hugging_face_api_token'")
880
- # except ImportError:
881
- # print("Error: The Pillow (PIL) library might not be installed correctly.")
882
- # print("If 'image' is a PIL.Image object, Pillow is required to save it.")
883
- # print("You might need to install it: pip install Pillow huggingface_hub")
884
- # except Exception as e:
885
- # print(f"An error occurred: {e}")
886
- # print("Make sure your API token is valid, has the necessary permissions,")
887
- # print(f"and the model '{model_id}' is accessible and compatible.")
888
- # 1. Call DALL-E API
889
- dalle_api_url = "https://api.openai.com/v1/images/generations"
890
- dalle_headers = {
891
- "Content-Type": "application/json",
892
- "Authorization": f"Bearer {OPENAI_API_KEY}"
893
- }
894
-
895
- _model_to_use_for_dalle_call = "dall-e-2" # <<< IMPORTANT: Set this to "dall-e-2" or "dall-e-3"
896
 
897
-
898
- _processed_prompt_text = dalle_prompt_text # Start with the original prompt
899
- _prompt_was_trimmed_or_issue_found = False
900
- _warning_or_error_message_for_updates = None
901
-
902
- max_prompt_lengths = {
903
- "dall-e-2": 1000,
904
- "dall-e-3": 4000,
905
- "gpt-image-1": 32000 # Included for completeness, though payload is for DALL-E
906
- }
907
-
908
- if not _processed_prompt_text: # Check for empty prompt
909
- _message = f"Error: The DALL-E prompt for model '{_model_to_use_for_dalle_call}' cannot be empty. API call will likely fail."
910
- print(f"\n🛑🛑🛑🛑 {_message}")
911
- _warning_or_error_message_for_updates = _message
912
- _prompt_was_trimmed_or_issue_found = True
913
- # NOTE: OpenAI API will return an error for an empty prompt.
914
- # If you want to prevent the call entirely here, you could add:
915
- # updates["messages"].append(AIMessage(content=_message))
916
- # return # or raise an exception
917
-
918
- elif _model_to_use_for_dalle_call in max_prompt_lengths:
919
- _max_len = max_prompt_lengths[_model_to_use_for_dalle_call]
920
- _original_len = len(_processed_prompt_text)
921
-
922
- if _original_len > _max_len:
923
- _processed_prompt_text = _processed_prompt_text[:_max_len]
924
- _message = (
925
- f"Warning: Prompt for DALL-E ({_model_to_use_for_dalle_call}) was {_original_len} characters. "
926
- f"It has been TRUNCATED to the maximum of {_max_len} characters."
927
- )
928
- print(f"\n⚠️⚠️⚠️⚠️ {_message}")
929
- _warning_or_error_message_for_updates = _message
930
- _prompt_was_trimmed_or_issue_found = True
931
- else:
932
- # Model specified in _model_to_use_for_dalle_call is not in our length check dictionary
933
- _message = (
934
- f"Notice: Model '{_model_to_use_for_dalle_call}' not found in pre-defined prompt length limits. "
935
- "Proceeding with the original prompt. API may reject if prompt is too long for this model."
936
- )
937
- print(f"\nℹ️ℹ️ℹ️ℹ️ {_message}")
938
- # You might not want to add this specific notice to 'updates["messages"]' unless it's critical
939
- # _warning_or_error_message_for_updates = _message
940
- # _prompt_was_trimmed_or_issue_found = True # Or not, depending on how you view this
941
-
942
- # Add warning/error to updates if one was generated
943
- if _warning_or_error_message_for_updates:
944
- # Check if 'updates' and 'AIMessage' are available in the current scope to avoid errors
945
- if 'updates' in locals() and isinstance(updates, dict) and 'messages' in updates and 'AIMessage' in globals():
946
- updates["messages"].append(AIMessage(content=_warning_or_error_message_for_updates))
947
- elif 'updates' in globals() and isinstance(updates, dict) and 'messages' in updates: # If AIMessage isn't defined, just append string
948
- updates["messages"].append(_warning_or_error_message_for_updates)
949
-
950
-
951
- # --- Prompt Trimming Logic END ---
952
-
953
- dalle_payload = {
954
- "model": _model_to_use_for_dalle_call, # Use the model determined above
955
- "prompt": _processed_prompt_text, # Use the processed (potentially trimmed) prompt
956
- "n": 1,
957
- "size": "1024x1024"
958
- # You can add other DALL-E 3 specific params if _model_to_use_for_dalle_call is "dall-e-3"
959
- # e.g., "quality": "hd", "style": "vivid"
960
- }
961
-
962
- print(f"\n🤖🤖🤖🤖Calling DALL-E with prompt: {dalle_prompt_text}")
963
- async with aiohttp.ClientSession() as session:
964
- try:
965
- async with session.post(dalle_api_url, headers=dalle_headers, json=dalle_payload) as dalle_response:
966
- dalle_response.raise_for_status() # Raise an exception for HTTP errors
967
- dalle_data = await dalle_response.json()
968
- if dalle_data.get("data") and len(dalle_data["data"]) > 0:
969
- generated_image_url = dalle_data["data"][0].get("url")
970
- print(f"DALL-E generated image URL: {generated_image_url}")
971
- updates["messages"].append(AIMessage(content=f"Image generated by DALL-E: {generated_image_url}"))
972
- else:
973
- print("Error: DALL-E API did not return image data.")
974
- updates["messages"].append(AIMessage(content="Failed to get image from DALL-E."))
975
- except aiohttp.ClientError as e:
976
- print(f"DALL-E API call error: {e}")
977
- updates["messages"].append(AIMessage(content=f"Error calling DALL-E: {e}"))
978
- except json.JSONDecodeError as e:
979
- print(f"DALL-E API JSON decode error: {e}. Response: {await dalle_response.text()}")
980
- updates["messages"].append(AIMessage(content=f"Error decoding DALL-E response: {e}"))
981
- except Exception as e:
982
- print(f"Unexpected error during DALL-E processing: {e}")
983
- updates["messages"].append(AIMessage(content=f"Unexpected error with DALL-E: {e}"))
984
 
985
  updates.update({
986
  "generated_image_url_from_dalle": generated_image_url,
987
  "planning_complete": True,
@@ -990,7 +552,8 @@ ACCURATE PROMPT FOR MODEL GENERATING: [Your final single-paragraph prompt here]
990
  })
991
  else:
992
  # Check if a tool call was requested
993
- if getattr(response, "tool_calls", None):
 
994
  updates.update({
995
  "tool_call_required": True,
996
  "loop_planning": False,
@@ -1001,7 +564,7 @@ ACCURATE PROMPT FOR MODEL GENERATING: [Your final single-paragraph prompt here]
1001
  "loop_planning": True,
1002
  })
1003
 
1004
- print("\n🚩🚩 | end | prompt planing Node \n")
1005
  return updates
1006
 
1007
  except Exception as e:
@@ -1013,154 +576,76 @@ ACCURATE PROMPT FOR MODEL GENERATING: [Your final single-paragraph prompt here]
1013
 
1014
  async def generate_3d_node(state: GraphProcessingState, config=None):
1015
  print("\n🚀🚀🚀 | start | Generate 3D Node 🚀🚀🚀\n")
1016
- # 1. Get the image URL
1017
- # For now, using a hardcoded URL as requested for testing.
1018
- # In a real scenario, you might get this from the state:
1019
- # image_url = state.get("image_url_for_3d")
1020
- # if not image_url:
1021
- # print("No image_url_for_3d found in state.")
1022
- # return {"messages": [AIMessage(content="No image URL found for 3D generation.")]}
1023
-
1024
  hardcoded_image_url = state.generated_image_url_from_dalle
1025
- print(f"Using hardcoded image_url: {hardcoded_image_url}")
1026
-
1027
- # 2. Define API endpoint and parameters
1028
- api_base_url = "https://wishwa-code--trellis-3d-model-generate-dev.modal.run/"
1029
- params = {
1030
- "image_url": hardcoded_image_url,
1031
- "simplify": "0.95",
1032
- "texture_size": "1024",
1033
- "sparse_sampling_steps": "12",
1034
- "sparse_sampling_cfg": "7.5",
1035
- "slat_sampling_steps": "12",
1036
- "slat_sampling_cfg": "3",
1037
- "seed": "42",
1038
- "output_format": "glb"
1039
- }
1040
-
1041
- # Create a directory to store generated models if it doesn't exist
1042
- output_dir = "generated_3d_models"
1043
- os.makedirs(output_dir, exist_ok=True)
1044
-
1045
- # 3. Attempt generation with retries
1046
- for attempt in range(1, 2):
1047
- print(f"Attempt {attempt} to call 3D generation API...")
1048
- try:
1049
- # Note: The API call can take a long time (1.5 mins in your curl example)
1050
- # Ensure your HTTP client timeout is sufficient.
1051
- # httpx default timeout is 5 seconds, which is too short.
1052
- async with httpx.AsyncClient(timeout=120.0) as client: # Timeout set to 120 seconds
1053
- response = await client.get(api_base_url, params=params)
1054
- response.raise_for_status() # Raises an HTTPStatusError for 4XX/5XX responses
1055
-
1056
- # Successfully got a response
1057
- if response.status_code == 200:
1058
- # Assuming the response body is the .glb file content
1059
- file_name = f"model_{uuid.uuid4()}.glb"
1060
- file_path = os.path.join(output_dir, file_name)
1061
-
1062
- with open(file_path, "wb") as f:
1063
- f.write(response.content)
1064
-
1065
- print(f"Success: 3D model saved to {file_path}")
1066
- return {
1067
- "messages": [AIMessage(content=f"3D object generation successful: {file_path}")],
1068
- "generate_3d_complete": True,
1069
- "three_d_model_path": file_path,
1070
- "next_stage": state.get("next_stage") or 'end' # Use .get for safer access
1071
- }
1072
- else:
1073
- # This case might not be reached if raise_for_status() is used effectively,
1074
- # but good for explicit handling.
1075
- error_message = f"API returned status {response.status_code}: {response.text}"
1076
- print(error_message)
1077
- if attempt == 3: # Last attempt
1078
- return {"messages": [AIMessage(content=f"Failed to generate 3D object. Last error: {error_message}")]}
1079
-
1080
- except httpx.HTTPStatusError as e:
1081
- error_message = f"HTTP error occurred: {e.response.status_code} - {e.response.text}"
1082
- print(error_message)
1083
- if attempt == 3:
1084
- return {"messages": [AIMessage(content=f"Failed to generate 3D object after 3 attempts. Last HTTP error: {error_message}")]}
1085
- except httpx.RequestError as e: # Catches network errors, timeout errors etc.
1086
- error_message = f"Request error occurred: {str(e)}"
1087
- print(error_message)
1088
- if attempt == 3:
1089
- return {"messages": [AIMessage(content=f"Failed to generate 3D object after 3 attempts. Last request error: {error_message}")]}
1090
- except Exception as e:
1091
- error_message = f"An unexpected error occurred: {str(e)}"
1092
- print(error_message)
1093
- if attempt == 3:
1094
- return {"messages": [AIMessage(content=f"Failed to generate 3D object after 3 attempts. Last unexpected error: {error_message}")]}
1095
-
1096
- if attempt < 2:
1097
- print("Retrying...")
1098
- else:
1099
- print("Max retries reached.")
1100
-
1101
 
1102
- # Failed after retries (this path should ideally be covered by returns in the loop)
1103
- return {"messages": [AIMessage(content="Failed to generate a valid 3D object after 3 attempts.")]}
 
 
 
 
1104
 
1105
  def define_workflow() -> CompiledStateGraph:
1106
  """Defines the workflow graph"""
1107
- # Initialize the graph
1108
  workflow = StateGraph(GraphProcessingState)
1109
 
1110
  # Add nodes
1111
  workflow.add_node("tools", DebugToolNode(tools))
1112
-
1113
  workflow.add_node("guidance_node", guidance_node)
1114
  workflow.add_node("brainstorming_node", brainstorming_node)
1115
  workflow.add_node("prompt_planning_node", prompt_planning_node)
1116
  workflow.add_node("generate_3d_node", generate_3d_node)
1117
 
1118
- # workflow.add_node("planning_node", planning_node)
1119
-
1120
  # Edges
1121
-
1122
  workflow.add_conditional_edges(
1123
  "guidance_node",
1124
  guidance_routing,
1125
- {
1126
- "brainstorming_node" : "brainstorming_node",
1127
- "prompt_planning_node" : "prompt_planning_node",
1128
- "generate_3d_node" : "generate_3d_node"
1129
- }
1130
- )
1131
-
1132
- workflow.add_conditional_edges(
1133
- "brainstorming_node",
1134
- tools_condition,
1135
  )
1136
 
1137
- workflow.add_conditional_edges(
1138
- "prompt_planning_node",
1139
- tools_condition,
1140
- )
1141
  workflow.add_edge("tools", "guidance_node")
1142
  workflow.add_edge("brainstorming_node", "guidance_node")
1143
  workflow.add_edge("prompt_planning_node", "guidance_node")
1144
  workflow.add_edge("generate_3d_node", "guidance_node")
1145
 
1146
-
1147
- # workflow.add_conditional_edges(
1148
- # "guidance_node", # The source node
1149
- # custom_route_after_guidance, # Your custom condition function
1150
- # {
1151
- # # "Path name": "Destination node name"
1152
- # "execute_tools": "tools", # If function returns "execute_tools"
1153
- # "proceed_to_next_stage": "planning_node" # If function returns "proceed_to_next_stage"
1154
- # # Or this could be another router, or END
1155
- # }
1156
- # )
1157
- # workflow.add_conditional_edges("guidance_node", guidance_routing)
1158
- # workflow.add_conditional_edges("brainstorming_node", brainstorming_routing)
1159
-
1160
- # # Set end nodes
1161
  workflow.set_entry_point("guidance_node")
1162
- # workflow.set_finish_point("assistant_node")
1163
  compiled_graph = workflow.compile(checkpointer=memory)
 
1164
  try:
1165
  img_bytes = compiled_graph.get_graph().draw_mermaid_png()
1166
  with open("graph.png", "wb") as f:
@@ -1170,250 +655,39 @@ def define_workflow() -> CompiledStateGraph:
1170
  print("Can't print the graph:")
1171
  print(e)
1172
 
1173
-
1174
  return compiled_graph
1175
 
1176
  graph = define_workflow()
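Because the graph is compiled with the MemorySaver checkpointer, callers have to supply a thread id in the config; a minimal invocation sketch (thread id and message are illustrative, not part of this commit):

config = {"configurable": {"thread_id": str(uuid.uuid4())}}
result = await graph.ainvoke(                       # inside an async context
    {"messages": [HumanMessage(content="Help me plan a DIY project")]},
    config=config,
)
print(result["messages"][-1].content)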
1177
 
1178
-
1179
-
1180
-
1181
-
1182
-
1183
-
1184
-
1185
-
1186
-
1187
-
1188
-
1189
-
1190
-
1191
-
1192
-
1193
- # async def assistant_node(state: GraphProcessingState, config=None):
1194
- # print("\n--- Assistance Node (Debug via print) ---") # Added a newline for clarity
1195
-
1196
-
1197
- # print(f"Prompt: {state.prompt}")
1198
-
1199
- # print(f"Tools Enabled: {state.tools_enabled}")
1200
- # print(f"Search Enabled: {state.search_enabled}")
1201
- # print(f"Next Stage: {state.next_stage}")
1202
-
1203
-
1204
- # # Log boolean completion flags
1205
- # print(f"Idea Complete: {state.idea_complete}")
1206
- # print(f"Brainstorming Complete: {state.brainstorming_complete}")
1207
- # print(f"Planning Complete: {state.planning_complete}")
1208
- # print(f"Drawing Complete: {state.drawing_complete}")
1209
- # print(f"Product Searching Complete: {state.product_searching_complete}")
1210
- # print(f"Purchasing Complete: {state.purchasing_complete}")
1211
- # print("--- End Guidance Node Debug ---") # Added for clarity
1212
- # print(f"\nMessage: {state.messages}")
1213
- # assistant_tools = []
1214
- # if state.tools_enabled.get("download_website_text", True):
1215
- # assistant_tools.append(download_website_text)
1216
- # if search_enabled and state.tools_enabled.get("tavily_search_results_json", True):
1217
- # assistant_tools.append(tavily_search_tool)
1218
- # assistant_model = model.bind_tools(assistant_tools)
1219
- # if state.prompt:
1220
- # final_prompt = "\n".join([state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
1221
- # else:
1222
- # final_prompt = ASSISTANT_SYSTEM_PROMPT_BASE
1223
-
1224
- # prompt = ChatPromptTemplate.from_messages(
1225
- # [
1226
- # ("system", final_prompt),
1227
- # MessagesPlaceholder(variable_name="messages"),
1228
- # ]
1229
- # )
1230
- # chain = prompt | assistant_model
1231
- # response = await chain.ainvoke({"messages": state.messages}, config=config)
1232
-
1233
- # for msg in response:
1234
- # if isinstance(msg, HumanMessage):
1235
- # print("Human:", msg.content)
1236
- # elif isinstance(msg, AIMessage):
1237
- # if isinstance(msg.content, list):
1238
- # ai_texts = [part.get("text", "") for part in msg.content if isinstance(part, dict)]
1239
- # print("AI:", " ".join(ai_texts))
1240
- # else:
1241
- # print("AI:", msg.content)
1242
-
1243
- # idea_complete = evaluate_idea_completion(response)
1244
-
1245
- # return {
1246
- # "messages": response,
1247
- # "idea_complete": idea_complete
1248
- # }
1249
-
1250
- # # message = llm_with_tools.invoke(state["messages"])
1251
- # # Because we will be interrupting during tool execution,
1252
- # # we disable parallel tool calling to avoid repeating any
1253
- # # tool invocations when we resume.
1254
- # assert len(response.tool_calls) <= 1
1255
- # idea_complete = evaluate_idea_completion(response)
1256
-
1257
- # return {
1258
- # "messages": response,
1259
- # "idea_complete": idea_complete
1260
- # }
1261
-
1262
-
1263
-
1264
-
1265
- #
1266
-
1267
-
1268
- # async def planning_node(state: GraphProcessingState, config=None):
1269
- # # Define the system prompt for planning
1270
- # planning_prompt = "Based on the user's idea, create a detailed step-by-step plan to build the DIY product."
1271
-
1272
- # # Combine the planning prompt with any existing prompts
1273
- # if state.prompt:
1274
- # final_prompt = "\n".join([planning_prompt, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
1275
- # else:
1276
- # final_prompt = "\n".join([planning_prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
1277
-
1278
- # # Create the prompt template
1279
- # prompt = ChatPromptTemplate.from_messages(
1280
- # [
1281
- # ("system", final_prompt),
1282
- # MessagesPlaceholder(variable_name="messages"),
1283
- # ]
1284
- # )
1285
-
1286
- # # Bind tools if necessary
1287
- # assistant_tools = []
1288
- # if state.tools_enabled.get("download_website_text", True):
1289
- # assistant_tools.append(download_website_text)
1290
- # if search_enabled and state.tools_enabled.get("tavily_search_results_json", True):
1291
- # assistant_tools.append(tavily_search_tool)
1292
- # assistant_model = model.bind_tools(assistant_tools)
1293
-
1294
- # # Create the chain and invoke it
1295
- # chain = prompt | assistant_model
1296
- # response = await chain.ainvoke({"messages": state.messages}, config=config)
1297
-
1298
- # return {
1299
- # "messages": response
1300
- # }
1301
-
1302
-
1303
-
1304
- # async def guidance_node(state: GraphProcessingState, config=None):
1305
- # print("\n--- Guidance Node (Debug via print) ---")
1306
-
1307
- # print(f"Prompt: {state.prompt}")
1308
- # for message in state.messages:
1309
- # if isinstance(message, HumanMessage):
1310
- # print(f"Human: {message.content}")
1311
- # elif isinstance(message, AIMessage):
1312
- # if message.content:
1313
- # if isinstance(message.content, list):
1314
- # texts = [item.get('text', '') for item in message.content if isinstance(item, dict) and 'text' in item]
1315
- # if texts:
1316
- # print(f"AI: {' '.join(texts)}")
1317
- # elif isinstance(message.content, str):
1318
- # print(f"AI: {message.content}")
1319
- # elif isinstance(message, SystemMessage):
1320
- # print(f"System: {message.content}")
1321
- # elif isinstance(message, ToolMessage):
1322
- # print(f"Tool: {message.content}")
1323
-
1324
- # print(f"Tools Enabled: {state.tools_enabled}")
1325
- # print(f"Search Enabled: {state.search_enabled}")
1326
- # print(f"Next Stage: {state.next_stage}")
1327
-
1328
-
1329
- # print(f"Brainstorming Complete: {state.brainstorming_complete}")
1330
 
 
 
1331
 
1332
- # guidance_node.count = getattr(guidance_node, 'count', 0) + 1
1333
- # print('\nGuidance Node called count', guidance_node.count)
1334
- # print("\n--- End Guidance Node Debug ---")
1335
-
1336
- # stage_order = ["brainstorming", "planning", "drawing", "product_searching", "purchasing"]
1337
- # completed = [stage for stage in stage_order if getattr(state, f"{stage}_complete", False)]
1338
- # incomplete = [stage for stage in stage_order if not getattr(state, f"{stage}_complete", False)]
1339
-
1340
- # if not incomplete:
1341
- # print("All stages complete!")
1342
- # # Handle case where all stages are complete
1343
- # # You might want to return a message and end, or set proposed_next_stage to a special value
1344
- # ai_all_complete_msg = AIMessage(content="All DIY project stages are complete!")
1345
- # return {
1346
- # "messages": current_messages + [ai_all_complete_msg],
1347
- # "next_stage": "end_project", # Or None, or a final summary node
1348
- # "pending_approval_stage": None,
1349
- # }
1350
- # else:
1351
- # # THIS LINE DEFINES THE VARIABLE
1352
- # proposed_next_stage = incomplete[0]
1353
-
1354
- # print(f"Proposed next stage: {proposed_next_stage}")
1355
-
1356
- # status_summary = f"Completed stages: {completed}\nIncomplete stages: {incomplete}"
1357
-
1358
- # guidance_prompt_text = (
1359
- # "You are the Guiding Assistant for a DIY project. Your primary responsibility is to determine the next logical step "
1360
- # "and then **obtain the user's explicit approval** before proceeding.\n\n"
1361
- # f"CURRENT PROJECT STATUS:\n{status_summary}\n\n"
1362
- # f"Based on the status, the most logical next stage appears to be: **'{proposed_next_stage}'**.\n\n"
1363
- # "YOUR TASK:\n"
1364
- # f"1. Formulate a clear and concise question for the user, asking if they agree to proceed to the **'{proposed_next_stage}'** stage. For example: 'It looks like '{proposed_next_stage}' is next. Shall we proceed with that?' or 'Are you ready to move on to {proposed_next_stage}?'\n"
1365
- # "2. **You MUST use the 'human_assistance' tool to ask this question.** Do not answer directly. Invoke the tool with your question.\n"
1366
- # "Example of tool usage (though you don't write this, you *call* the tool):\n"
1367
- # "Tool Call: human_assistance(query='The next stage is planning. Do you want to proceed with planning?')\n\n"
1368
- # "Consider the user's most recent message if it provides any preference."
1369
- # )
1370
-
1371
- # if state.prompt:
1372
- # final_prompt = "\n".join([guidance_prompt_text, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
1373
- # else:
1374
- # final_prompt = "\n".join([guidance_prompt_text, ASSISTANT_SYSTEM_PROMPT_BASE])
1375
-
1376
- # prompt = ChatPromptTemplate.from_messages(
1377
- # [
1378
- # ("system", final_prompt),
1379
- # MessagesPlaceholder(variable_name="messages"),
1380
- # ]
1381
- # )
1382
-
1383
- # assistant_model = model.bind_tools([human_assistance])
1384
-
1385
- # chain = prompt | assistant_model
1386
-
1387
- # try:
1388
- # response = await chain.ainvoke({"messages": state.messages}, config=config)
1389
-
1390
- # for msg in response:
1391
- # if isinstance(msg, HumanMessage):
1392
- # print("Human:", msg.content)
1393
- # elif isinstance(msg, AIMessage):
1394
- # if isinstance(msg.content, list):
1395
- # ai_texts = [part.get("text", "") for part in msg.content if isinstance(part, dict)]
1396
- # print("AI:", " ".join(ai_texts))
1397
- # else:
1398
- # print("AI:", msg.content)
1399
-
1400
- # # Check for tool calls in the response
1401
- # if hasattr(response, "tool_calls"):
1402
- # for tool_call in response.tool_calls:
1403
- # tool_name = tool_call['name']
1404
- # if tool_name == "human_assistance":
1405
- # query = tool_call['args']['query']
1406
- # print(f"Human input needed: {query}")
1407
- # # Handle human assistance tool call
1408
- # # You can pause execution and wait for user input here
1409
-
1410
- # return {
1411
- # "messages": [response],
1412
- # "next_stage": incomplete[0] if incomplete else "brainstorming"
1413
- # }
1414
- # except Exception as e:
1415
- # print(f"Error in guidance node: {e}")
1416
- # return {
1417
- # "messages": [AIMessage(content="Error in guidance node.")],
1418
- # "next_stage": "brainstorming"
1419
- # }
 
 
 
 import logging
 import os
 import uuid

 from trafilatura import extract

 from huggingface_hub import InferenceClient
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+ import torch

 from langchain_core.messages import AIMessage, HumanMessage, AnyMessage, ToolCall, SystemMessage, ToolMessage
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 from langchain_core.tools import tool

 from langchain_community.tools import TavilySearchResults
+ from langchain_huggingface import HuggingFacePipeline

 from langgraph.graph.state import CompiledStateGraph
 from langgraph.graph import StateGraph, START, END, add_messages

 from langgraph.types import Command, interrupt


 class State(TypedDict):
     messages: Annotated[list, add_messages]

 def evaluate_idea_completion(response) -> bool:
     """
     Evaluates whether the assistant's response indicates a complete DIY project idea.
     """
     required_keywords = ["materials", "dimensions", "tools", "steps"]

     if isinstance(response, dict):
         response_text = ' '.join(str(value).lower() for value in response.values())
     elif isinstance(response, str):
         response_text = response.lower()
     else:
         response_text = str(response).lower()

     return all(keyword in response_text for keyword in required_keywords)
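
A minimal usage sketch of evaluate_idea_completion with hypothetical response strings (the exact wording is illustrative only):

partial = "You will need wood and screws as materials, plus a saw from your tools."
complete = partial + " Cut everything to these dimensions and follow the assembly steps."
assert evaluate_idea_completion(partial) is False    # "dimensions" and "steps" are missing
assert evaluate_idea_completion(complete) is True    # all four required keywords are present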
 
 @tool
 async def human_assistance(query: str) -> str:
     """Request assistance from a human."""
+     human_response = await interrupt({"query": query})
     return human_response["data"]

 @tool

     """Marks the brainstorming phase as complete. This function does nothing else."""
     return "Brainstorming finalized."

+ tools = [download_website_text, human_assistance, finalize_idea]
 memory = MemorySaver()

 if search_enabled:
     tavily_search_tool = TavilySearchResults(
         max_results=5,

 else:
     print("TAVILY_API_KEY environment variable not found. Websearch disabled")

+ # Initialize Hugging Face models
+ print("Loading transformer models...")
+
+ # Option 1: Use Hugging Face Inference API (recommended for production)
+ def create_hf_inference_model(model_name="microsoft/DialoGPT-medium"):
+     """Create a Hugging Face Inference API client"""
+     hf_token = os.environ.get("HF_TOKEN")
+     if not hf_token:
+         print("Warning: HF_TOKEN not found. Some features may not work.")
+         return None
+
+     return InferenceClient(
+         model=model_name,
+         token=hf_token,
+     )
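
A minimal sketch of exercising the client returned above, assuming HF_TOKEN is set and the chosen model is served by the Inference API (the prompt text is illustrative):

client = create_hf_inference_model("microsoft/DialoGPT-medium")
if client is not None:
    # InferenceClient.text_generation is synchronous and returns the generated string
    print(client.text_generation("Suggest a simple weekend DIY project.", max_new_tokens=64, temperature=0.7))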
+
+ # Option 2: Load local model (for offline use)
+ def create_local_model(model_name="microsoft/DialoGPT-small"):
+     """Create a local transformer model"""
+     try:
+         device = "cuda" if torch.cuda.is_available() else "cpu"
+         print(f"Loading {model_name} on {device}")
+
+         tokenizer = AutoTokenizer.from_pretrained(model_name)
+         model = AutoModelForCausalLM.from_pretrained(
+             model_name,
+             torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+             device_map="auto" if device == "cuda" else None,
+         )
+
+         # Add padding token if missing
+         if tokenizer.pad_token is None:
+             tokenizer.pad_token = tokenizer.eos_token
+
+         text_generator = pipeline(
+             "text-generation",
+             model=model,
+             tokenizer=tokenizer,
+             device_map="auto" if device == "cuda" else None,
+             torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+             max_new_tokens=512,
+             do_sample=True,
+             temperature=0.7,
+             pad_token_id=tokenizer.eos_token_id,
+         )
+
+         return HuggingFacePipeline(pipeline=text_generator)
+     except Exception as e:
+         print(f"Error loading local model: {e}")
+         return None
+
+ # Option 3: Use Llama via Hugging Face (requires more resources)
+ def create_llama_model():
+     """Create Llama model - requires significant GPU memory"""
+     try:
+         model_name = "meta-llama/Llama-2-7b-chat-hf"  # or "meta-llama/Llama-3.2-3B"
+
+         tokenizer = AutoTokenizer.from_pretrained(model_name)
+         model = AutoModelForCausalLM.from_pretrained(
+             model_name,
+             torch_dtype=torch.float16,
+             device_map="auto",
+             load_in_8bit=True,  # Use 8-bit quantization to save memory
+         )
+
+         if tokenizer.pad_token is None:
+             tokenizer.pad_token = tokenizer.eos_token
+
+         text_generator = pipeline(
+             "text-generation",
+             model=model,
+             tokenizer=tokenizer,
+             device_map="auto",
+             torch_dtype=torch.float16,
+             max_new_tokens=512,
+             do_sample=True,
+             temperature=0.7,
+         )
+
+         return HuggingFacePipeline(pipeline=text_generator)
+     except Exception as e:
+         print(f"Error loading Llama model: {e}")
+         return None
+
+ # Choose which model to use
+ MODEL_TYPE = os.environ.get("MODEL_TYPE", "local")  # Options: "inference", "local", "llama"
+
+ if MODEL_TYPE == "inference":
+     # Use Hugging Face Inference API
+     hf_client = create_hf_inference_model("microsoft/DialoGPT-medium")
+     model = hf_client
+ elif MODEL_TYPE == "llama":
+     # Use local Llama model
+     model = create_llama_model()
+ elif MODEL_TYPE == "local":
+     # Use local lightweight model
+     model = create_local_model("microsoft/DialoGPT-small")
+ else:
+     print("Invalid MODEL_TYPE. Using local model as fallback.")
+     model = create_local_model("microsoft/DialoGPT-small")
+
+ # Fallback to a simple model if primary model fails
+ if model is None:
+     print("Primary model failed to load. Using fallback model...")
+     model = create_local_model("distilgpt2")
+
+ # Set all model references to use the same transformer model
+ weak_model = model
+ assistant_model = model
+ prompt_planning_model = model
+ threed_object_gen_model = model
+
+ print(f"Using model type: {MODEL_TYPE}")
+ print(f"Model loaded successfully: {model is not None}")
+
+ # Custom function to generate responses with transformer models
+ async def generate_with_transformer(prompt_text, messages, max_length=512):
+     """Generate response using transformer model"""
+     try:
+         # Combine messages into a single prompt
+         conversation = ""
+         for msg in messages:
+             if isinstance(msg, HumanMessage):
+                 conversation += f"Human: {msg.content}\n"
+             elif isinstance(msg, AIMessage):
+                 if isinstance(msg.content, str):
+                     conversation += f"Assistant: {msg.content}\n"
+                 elif isinstance(msg.content, list):
+                     content = " ".join([item.get("text", "") for item in msg.content if isinstance(item, dict)])
+                     conversation += f"Assistant: {content}\n"
+             elif isinstance(msg, SystemMessage):
+                 conversation += f"System: {msg.content}\n"
+
+         # Add the current prompt
+         full_prompt = f"{prompt_text}\n\nConversation:\n{conversation}\nAssistant:"
+
+         if MODEL_TYPE == "inference" and hf_client:
+             # Use Hugging Face Inference API (text_generation is synchronous, so it is called without await)
+             response = hf_client.text_generation(
+                 full_prompt,
+                 max_new_tokens=max_length,
+                 temperature=0.7,
+                 do_sample=True,
+                 stop_sequences=["Human:", "System:"]
+             )
+             return response
+         else:
+             # Use local model
+             if hasattr(model, 'invoke'):
+                 response = model.invoke(full_prompt)
+                 return response
+             elif hasattr(model, '__call__'):
+                 response = model(full_prompt)
+                 if isinstance(response, list) and len(response) > 0:
+                     return response[0].get('generated_text', '').replace(full_prompt, '').strip()
+                 return str(response)
+             else:
+                 return "Model not properly configured"
+
+     except Exception as e:
+         logger.error(f"Error generating with transformer: {e}")
+         return f"Error generating response: {e}"
+
+ # Custom tool calling simulation for transformer models
+ def simulate_tool_calls(response_text):
+     """Simulate tool calls by parsing response text for specific patterns"""
+     tool_calls = []
+
+     # Look for patterns like "CALL_TOOL: human_assistance(query='...')"
+     if "human_assistance" in response_text.lower():
+         # Capture the first quoted string after "human_assistance" as the query
+         import re
+         pattern = r"human_assistance.*?['\"]([^'\"]+)['\"]"
+         match = re.search(pattern, response_text, re.IGNORECASE)
+         if match:
+             query = match.group(1)
+             tool_calls.append({
+                 "name": "human_assistance",
+                 "arguments": {"query": query},
+                 "id": f"call_{uuid.uuid4()}"
+             })
+
+     if "finalize_idea" in response_text.lower() or "idea finalized" in response_text.lower():
+         tool_calls.append({
+             "name": "finalize_idea",
+             "arguments": {"idea_name": "Generated Idea"},
+             "id": f"call_{uuid.uuid4()}"
+         })
+
+     return tool_calls
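
Two illustrative inputs for the parser above; the phrasing is hypothetical and only needs to contain a quoted question after "human_assistance" or the "IDEA FINALIZED" marker:

calls = simulate_tool_calls("Tool Call: human_assistance('What size should the shelf be?')")
print(calls[0]["name"], calls[0]["arguments"])   # human_assistance {'query': 'What size should the shelf be?'}

calls = simulate_tool_calls("IDEA FINALIZED: Birdhouse")
print(calls[0]["name"])                          # finalize_idea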
+
 class GraphProcessingState(BaseModel):
     messages: Annotated[list[AnyMessage], add_messages] = Field(default_factory=list)
     prompt: str = Field(default_factory=str, description="The prompt to be used for the model")
     tools_enabled: dict = Field(default_factory=dict, description="The tools enabled for the assistant")

     product_searching_complete: bool = Field(default=False)
     purchasing_complete: bool = Field(default=False)

     generated_image_url_from_dalle: str = Field(default="", description="The generated_image_url_from_dalle.")

 async def guidance_node(state: GraphProcessingState, config=None):
+     print("\n🕵️‍♀️🕵️‍♀️ | start | progress checking node \n")

     if state.messages:
         last_message = state.messages[-1]
         if isinstance(last_message, HumanMessage):
             print(f"🧑 Human: {last_message.content}\n")
         elif isinstance(last_message, AIMessage):

     else:
         print("\n(No messages found.)")

     # Define the order of stages
     stage_order = ["brainstorming", "planning", "drawing", "product_searching", "purchasing"]

     completed = [stage for stage in stage_order if getattr(state, f"{stage}_complete", False)]
     incomplete = [stage for stage in stage_order if not getattr(state, f"{stage}_complete", False)]

     # Determine the next stage
     if not incomplete:
         return {
             "messages": [AIMessage(content="All DIY project stages are complete!")],
             "next_stage": "end_project",
             "pending_approval_stage": None,
         }
     else:
         next_stage = incomplete[0]
+         print(f"Next Stage: {next_stage}")
+         print("\n🕵️‍♀️🕵️‍♀️ | end | progress checking node \n")
         return {
             "messages": [],
             "next_stage": next_stage,

         }
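
The stage bookkeeping above is a getattr scan over the *_complete flags; a small illustration with a hypothetical stand-in object:

from types import SimpleNamespace

demo_state = SimpleNamespace(brainstorming_complete=True, planning_complete=False)
order = ["brainstorming", "planning", "drawing", "product_searching", "purchasing"]
incomplete = [s for s in order if not getattr(demo_state, f"{s}_complete", False)]
print(incomplete[0])  # "planning" becomes the proposed next_stage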

 def guidance_routing(state: GraphProcessingState) -> str:
     print("\n🔀🔀 Routing checkpoint 🔀🔀\n")
     print(f"Next Stage: {state.next_stage}\n")
     print(f"Brainstorming complete: {state.brainstorming_complete}")
+     print(f"Planning complete: {state.planning_complete}")
+     print(f"Drawing complete: {state.drawing_complete}")
+     print(f"Product searching complete: {state.product_searching_complete}\n")

     next_stage = state.next_stage
     if next_stage == "brainstorming":
         return "brainstorming_node"
     elif next_stage == "planning":
         return "prompt_planning_node"
     elif next_stage == "drawing":
         return "generate_3d_node"
     elif next_stage == "product_searching":
+         print('\n Product searching stage reached')
+         return END
+     else:
+         return END
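
guidance_routing only inspects next_stage, so it can be exercised with a hypothetical stand-in; anything unmapped (including "product_searching") falls through to END, which the conditional-edge path map in define_workflow below has to account for:

from types import SimpleNamespace

demo = SimpleNamespace(next_stage="planning", brainstorming_complete=True,
                       planning_complete=False, drawing_complete=False,
                       product_searching_complete=False)
print(guidance_routing(demo))  # -> "prompt_planning_node"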

 async def brainstorming_node(state: GraphProcessingState, config=None):
+     print("\n🧠🧠 | start | brainstorming Node \n")

     if not model:
         return {"messages": [AIMessage(content="Model not available for brainstorming.")]}

     filtered_messages = [
         message for message in state.messages
         if isinstance(message, (HumanMessage, AIMessage, SystemMessage, ToolMessage)) and message.content
     ]

     if not filtered_messages:
         filtered_messages.append(AIMessage(content="No valid messages provided."))

     if not incomplete:
         print("All stages complete!")
         ai_all_complete_msg = AIMessage(content="All DIY project stages are complete!")
         return {
+             "messages": [ai_all_complete_msg],
+             "next_stage": "end_project",
             "pending_approval_stage": None,
         }

+     guidance_prompt_text = """
+     You are a warm, encouraging, and knowledgeable AI assistant, acting as a Creative DIY Collaborator. Your primary goal is to guide the user through a friendly and inspiring conversation to finalize ONE specific, viable DIY project idea.
+
+     Your Conversational Style & Strategy:
+     1. Be an Active Listener: Start by acknowledging and validating the user's input.
+     2. Ask Inspiring, Open-Ended Questions: Make them feel personal and insightful.
+     3. Act as a Knowledgeable Guide: When a user is unsure, proactively suggest appealing ideas.
+     4. Guide, Don't Just Gatekeep: When an idea almost meets criteria, guide it towards feasibility.
+
+     Critical Criteria for the Final DIY Project Idea:
+     1. Buildable: Achievable by an average person with basic DIY skills.
+     2. Common Materials/Tools: Uses only materials and basic tools commonly available.
+     3. Avoid Specializations: No specialized electronics, 3D printing, or complex machinery.
+     4. Tangible Product: The final result must be a physical, tangible item.
+
+     If you need to ask the user a question, respond with: "CALL_TOOL: human_assistance(query='your question here')"
+     If an idea is finalized, respond with: "IDEA FINALIZED: [Name of the Idea]"
+     """

     if state.prompt:
+         final_prompt = "\n".join([guidance_prompt_text, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
     else:
+         final_prompt = "\n".join([guidance_prompt_text, ASSISTANT_SYSTEM_PROMPT_BASE])

     try:
+         # Generate response using transformer model
+         response_text = await generate_with_transformer(final_prompt, filtered_messages)
+
+         # Simulate tool calls
+         tool_calls = simulate_tool_calls(response_text)
+
+         # Create AI message
+         ai_message = AIMessage(content=response_text)
+
         updates = {
             "messages": [ai_message],
+             "tool_calls": tool_calls,
         }

+         print(f'\n🔍 response from brainstorm: {response_text}')
+
+         # Check for finalization
+         if "IDEA FINALIZED:" in response_text.upper():
+             print(' final idea')
+             updates.update({
+                 "brainstorming_complete": True,
+                 "tool_call_required": False,
+                 "loop_brainstorming": False,
+             })
+         elif tool_calls:
+             print('🛠️ tool call requested at brainstorming node')
+             updates.update({
+                 "tool_call_required": True,
+                 "loop_brainstorming": False,
+             })
         else:
+             print('💬 decided to keep brainstorming')
+             updates.update({
+                 "tool_call_required": False,
+                 "loop_brainstorming": True,
+             })

         print("\n🧠🧠 | end | brainstorming Node \n")
         return updates
+
     except Exception as e:
         print(f"Error: {e}")
         return {
+             "messages": [AIMessage(content="Error in brainstorming.")],
             "next_stage": "brainstorming"
         }

 async def prompt_planning_node(state: GraphProcessingState, config=None):
+     print("\n🚩🚩 | start | prompt planning Node \n")
+
     if not model:
         return {"messages": [AIMessage(content="Model not available for planning.")]}

     filtered_messages = state.messages

     if not filtered_messages:
         filtered_messages.append(AIMessage(content="No valid messages provided."))

     guidance_prompt_text = """
+     You are a creative AI assistant acting as a DIY Project Brainstorming & 3D-Prompt Generator. Your mission is to:

     1. Brainstorm and refine one specific, viable DIY project idea.
     2. Identify the single key component from that idea that should be 3D-modeled.
+     3. Produce a final, precise text prompt for a 3D-generation endpoint.

+     Critical Criteria for the DIY Project:
+     • Buildable by an average person with only basic DIY skills.
+     • Uses common materials/tools (e.g., wood, screws, glue, paint; hammer, saw, drill).
+     • No specialized electronics, 3D printers, or proprietary parts.
     • Results in a tangible, physical item.

+     When the DIY idea is fully detailed and meets all criteria, output exactly:

     ACCURATE PROMPT FOR MODEL GENERATING: [Your final single-paragraph prompt here]
     """

     if state.prompt:
         final_prompt = "\n".join([guidance_prompt_text, state.prompt, ASSISTANT_SYSTEM_PROMPT_BASE])
     else:
         final_prompt = "\n".join([guidance_prompt_text, ASSISTANT_SYSTEM_PROMPT_BASE])

     try:
+         # Generate response using transformer model
+         response_text = await generate_with_transformer(final_prompt, filtered_messages)

+         # Create AI message
+         response = AIMessage(content=response_text)
         updates = {"messages": [response]}

+         print(f'\nResponse: {response_text}')

+         # Check for finalization signal
+         if "ACCURATE PROMPT FOR MODEL GENERATING" in response_text:
+             dalle_prompt_text = response_text.replace("ACCURATE PROMPT FOR MODEL GENERATING:", "").strip()
+             print(f"\n🤖🤖🤖🤖Extracted prompt: {dalle_prompt_text}")

+             # For this example, we'll simulate image generation
+             # In practice, you would call your image generation API here
+             generated_image_url = "https://example.com/generated_image.jpg"  # Placeholder
+
+             updates["messages"].append(AIMessage(content=f"Image generation prompt created: {dalle_prompt_text}"))
+
             updates.update({
                 "generated_image_url_from_dalle": generated_image_url,
                 "planning_complete": True,

             })
         else:
             # Check if a tool call was requested
+             tool_calls = simulate_tool_calls(response_text)
+             if tool_calls:
                 updates.update({
                     "tool_call_required": True,
                     "loop_planning": False,

                 "loop_planning": True,
                 })

+         print("\n🚩🚩 | end | prompt planning Node \n")
         return updates

     except Exception as e:

 async def generate_3d_node(state: GraphProcessingState, config=None):
     print("\n🚀🚀🚀 | start | Generate 3D Node 🚀🚀🚀\n")
+
+     # Get the image URL
     hardcoded_image_url = state.generated_image_url_from_dalle
+     print(f"Using image_url: {hardcoded_image_url}")

+     # For this example, we'll simulate 3D generation
+     # In practice, you would call your 3D generation API here
+
+     try:
+         # Simulate 3D model generation
+         print("Simulating 3D model generation...")
+
+         # Create output directory
+         output_dir = "generated_3d_models"
+         os.makedirs(output_dir, exist_ok=True)
+
+         # Simulate successful generation
+         file_name = f"model_{uuid.uuid4()}.glb"
+         file_path = os.path.join(output_dir, file_name)
+
+         # Create a placeholder file
+         with open(file_path, "w") as f:
+             f.write("# Simulated 3D model file\n")
+
+         print(f"Success: 3D model saved to {file_path}")
+         return {
+             "messages": [AIMessage(content=f"3D object generation successful: {file_path}")],
+             "drawing_complete": True,
+             "three_d_model_path": file_path,
+             # GraphProcessingState is a pydantic model, so read next_stage as an attribute
+             "next_stage": state.next_stage or 'end'
+         }
+
+     except Exception as e:
+         error_message = f"An error occurred: {str(e)}"
+         print(error_message)
+         return {"messages": [AIMessage(content=f"Failed to generate 3D object: {error_message}")]}

 def define_workflow() -> CompiledStateGraph:
     """Defines the workflow graph"""
     workflow = StateGraph(GraphProcessingState)

     # Add nodes
     workflow.add_node("tools", DebugToolNode(tools))
     workflow.add_node("guidance_node", guidance_node)
     workflow.add_node("brainstorming_node", brainstorming_node)
     workflow.add_node("prompt_planning_node", prompt_planning_node)
     workflow.add_node("generate_3d_node", generate_3d_node)

     # Edges
     workflow.add_conditional_edges(
         "guidance_node",
         guidance_routing,
+         {
+             "brainstorming_node": "brainstorming_node",
+             "prompt_planning_node": "prompt_planning_node",
+             "generate_3d_node": "generate_3d_node",
+             # guidance_routing can also return END (product_searching and the fallback branch),
+             # so END must be a valid key in this path map.
+             END: END,
+         }
     )

+     workflow.add_conditional_edges("brainstorming_node", tools_condition)
+     workflow.add_conditional_edges("prompt_planning_node", tools_condition)
+
     workflow.add_edge("tools", "guidance_node")
     workflow.add_edge("brainstorming_node", "guidance_node")
     workflow.add_edge("prompt_planning_node", "guidance_node")
     workflow.add_edge("generate_3d_node", "guidance_node")

     workflow.set_entry_point("guidance_node")
     compiled_graph = workflow.compile(checkpointer=memory)
+
     try:
         img_bytes = compiled_graph.get_graph().draw_mermaid_png()
         with open("graph.png", "wb") as f:

         print("Can't print the graph:")
         print(e)

     return compiled_graph

 graph = define_workflow()

+ # Example usage function
+ async def run_diy_assistant(user_input: str):
+     """Run the DIY assistant with user input"""
+     config = {"configurable": {"thread_id": "1"}}
+
+     initial_state = GraphProcessingState(
+         messages=[HumanMessage(content=user_input)],
+         prompt="",
+         tools_enabled={"download_website_text": True, "tavily_search_results_json": search_enabled},
+         search_enabled=search_enabled
+     )
+
+     try:
+         result = await graph.ainvoke(initial_state, config)
+         return result
+     except Exception as e:
+         print(f"Error running DIY assistant: {e}")
+         return {"error": str(e)}
+
+ # Example of how to run
+ if __name__ == "__main__":
+     import asyncio
+
+     async def main():
+         user_input = "I want to build something for my garden"
+         result = await run_diy_assistant(user_input)
+         print("Final result:", result)
+
+     # asyncio.run(main())
+     print("DIY Assistant with transformer models loaded successfully!")
+     print(f"Available model: {model}")
+     print("Use the graph object to run your workflow.")