baconnier committed on
Commit 092d495 · verified · 1 Parent(s): bbf15ac

Update art_explorer.py

Files changed (1):
  1. art_explorer.py +105 -37
art_explorer.py CHANGED
@@ -6,12 +6,10 @@ from prompts import SYSTEM_PROMPT, format_exploration_prompt, DEFAULT_RESPONSE
 
 class ExplorationNode(BaseModel):
     id: Optional[str] = None
-    node_id: Optional[str] = None
-    title: Optional[str] = Field(None, alias='content')
-    description: str = ""
+    title: str
+    description: str
     connections: List[Dict[str, Any]] = Field(default_factory=list)
-    depth: Optional[int] = 0
-    content: Optional[str] = None
 
+    depth: int = 0
 
 class ExplorationPath(BaseModel):
     nodes: List[ExplorationNode]
@@ -22,6 +20,80 @@ class ExplorationPath(BaseModel):
         populate_by_name = True
         arbitrary_types_allowed = True
 
+def transform_response_to_nodes(api_response: Dict[str, Any]) -> List[Dict[str, Any]]:
+    """Transform the API response into a list of ExplorationNode-compatible dictionaries"""
+    nodes = []
+
+    # Add main exploration summary as root node
+    if "exploration_summary" in api_response:
+        nodes.append({
+            "id": "root",
+            "title": "Exploration Overview",
+            "description": api_response["exploration_summary"]["current_context"],
+            "depth": 0,
+            "connections": []
+        })
+
+    # Transform standard axes into nodes
+    if "knowledge_axes" in api_response and "standard_axes" in api_response["knowledge_axes"]:
+        for axis in api_response["knowledge_axes"]["standard_axes"]:
+            # Create node for the axis itself
+            axis_node = {
+                "id": f"axis_{axis['name']}",
+                "title": axis['name'],
+                "description": f"Standard exploration axis: {axis['name']}",
+                "depth": 1,
+                "connections": []
+            }
+            nodes.append(axis_node)
+
+            # Create nodes for potential values
+            for idx, value in enumerate(axis.get("potential_values", [])):
+                value_node = {
+                    "id": f"value_{axis['name']}_{idx}",
+                    "title": value["value"],
+                    "description": value["contextual_rationale"],
+                    "depth": 2,
+                    "connections": []
+                }
+                nodes.append(value_node)
+                # Add connection to axis node
+                axis_node["connections"].append({
+                    "target_id": value_node["id"],
+                    "relevance_score": value["relevance_score"]
+                })
+
+    # Transform emergent axes into nodes
+    if "knowledge_axes" in api_response and "emergent_axes" in api_response["knowledge_axes"]:
+        for e_axis in api_response["knowledge_axes"]["emergent_axes"]:
+            # Create node for emergent axis
+            e_axis_node = {
+                "id": f"emergent_{e_axis['name']}",
+                "title": f"{e_axis['name']} (Emergent)",
+                "description": f"Emergent axis derived from {e_axis['parent_axis']}",
+                "depth": 2,
+                "connections": []
+            }
+            nodes.append(e_axis_node)
+
+            # Create nodes for innovative values
+            for idx, value in enumerate(e_axis.get("innovative_values", [])):
+                value_node = {
+                    "id": f"innovative_{e_axis['name']}_{idx}",
+                    "title": value["value"],
+                    "description": value["discovery_potential"],
+                    "depth": 3,
+                    "connections": []
+                }
+                nodes.append(value_node)
+                # Add connection to emergent axis node
+                e_axis_node["connections"].append({
+                    "target_id": value_node["id"],
+                    "innovation_score": value["innovation_score"]
+                })
+
+    return nodes
+
 class ExplorationPathGenerator:
     def __init__(self, api_key: str):
         self.client = OpenAI(
@@ -41,7 +113,6 @@ class ExplorationPathGenerator:
             selected_path = selected_path or []
             exploration_parameters = exploration_parameters or {}
 
-            # Format the exploration prompt using the helper function
             formatted_prompt = format_exploration_prompt(
                 user_query=query,
                 selected_path=selected_path,
@@ -49,25 +120,15 @@
             )
 
             print("\n=== Formatted Request ===")
-            print("System Prompt:", SYSTEM_PROMPT)  # First 200 chars
-            print("\nFormatted Prompt (excerpt):", formatted_prompt)
+            print("System Prompt:", SYSTEM_PROMPT[:200] + "...")
+            print("\nFormatted Prompt (excerpt):", formatted_prompt[:200] + "...")
 
-            messages = [
-                {"role": "system", "content": SYSTEM_PROMPT},
-                {"role": "user", "content": formatted_prompt}
-            ]
-
-            print("\n=== API Request Parameters ===")
-            print(json.dumps({
-                "model": "mixtral-8x7b-32768",
-                "messages": [{"role": m["role"], "content": m["content"]} for m in messages],
-                "temperature": 0.7,
-                "max_tokens": 2000
-            }, indent=2))
-
             response = self.client.chat.completions.create(
                 model="mixtral-8x7b-32768",
-                messages=messages,
+                messages=[
+                    {"role": "system", "content": SYSTEM_PROMPT},
+                    {"role": "user", "content": formatted_prompt}
+                ],
                 temperature=0.7,
                 max_tokens=2000
             )
@@ -75,28 +136,35 @@
             print("\n=== API Response ===")
             print("Raw response content:", response.choices[0].message.content)
 
-            # Parse the response
             try:
                 result = json.loads(response.choices[0].message.content)
                 print("\n=== Parsed Response ===")
                 print(json.dumps(result, indent=2))
+
+                # Transform the API response into nodes
+                nodes = transform_response_to_nodes(result)
+
+                # Create ExplorationPath with transformed nodes
+                exploration_path = ExplorationPath(
+                    nodes=nodes,
+                    query=query,
+                    domain=exploration_parameters.get("domain")
+                )
+
+                final_result = exploration_path.model_dump()
+                print("\n=== Final Result ===")
+                print(json.dumps(final_result, indent=2))
+
+                return final_result
+
             except json.JSONDecodeError as e:
                 print(f"\n=== JSON Parse Error ===\n{str(e)}")
                 print("Using default response")
-                result = DEFAULT_RESPONSE
-
-            # Convert to ExplorationPath model
-            exploration_path = ExplorationPath(
-                nodes=result.get("nodes", []),  # Changed from list comprehension to simple get
-                query=query,
-                domain=exploration_parameters.get("domain")
-            )
-
-            final_result = exploration_path.model_dump()
-            print("\n=== Final Result ===")
-            print(json.dumps(final_result, indent=2))
-
-            return final_result
+                return {
+                    "nodes": [],
+                    "query": query,
+                    "domain": exploration_parameters.get("domain")
+                }
 
         except Exception as e:
             print(f"\n=== Error ===\n{str(e)}")