from typing import Dict, Any, List, Optional
from openai import OpenAI
import json
from pydantic import BaseModel, Field
from prompts import SYSTEM_PROMPT, format_exploration_prompt, DEFAULT_RESPONSE

class ExplorationNode(BaseModel):
    id: Optional[str] = None
    title: str
    description: str
    connections: List[Dict[str, Any]] = Field(default_factory=list)
    depth: int = 0


class ExplorationPath(BaseModel):
    nodes: List[ExplorationNode]
    query: str
    domain: Optional[str] = None

    class Config:
        populate_by_name = True
        arbitrary_types_allowed = True

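# Illustrative note (not part of the original module): pydantic coerces plain
# dictionaries into ExplorationNode instances when an ExplorationPath is
# constructed, which is what lets the dict-based output of
# transform_response_to_nodes below be passed straight to `nodes=`.
# The sample values here are invented:
#
#     path = ExplorationPath(
#         nodes=[{"title": "Overview", "description": "Example node"}],
#         query="example query",
#     )
#     assert isinstance(path.nodes[0], ExplorationNode)
#     print(path.model_dump())
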
def transform_response_to_nodes(api_response: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Transform the API response into a list of ExplorationNode-compatible dictionaries."""
    nodes = []

    # Add the main exploration summary as the root node
    if "exploration_summary" in api_response:
        nodes.append({
            "id": "root",
            "title": "Exploration Overview",
            "description": api_response["exploration_summary"]["current_context"],
            "depth": 0,
            "connections": []
        })

    # Transform standard axes into nodes
    if "knowledge_axes" in api_response and "standard_axes" in api_response["knowledge_axes"]:
        for axis in api_response["knowledge_axes"]["standard_axes"]:
            # Create a node for the axis itself
            axis_node = {
                "id": f"axis_{axis['name']}",
                "title": axis['name'],
                "description": f"Standard exploration axis: {axis['name']}",
                "depth": 1,
                "connections": []
            }
            nodes.append(axis_node)

            # Create nodes for potential values
            for idx, value in enumerate(axis.get("potential_values", [])):
                value_node = {
                    "id": f"value_{axis['name']}_{idx}",
                    "title": value["value"],
                    "description": value["contextual_rationale"],
                    "depth": 2,
                    "connections": []
                }
                nodes.append(value_node)

                # Add a connection from the axis node to this value
                axis_node["connections"].append({
                    "target_id": value_node["id"],
                    "relevance_score": value["relevance_score"]
                })

    # Transform emergent axes into nodes
    if "knowledge_axes" in api_response and "emergent_axes" in api_response["knowledge_axes"]:
        for e_axis in api_response["knowledge_axes"]["emergent_axes"]:
            # Create a node for the emergent axis
            e_axis_node = {
                "id": f"emergent_{e_axis['name']}",
                "title": f"{e_axis['name']} (Emergent)",
                "description": f"Emergent axis derived from {e_axis['parent_axis']}",
                "depth": 2,
                "connections": []
            }
            nodes.append(e_axis_node)

            # Create nodes for innovative values
            for idx, value in enumerate(e_axis.get("innovative_values", [])):
                value_node = {
                    "id": f"innovative_{e_axis['name']}_{idx}",
                    "title": value["value"],
                    "description": value["discovery_potential"],
                    "depth": 3,
                    "connections": []
                }
                nodes.append(value_node)

                # Add a connection from the emergent axis node to this value
                e_axis_node["connections"].append({
                    "target_id": value_node["id"],
                    "innovation_score": value["innovation_score"]
                })

    return nodes

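# Illustrative input/output for transform_response_to_nodes (not from the
# original module; only field names the function actually reads are used,
# and the sample values are invented):
#
#     api_response = {
#         "exploration_summary": {"current_context": "Sample context"},
#         "knowledge_axes": {
#             "standard_axes": [{
#                 "name": "scale",
#                 "potential_values": [{
#                     "value": "micro",
#                     "contextual_rationale": "Why micro matters here",
#                     "relevance_score": 0.8
#                 }]
#             }],
#             "emergent_axes": []
#         }
#     }
#
# transform_response_to_nodes(api_response) would yield a "root" node at
# depth 0, an "axis_scale" node at depth 1 whose connections point to
# "value_scale_0", and the "value_scale_0" node itself at depth 2.
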
class ExplorationPathGenerator:
    def __init__(self, api_key: str):
        self.client = OpenAI(
            api_key=api_key,
            base_url="https://api.groq.com/openai/v1"
        )

    def generate_exploration_path(
        self,
        query: str,
        selected_path: Optional[List[Dict[str, Any]]] = None,
        exploration_parameters: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Generate an exploration path based on the query and parameters."""
        try:
            print("\n=== Starting API Request ===")
            selected_path = selected_path or []
            exploration_parameters = exploration_parameters or {}

            formatted_prompt = format_exploration_prompt(
                user_query=query,
                selected_path=selected_path,
                exploration_parameters=exploration_parameters
            )

            print("\n=== Formatted Request ===")
            print("System Prompt:", SYSTEM_PROMPT[:200] + "...")
            print("\nFormatted Prompt (excerpt):", formatted_prompt[:200] + "...")

            response = self.client.chat.completions.create(
                model="mixtral-8x7b-32768",
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT},
                    {"role": "user", "content": formatted_prompt}
                ],
                temperature=0.7,
                max_tokens=2000
            )

            print("\n=== API Response ===")
            print("Raw response content:", response.choices[0].message.content)

            try:
                result = json.loads(response.choices[0].message.content)
                print("\n=== Parsed Response ===")
                print(json.dumps(result, indent=2))

                # Transform the API response into nodes
                nodes = transform_response_to_nodes(result)

                # Create an ExplorationPath with the transformed nodes
                exploration_path = ExplorationPath(
                    nodes=nodes,
                    query=query,
                    domain=exploration_parameters.get("domain")
                )

                final_result = exploration_path.model_dump()
                print("\n=== Final Result ===")
                print(json.dumps(final_result, indent=2))
                return final_result
            except json.JSONDecodeError as e:
                print(f"\n=== JSON Parse Error ===\n{str(e)}")
                print("Falling back to an empty exploration path")
                return {
                    "nodes": [],
                    "query": query,
                    "domain": exploration_parameters.get("domain")
                }
        except Exception as e:
            print(f"\n=== Error ===\n{str(e)}")
            return {
                "error": str(e),
                "status": "failed",
                "message": "Failed to generate exploration path",
                "default_response": DEFAULT_RESPONSE
            }

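# Usage sketch (not part of the original module). It assumes a Groq API key is
# available in a GROQ_API_KEY environment variable and that prompts.py defines
# SYSTEM_PROMPT, format_exploration_prompt and DEFAULT_RESPONSE as imported
# above; "domain" mirrors the only key this module reads from
# exploration_parameters, and the query string is a made-up example.
if __name__ == "__main__":
    import os

    generator = ExplorationPathGenerator(api_key=os.environ["GROQ_API_KEY"])
    path = generator.generate_exploration_path(
        query="How do transformers handle long-range dependencies?",
        selected_path=[],
        exploration_parameters={"domain": "machine learning"},
    )
    print(json.dumps(path, indent=2))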