Update src/chimera/core/orchestrator.py
src/chimera/core/orchestrator.py CHANGED
@@ -29,22 +29,25 @@ async def run_analysis(user_query: str) -> str:
     # logger.info("Adding Weather task.")
     # tasks.append(asyncio.create_task(external_apis.get_weather(location)))
 
+    # --- Start of Corrected Section ---
     if not tasks:
-        logger.warning("No relevant APIs identified for the query.")
+        logger.warning("No relevant APIs identified for the query. Proceeding without external API data.")
         # Fallback: Just send the raw query to Gemini? Or ask user for clarification?
         # For now, just send the query directly for general knowledge analysis
-
+        pass # This 'pass' is now correctly indented under the 'if not tasks:' block.
+        # It signifies doing nothing specific if no tasks were added.
 
+    # --- Lines below are now correctly dedented ---
     # Step 2 & 3: Call APIs Concurrently and Gather Data
-        api_results = {}
-        if tasks:
+    api_results = {} # Initialize api_results regardless of whether tasks were added
+    if tasks: # Only execute gather if there are tasks
         logger.info(f"Gathering data from {len(tasks)} API(s)...")
         results = await asyncio.gather(*tasks, return_exceptions=True) # Collect all results/exceptions
         logger.info("API data gathering complete.")
 
         # Process results (basic example)
-        #
-        if isinstance(results[0], dict) and "organic_results" in results[0]:
+        # Make sure results list is not empty before accessing results[0]
+        if results and isinstance(results[0], dict) and "organic_results" in results[0]:
             api_results["serp"] = results[0]
         # Add checks and assignments for other potential API results
 
@@ -57,9 +60,11 @@ async def run_analysis(user_query: str) -> str:
         elif isinstance(result, dict) and "error" in result:
             logger.error(f"API call task {i} reported an error: {result['error']}")
 
+    # --- End of Corrected Section ---
+
     # Step 4: Format Data and Create Gemini Prompt
     # Process the gathered data into a readable format for the LLM
-    formatted_data = data_processing.format_api_data_for_llm(api_results)
+    formatted_data = data_processing.format_api_data_for_llm(api_results) # Pass potentially empty api_results
 
     # Construct the final prompt
     # This is CRITICAL - prompt engineering is key here!
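To see the corrected control flow in isolation, here is a minimal, self-contained sketch of the same pattern: build a list of asyncio tasks, gather them with return_exceptions=True, then separate successful payloads from errors before the results are formatted. The helper coroutines fetch_serp and fetch_weather and their fake payloads are hypothetical stand-ins, not the repository's real external_apis functions, and the call to data_processing.format_api_data_for_llm from the diff is omitted here.

import asyncio
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Hypothetical stand-ins for the external_apis.* coroutines used in the diff.
async def fetch_serp(query: str) -> dict:
    await asyncio.sleep(0.1)  # simulate network latency
    return {"organic_results": [{"title": f"Result for {query}"}]}

async def fetch_weather(location: str) -> dict:
    await asyncio.sleep(0.1)
    return {"error": f"weather lookup failed for {location}"}  # simulated API-level error

async def run_analysis(user_query: str) -> dict:
    tasks = [
        asyncio.create_task(fetch_serp(user_query)),
        asyncio.create_task(fetch_weather("London")),
    ]

    api_results: dict = {}  # initialized even if no tasks were added
    if tasks:  # only gather when there is work to do
        logger.info(f"Gathering data from {len(tasks)} API(s)...")
        results = await asyncio.gather(*tasks, return_exceptions=True)

        for i, result in enumerate(results):
            if isinstance(result, Exception):  # raised exceptions come back as list items
                logger.error(f"API call task {i} raised: {result}")
            elif isinstance(result, dict) and "error" in result:
                logger.error(f"API call task {i} reported an error: {result['error']}")
            elif isinstance(result, dict) and "organic_results" in result:
                api_results["serp"] = result
    else:
        logger.warning("No relevant APIs identified for the query.")

    return api_results  # the real orchestrator formats this dict for the LLM prompt

if __name__ == "__main__":
    print(asyncio.run(run_analysis("python asyncio gather")))

With return_exceptions=True, one failing API call cannot cancel the others: each exception is returned as an ordinary element of the results list, which is why the loop checks isinstance(result, Exception) before looking for an application-level "error" key.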