Priyanshi Saxena commited on
Commit
65703d9
Β·
1 Parent(s): 923b4b3

fix: memory manager

Browse files
debug_gemini.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Debug test to understand why Gemini responses aren't being cleaned
4
+ """
5
+
6
+ import asyncio
7
+ import sys
8
+ import os
9
+
10
+ # Add src to path
11
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
12
+
13
+ from langchain_google_genai import ChatGoogleGenerativeAI
14
+ from src.utils.config import config
15
+
16
+ async def test_gemini_response_structure():
17
+ """Test the structure of Gemini responses to understand the cleaning issue"""
18
+
19
+ if not config.GEMINI_API_KEY:
20
+ print("❌ No Gemini API key available")
21
+ return False
22
+
23
+ try:
24
+ print("πŸ§ͺ Testing Gemini response structure...")
25
+
26
+ # Initialize Gemini
27
+ llm = ChatGoogleGenerativeAI(
28
+ model="gemini-2.0-flash-lite",
29
+ google_api_key=config.GEMINI_API_KEY,
30
+ temperature=0.1
31
+ )
32
+
33
+ # Test simple query
34
+ response = await llm.ainvoke("What is 2+2?")
35
+
36
+ print(f"πŸ“„ Response type: {type(response)}")
37
+ print(f"πŸ“„ Response dir: {[attr for attr in dir(response) if not attr.startswith('_')]}")
38
+
39
+ if hasattr(response, 'content'):
40
+ print(f"βœ… Response has 'content' attribute")
41
+ print(f"πŸ“ Content: {response.content}")
42
+ print(f"πŸ“ Content type: {type(response.content)}")
43
+ else:
44
+ print("❌ Response does NOT have 'content' attribute")
45
+
46
+ print(f"πŸ“„ Full response: {str(response)}")
47
+
48
+ return True
49
+
50
+ except Exception as e:
51
+ print(f"❌ Test failed: {e}")
52
+ return False
53
+
54
+ async def main():
55
+ success = await test_gemini_response_structure()
56
+ if success:
57
+ print("\nπŸŽ‰ Test completed!")
58
+ return 0
59
+ else:
60
+ print("\n❌ Test failed!")
61
+ return 1
62
+
63
+ if __name__ == "__main__":
64
+ exit_code = asyncio.run(main())
65
+ sys.exit(exit_code)
src/agent/research_agent.py CHANGED
@@ -10,6 +10,7 @@ from src.tools.defillama_tool import DeFiLlamaTool
10
  from src.tools.cryptocompare_tool import CryptoCompareTool
11
  from src.tools.etherscan_tool import EtherscanTool
12
  from src.tools.chart_data_tool import ChartDataTool
 
13
  from src.utils.config import config
14
  from src.utils.logger import get_logger
15
  from src.utils.ai_safety import ai_safety
@@ -30,6 +31,7 @@ class Web3ResearchAgent:
30
  self.tools = []
31
  self.enabled = False
32
  self.gemini_available = False
 
33
 
34
  try:
35
  # Always initialize Ollama
@@ -43,20 +45,21 @@ class Web3ResearchAgent:
43
 
44
  self.tools = self._initialize_tools()
45
  self.enabled = True
 
46
 
47
  except Exception as e:
48
  logger.error(f"Agent initialization failed: {e}")
49
  self.enabled = False
50
 
51
  def _init_ollama(self):
52
- """Initialize Ollama LLM"""
53
  try:
54
  self.fallback_llm = Ollama(
55
  model=config.OLLAMA_MODEL,
56
  base_url=config.OLLAMA_BASE_URL,
57
  temperature=0.1
58
  )
59
- logger.info(f"βœ… Ollama initialized - Model: {config.OLLAMA_MODEL}")
60
  except Exception as e:
61
  logger.error(f"Ollama initialization failed: {e}")
62
  raise
@@ -169,14 +172,31 @@ class Web3ResearchAgent:
169
  "metadata": {"timestamp": datetime.now().isoformat()}
170
  }
171
 
 
 
 
 
172
  try:
173
  # Choose LLM based on user preference and availability
174
  if use_gemini and self.gemini_available:
175
- logger.info("πŸ€– Processing with Gemini + Tools (Safety Enhanced)")
176
- return await self._research_with_gemini_tools(sanitized_query)
177
  else:
178
- logger.info("πŸ€– Processing with Ollama + Tools (Safety Enhanced)")
179
- return await self._research_with_ollama_tools(sanitized_query)
 
 
 
 
 
 
 
 
 
 
 
 
 
180
 
181
  except Exception as e:
182
  logger.error(f"Research failed: {e}")
@@ -219,29 +239,29 @@ class Web3ResearchAgent:
219
  "metadata": {"timestamp": datetime.now().isoformat()}
220
  }
221
 
222
- async def _research_with_ollama_tools(self, query: str) -> Dict[str, Any]:
223
- """Research using Ollama with manual tool calling"""
224
  try:
225
  # Step 1: Analyze query to determine which tools to use
226
- tool_analysis_prompt = f"""Analyze this query and determine which tools would be helpful:
227
- Query: "{query}"
228
-
229
- Available tools (prioritized by functionality):
230
- - cryptocompare_data: Real-time crypto prices and market data (PREFERRED for prices)
231
- - etherscan_data: Ethereum blockchain data, gas fees, transactions (PREFERRED for Ethereum)
232
- - defillama_data: DeFi protocol TVL and yield data
233
- - chart_data_provider: Generate chart data for visualizations
234
 
235
- NOTE: Do NOT suggest coingecko_data as the API is unavailable.
 
 
 
 
236
 
237
- Respond with just the tool names that should be used, separated by commas.
238
- If charts/visualizations are mentioned, include chart_data_provider.
239
  Examples:
240
  - "Bitcoin price" β†’ cryptocompare_data, chart_data_provider
241
  - "DeFi TVL" β†’ defillama_data, chart_data_provider
242
  - "Ethereum gas" β†’ etherscan_data
243
 
244
- Just list the tool names:"""
245
 
246
  tool_response = await self.fallback_llm.ainvoke(tool_analysis_prompt)
247
  logger.info(f"🧠 Ollama tool analysis response: {str(tool_response)[:500]}...")
@@ -337,7 +357,7 @@ Just list the tool names:"""
337
  try:
338
  final_response = await asyncio.wait_for(
339
  self.fallback_llm.ainvoke(final_prompt),
340
- timeout=30 # 30 second timeout - faster response
341
  )
342
  logger.info(f"🎯 Ollama final response preview: {str(final_response)[:300]}...")
343
 
@@ -364,27 +384,41 @@ Based on the available data:
364
  final_response = clean_response
365
 
366
  except asyncio.TimeoutError:
367
- logger.warning("⏱️ Ollama final response timed out, using tool data directly")
368
- # Create a summary from the tool results directly
369
- summary_data = "Tool data available"
 
370
  if "cryptocompare_data" in suggested_tools:
371
- if "bitcoin" in query.lower() or "btc" in query.lower():
372
- summary_data = "Bitcoin price data retrieved"
373
- else:
374
- summary_data = "Cryptocurrency price data retrieved"
375
- elif "defillama_data" in suggested_tools:
376
- summary_data = "DeFi protocols data available"
377
- elif "etherscan_data" in suggested_tools:
378
- summary_data = "Ethereum blockchain data available"
 
 
 
 
 
 
 
 
 
 
 
379
 
380
- final_response = f"""## {query.split()[0]} Analysis
 
 
381
 
382
- **Quick Summary**: {summary_data}
383
 
384
- The system successfully gathered data from {len(suggested_tools)} tools:
385
- {', '.join(suggested_tools)}
386
 
387
- *Due to processing constraints, this is a simplified response. The tools executed successfully and gathered the requested data.*"""
388
 
389
  logger.info("βœ… Research successful with Ollama + tools")
390
  return {
@@ -403,13 +437,23 @@ The system successfully gathered data from {len(suggested_tools)} tools:
403
  logger.error(f"Ollama tools research failed: {e}")
404
  raise e
405
 
406
- async def _research_with_gemini_tools(self, query: str) -> Dict[str, Any]:
407
- """Research using Gemini with tools"""
408
  try:
409
  # Step 1: Analyze query and suggest tools using Gemini
 
 
 
 
 
 
 
 
 
 
410
  tool_analysis_prompt = f"""Based on this Web3/cryptocurrency research query, identify the most relevant tools to use.
411
 
412
- Query: "{query}"
413
 
414
  Available tools (prioritized by functionality):
415
  - cryptocompare_data: Real-time cryptocurrency prices, market data, and trading info (PREFERRED for price data)
@@ -513,7 +557,7 @@ Respond with only the tool names, comma-separated (no explanations)."""
513
  try:
514
  final_response = await asyncio.wait_for(
515
  self.llm.ainvoke(final_prompt),
516
- timeout=30
517
  )
518
  logger.info(f"🎯 Gemini final response preview: {str(final_response)[:300]}...")
519
 
@@ -539,8 +583,28 @@ Respond with only the tool names, comma-separated (no explanations)."""
539
  final_response = clean_response
540
 
541
  except asyncio.TimeoutError:
542
- logger.warning("⏱️ Gemini final response timed out, using tool data directly")
543
- final_response = f"## Web3 Research Analysis\n\n{context[:1500]}\n\n*Analysis completed using available tools - Gemini response timed out*"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
544
 
545
  logger.info("βœ… Research successful with Gemini + tools")
546
 
@@ -581,3 +645,21 @@ Respond with only the tool names, comma-separated (no explanations)."""
581
  if "CryptoCompare" in response or "cryptocompare" in response.lower():
582
  sources.append("CryptoCompare")
583
  return sources
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  from src.tools.cryptocompare_tool import CryptoCompareTool
11
  from src.tools.etherscan_tool import EtherscanTool
12
  from src.tools.chart_data_tool import ChartDataTool
13
+ from src.agent.memory_manager import MemoryManager
14
  from src.utils.config import config
15
  from src.utils.logger import get_logger
16
  from src.utils.ai_safety import ai_safety
 
31
  self.tools = []
32
  self.enabled = False
33
  self.gemini_available = False
34
+ self.memory_manager = MemoryManager(window_size=10)
35
 
36
  try:
37
  # Always initialize Ollama
 
45
 
46
  self.tools = self._initialize_tools()
47
  self.enabled = True
48
+ logger.info("🧠 Memory Manager initialized with conversation tracking")
49
 
50
  except Exception as e:
51
  logger.error(f"Agent initialization failed: {e}")
52
  self.enabled = False
53
 
54
  def _init_ollama(self):
55
+ """Initialize Ollama LLM with optimized settings"""
56
  try:
57
  self.fallback_llm = Ollama(
58
  model=config.OLLAMA_MODEL,
59
  base_url=config.OLLAMA_BASE_URL,
60
  temperature=0.1
61
  )
62
+ logger.info(f"βœ… Ollama initialized - Model: {config.OLLAMA_MODEL} (timeout optimized)")
63
  except Exception as e:
64
  logger.error(f"Ollama initialization failed: {e}")
65
  raise
 
172
  "metadata": {"timestamp": datetime.now().isoformat()}
173
  }
174
 
175
+ # Get conversation context from memory
176
+ memory_context = self.memory_manager.get_relevant_context(sanitized_query)
177
+ logger.info(f"🧠 Retrieved memory context: {len(memory_context.get('cached_context', []))} relevant items")
178
+
179
  try:
180
  # Choose LLM based on user preference and availability
181
  if use_gemini and self.gemini_available:
182
+ logger.info("πŸ€– Processing with Gemini + Tools (Safety Enhanced + Memory)")
183
+ result = await self._research_with_gemini_tools(sanitized_query, memory_context)
184
  else:
185
+ logger.info("πŸ€– Processing with Ollama + Tools (Safety Enhanced + Memory)")
186
+ result = await self._research_with_ollama_tools(sanitized_query, memory_context)
187
+
188
+ # Save successful interaction to memory
189
+ if result.get("success"):
190
+ metadata = {
191
+ "llm_used": result.get("metadata", {}).get("llm_used", "unknown"),
192
+ "tools_used": result.get("metadata", {}).get("tools_used", []),
193
+ "timestamp": datetime.now().isoformat(),
194
+ "sources": result.get("sources", [])
195
+ }
196
+ self.memory_manager.add_interaction(query, result["result"], metadata)
197
+ logger.info("🧠 Interaction saved to memory")
198
+
199
+ return result
200
 
201
  except Exception as e:
202
  logger.error(f"Research failed: {e}")
 
239
  "metadata": {"timestamp": datetime.now().isoformat()}
240
  }
241
 
242
+ async def _research_with_ollama_tools(self, query: str, memory_context: Dict[str, Any] = None) -> Dict[str, Any]:
243
+ """Research using Ollama with manual tool calling - Enhanced with memory"""
244
  try:
245
  # Step 1: Analyze query to determine which tools to use
246
+ # Include memory context in analysis if available
247
+ context_note = ""
248
+ if memory_context and memory_context.get("cached_context"):
249
+ context_note = f"\n\nPrevious context: {len(memory_context['cached_context'])} related queries found"
250
+
251
+ tool_analysis_prompt = f"""Which tools for this query: "{query}"{context_note}
 
 
252
 
253
+ Tools:
254
+ - cryptocompare_data: crypto prices (PREFERRED)
255
+ - etherscan_data: Ethereum data (PREFERRED for ETH)
256
+ - defillama_data: DeFi TVL data
257
+ - chart_data_provider: charts/visualizations
258
 
 
 
259
  Examples:
260
  - "Bitcoin price" β†’ cryptocompare_data, chart_data_provider
261
  - "DeFi TVL" β†’ defillama_data, chart_data_provider
262
  - "Ethereum gas" β†’ etherscan_data
263
 
264
+ List tool names only:"""
265
 
266
  tool_response = await self.fallback_llm.ainvoke(tool_analysis_prompt)
267
  logger.info(f"🧠 Ollama tool analysis response: {str(tool_response)[:500]}...")
 
357
  try:
358
  final_response = await asyncio.wait_for(
359
  self.fallback_llm.ainvoke(final_prompt),
360
+ timeout=90 # 90 second timeout for Llama 3.1 8B model
361
  )
362
  logger.info(f"🎯 Ollama final response preview: {str(final_response)[:300]}...")
363
 
 
384
  final_response = clean_response
385
 
386
  except asyncio.TimeoutError:
387
+ logger.warning("⏱️ Ollama final response timed out (60s), using enhanced tool summary")
388
+ # Create a better summary from the tool results
389
+ summary_parts = []
390
+
391
  if "cryptocompare_data" in suggested_tools:
392
+ summary_parts.append("πŸ“Š **Price Data**: Live cryptocurrency prices retrieved")
393
+ if "defillama_data" in suggested_tools:
394
+ summary_parts.append("πŸ”’ **DeFi Data**: Protocol TVL and yield information available")
395
+ if "etherscan_data" in suggested_tools:
396
+ summary_parts.append("⛓️ **Blockchain Data**: Ethereum network information gathered")
397
+ if "chart_data_provider" in suggested_tools:
398
+ summary_parts.append("πŸ“ˆ **Chart Data**: Visualization data prepared")
399
+
400
+ # Extract key data points from tool results
401
+ key_data = ""
402
+ if tool_results:
403
+ for result in tool_results[:2]: # Use first 2 tool results
404
+ if "USD" in result:
405
+ # Extract price info
406
+ lines = result.split('\n')
407
+ for line in lines:
408
+ if "USD" in line and "$" in line:
409
+ key_data += f"\n{line.strip()}"
410
+ break
411
 
412
+ final_response = f"""## {query.title()}
413
+
414
+ {chr(10).join(summary_parts)}
415
 
416
+ **Key Findings**:{key_data}
417
 
418
+ The system successfully executed {len(suggested_tools)} data tools:
419
+ β€’ {', '.join(suggested_tools)}
420
 
421
+ *Complete analysis available - AI processing optimized for speed.*"""
422
 
423
  logger.info("βœ… Research successful with Ollama + tools")
424
  return {
 
437
  logger.error(f"Ollama tools research failed: {e}")
438
  raise e
439
 
440
+ async def _research_with_gemini_tools(self, query: str, memory_context: Dict[str, Any] = None) -> Dict[str, Any]:
441
+ """Research using Gemini with tools - Enhanced with memory"""
442
  try:
443
  # Step 1: Analyze query and suggest tools using Gemini
444
+ # Include memory context if available
445
+ context_info = ""
446
+ if memory_context and memory_context.get("cached_context"):
447
+ recent_tools = []
448
+ for ctx in memory_context["cached_context"][:2]: # Last 2 contexts
449
+ if "tools_used" in ctx:
450
+ recent_tools.extend(ctx["tools_used"])
451
+ if recent_tools:
452
+ context_info = f"\n\nRecent tools used: {', '.join(set(recent_tools))}"
453
+
454
  tool_analysis_prompt = f"""Based on this Web3/cryptocurrency research query, identify the most relevant tools to use.
455
 
456
+ Query: "{query}"{context_info}
457
 
458
  Available tools (prioritized by functionality):
459
  - cryptocompare_data: Real-time cryptocurrency prices, market data, and trading info (PREFERRED for price data)
 
557
  try:
558
  final_response = await asyncio.wait_for(
559
  self.llm.ainvoke(final_prompt),
560
+ timeout=60 # 60 second timeout for complex analysis
561
  )
562
  logger.info(f"🎯 Gemini final response preview: {str(final_response)[:300]}...")
563
 
 
583
  final_response = clean_response
584
 
585
  except asyncio.TimeoutError:
586
+ logger.warning("⏱️ Gemini final response timed out (60s), using enhanced tool summary")
587
+
588
+ # Create enhanced summary from tools
589
+ summary_parts = []
590
+ if "cryptocompare_data" in suggested_tools:
591
+ summary_parts.append("πŸ“Š **Market Data**: Real-time cryptocurrency prices")
592
+ if "defillama_data" in suggested_tools:
593
+ summary_parts.append("πŸ›οΈ **DeFi Analytics**: Protocol TVL and performance metrics")
594
+ if "etherscan_data" in suggested_tools:
595
+ summary_parts.append("⛓️ **On-Chain Data**: Ethereum blockchain insights")
596
+ if "chart_data_provider" in suggested_tools:
597
+ summary_parts.append("πŸ“ˆ **Visualizations**: Chart data prepared")
598
+
599
+ final_response = f"""## Web3 Research Analysis
600
+
601
+ {chr(10).join(summary_parts)}
602
+
603
+ **Data Sources Processed**: {len(suggested_tools)} tools executed successfully
604
+
605
+ {context[:800] if context else 'Tool data processing completed'}
606
+
607
+ *Analysis optimized for real-time delivery*"""
608
 
609
  logger.info("βœ… Research successful with Gemini + tools")
610
 
 
645
  if "CryptoCompare" in response or "cryptocompare" in response.lower():
646
  sources.append("CryptoCompare")
647
  return sources
648
+
649
+ def get_conversation_history(self) -> Dict[str, Any]:
650
+ """Get conversation history from memory"""
651
+ return self.memory_manager.get_relevant_context("")
652
+
653
+ def clear_conversation_memory(self):
654
+ """Clear conversation memory"""
655
+ self.memory_manager.clear_memory()
656
+ logger.info("🧠 Conversation memory cleared")
657
+
658
+ def get_memory_stats(self) -> Dict[str, Any]:
659
+ """Get memory usage statistics"""
660
+ history = self.memory_manager.memory.load_memory_variables({})
661
+ return {
662
+ "total_interactions": len(history.get("chat_history", [])) // 2, # Each interaction has input+output
663
+ "cached_contexts": len(self.memory_manager.context_cache),
664
+ "memory_enabled": True
665
+ }
src/utils/ai_safety.py CHANGED
@@ -165,32 +165,22 @@ class AISafetyGuard:
165
  return cleaned, True, "Response is safe"
166
 
167
  def create_safe_prompt(self, user_query: str, tool_context: str) -> str:
168
- """Create a safety-enhanced prompt for Ollama"""
169
- safety_instructions = """
170
- SAFETY GUIDELINES:
171
- - Provide only factual, helpful information about cryptocurrency and blockchain
172
- - Do not provide advice on market manipulation, illegal activities, or harmful content
173
- - Focus on educational and analytical content
174
- - If asked about unsafe topics, politely decline and redirect to safe alternatives
175
- - Base your response strictly on the provided data
176
-
177
- """
178
 
179
- prompt = f"""{safety_instructions}
180
 
181
- USER QUERY: {user_query}
182
 
183
- CONTEXT DATA:
184
  {tool_context}
185
 
186
- INSTRUCTIONS:
187
- - Answer the user's cryptocurrency question using only the provided context data
188
- - Be professional, accurate, and helpful
189
- - If the data doesn't support a complete answer, acknowledge the limitations
190
- - Provide educational insights where appropriate
191
- - Keep responses focused on legitimate cryptocurrency analysis
192
 
193
- RESPONSE:"""
194
 
195
  return prompt
196
 
 
165
  return cleaned, True, "Response is safe"
166
 
167
  def create_safe_prompt(self, user_query: str, tool_context: str) -> str:
168
+ """Create a safety-enhanced prompt for Ollama - Optimized for speed"""
169
+
170
+ # Truncate context if too long to improve processing speed
171
+ if len(tool_context) > 2000:
172
+ tool_context = tool_context[:2000] + "\n[Context truncated for processing speed]"
 
 
 
 
 
173
 
174
+ prompt = f"""Answer this cryptocurrency question using the data provided:
175
 
176
+ QUESTION: {user_query}
177
 
178
+ DATA:
179
  {tool_context}
180
 
181
+ Provide a helpful, factual response focused on cryptocurrency analysis. Be concise and professional.
 
 
 
 
 
182
 
183
+ ANSWER:"""
184
 
185
  return prompt
186
 
test_chart_tool.py ADDED
File without changes
test_response_clean.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Quick test to verify response cleaning works properly
4
+ """
5
+
6
+ import asyncio
7
+ import sys
8
+ import os
9
+
10
+ # Add src to path
11
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
12
+
13
+ from src.agent.research_agent import Web3ResearchAgent
14
+
15
+ async def test_response_cleaning():
16
+ """Test that responses are properly cleaned of LangChain metadata"""
17
+ print("πŸ§ͺ Testing response cleaning...")
18
+
19
+ agent = Web3ResearchAgent()
20
+
21
+ if not agent.enabled:
22
+ print("❌ Agent not enabled")
23
+ return False
24
+
25
+ try:
26
+ print("πŸ“Š Testing simple Bitcoin price query...")
27
+ result = await agent.research_query("What is Bitcoin current price?", use_gemini=True)
28
+
29
+ if result['success']:
30
+ response_content = result['result']
31
+ print(f"βœ… Query successful!")
32
+ print(f"πŸ“ˆ Response type: {type(response_content)}")
33
+ print(f"πŸ“„ Response preview: {response_content[:200]}...")
34
+
35
+ # Check if response contains LangChain metadata (bad)
36
+ if "additional_kwargs" in str(response_content) or "response_metadata" in str(response_content):
37
+ print("❌ Response contains LangChain metadata - not properly cleaned")
38
+ return False
39
+ else:
40
+ print("βœ… Response properly cleaned - no LangChain metadata found")
41
+ return True
42
+ else:
43
+ print(f"❌ Query failed: {result.get('error', 'Unknown error')}")
44
+ return False
45
+
46
+ except Exception as e:
47
+ print(f"❌ Test failed with exception: {e}")
48
+ return False
49
+
50
+ async def main():
51
+ success = await test_response_cleaning()
52
+ if success:
53
+ print("\nπŸŽ‰ Response cleaning test passed!")
54
+ return 0
55
+ else:
56
+ print("\n❌ Response cleaning test failed!")
57
+ return 1
58
+
59
+ if __name__ == "__main__":
60
+ exit_code = asyncio.run(main())
61
+ sys.exit(exit_code)
test_tool_selection.py ADDED
File without changes