#!/usr/bin/env python3
"""
Complete pipeline test for Web3 Research Agent with Ollama fallback
Tests the entire flow: API calls → LLM processing → Response generation
"""
import asyncio
import sys
import os
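# Make the repo-local 'src' package importable when the script is run from the project root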
sys.path.append('.')
async def test_complete_pipeline():
print("π§ͺ Testing Complete Web3 Research Pipeline with Ollama Fallback")
print("=" * 60)
# Test 1: Initialize the research agent
print("\n1οΈβ£ Testing Research Agent Initialization...")
try:
from src.agent.research_agent import Web3ResearchAgent
agent = Web3ResearchAgent()
if agent.enabled:
print(f"β
Primary LLM (Gemini) initialized successfully")
else:
print("β οΈ Primary LLM failed, will test Ollama fallback")
print(f"β
Agent initialized with {len(agent.tools)} tools")
for tool in agent.tools:
print(f" - {tool.name}")
except Exception as e:
print(f"β Agent initialization failed: {e}")
return False

    # Test 2: Test Ollama connection
    print("\n2️⃣ Testing Ollama Connection...")
    try:
        import requests
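        # GET /api/tags asks the local Ollama server which models are installed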
        response = requests.get("http://localhost:11434/api/tags", timeout=5)
        if response.status_code == 200:
            models = response.json().get("models", [])
            print(f"✅ Ollama connected. Available models: {[m['name'] for m in models]}")

            # Test direct Ollama inference
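            # With "stream": False, Ollama returns one JSON object whose "response" field holds the full completion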
            test_response = requests.post(
                "http://localhost:11434/api/generate",
                json={
                    "model": "llama3.1:8b",
                    "prompt": "What is DeFi in one sentence?",
                    "stream": False
                },
                timeout=30
            )
            if test_response.status_code == 200:
                result = test_response.json()
                print(f"✅ Ollama inference test: {result['response'][:100]}...")
            else:
                print(f"❌ Ollama inference failed: {test_response.status_code}")
        else:
            print(f"❌ Ollama connection failed: {response.status_code}")
    except Exception as e:
        print(f"❌ Ollama test failed: {e}")

    # Test 3: Test API integrations
    print("\n3️⃣ Testing API Integrations...")

    # Test DeFiLlama
    try:
        from src.tools.defillama_tool import DeFiLlamaTool
        defillama = DeFiLlamaTool()
        result = await defillama._arun("top 3 defi protocols")
        if result and "⚠️" not in result:
            print(f"✅ DeFiLlama API: {result[:80]}...")
        else:
            print(f"⚠️ DeFiLlama API: {result[:80]}...")
    except Exception as e:
        print(f"❌ DeFiLlama test failed: {e}")

    # Test CoinGecko
    try:
        from src.tools.coingecko_tool import CoinGeckoTool
        coingecko = CoinGeckoTool()
        result = await coingecko._arun("bitcoin price")
        if result and "⚠️" not in result:
            print(f"✅ CoinGecko API: {result[:80]}...")
        else:
            print(f"⚠️ CoinGecko API: {result[:80]}...")
    except Exception as e:
        print(f"❌ CoinGecko test failed: {e}")

    # Test Chart Data
    try:
        from src.tools.chart_data_tool import ChartDataTool
        chart_tool = ChartDataTool()
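        # Positional args appear to be chart type, asset id, and timeframe (inferred from the call below)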
        result = await chart_tool._arun("price_chart", "bitcoin", "7d")
        if result and len(result) > 100:
            print(f"✅ Chart Data: Generated {len(result)} chars of chart data")
        else:
            print(f"⚠️ Chart Data: {result[:80]}...")
    except Exception as e:
        print(f"❌ Chart Data test failed: {e}")

    # Test 4: Test complete research query
    print("\n4️⃣ Testing Complete Research Query...")
    try:
        # Force Ollama fallback by setting GEMINI_API_KEY to an invalid value
        original_key = os.environ.get('GEMINI_API_KEY')
        os.environ['GEMINI_API_KEY'] = 'invalid_key_for_testing'

        # Reinitialize agent to trigger fallback
        agent_fallback = Web3ResearchAgent()
        if agent_fallback.fallback_llm and agent_fallback.ollama_available:
            print("✅ Ollama fallback initialized successfully")

            # Test with a simple query first
            simple_result = await agent_fallback.research_query(
                "What is Bitcoin? Give a brief answer."
            )
            if simple_result and simple_result.get('success'):
                response_text = simple_result.get('result', simple_result.get('response', 'No response text'))
                llm_used = simple_result.get('metadata', {}).get('llm_used', 'Unknown')
                print(f"✅ Query successful with {llm_used}: {response_text[:100]}...")

                # Now test with Web3 data integration
                web3_result = await agent_fallback.research_query(
                    "Get Bitcoin price and explain current market trends"
                )
                if web3_result and web3_result.get('success'):
                    web3_response = web3_result.get('result', web3_result.get('response', 'No response text'))
                    web3_llm = web3_result.get('metadata', {}).get('llm_used', 'Unknown')
                    print(f"✅ Web3 integration with {web3_llm}: {web3_response[:100]}...")
                    print(f" Sources: {web3_result.get('sources', [])}")
                    visualizations = web3_result.get('visualizations', web3_result.get('metadata', {}).get('visualizations', []))
                    print(f" Visualizations: {len(visualizations)}")
                else:
                    print(f"⚠️ Web3 integration: {web3_result}")
            else:
                print(f"❌ Query failed: {simple_result}")
        else:
            print("❌ Ollama fallback initialization failed")

        # Restore original key
        if original_key:
            os.environ['GEMINI_API_KEY'] = original_key
        else:
            os.environ.pop('GEMINI_API_KEY', None)
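        # Note: if research_query raises, this restore is skipped; a try/finally would guarantee the key is put back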
    except Exception as e:
        print(f"❌ Complete query test failed: {e}")
        import traceback
        traceback.print_exc()

    print("\n" + "=" * 60)
    print("🎉 Pipeline Test Complete!")
    return True
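
# Running the full pipeline assumes a local Ollama server on localhost:11434 with the llama3.1:8b model pulled,
# plus network access to the DeFiLlama and CoinGecko APIs used by the tools.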
if __name__ == "__main__":
    asyncio.run(test_complete_pipeline())