""" | |
Ollama Setup Helper for Windows (SmoLagents Native) | |
=================================================== | |
This script helps you set up Ollama for free AI analysis using SmoLagents' native Ollama support | |
""" | |
import subprocess | |
import time | |
import requests | |
import os | |
def check_ollama_installed():
    """Check if Ollama is installed"""
    try:
        result = subprocess.run(['ollama', '--version'],
                                capture_output=True, text=True, timeout=10)
        if result.returncode == 0:
            print(f"✅ Ollama is installed: {result.stdout.strip()}")
            return True
        else:
            print("❌ Ollama is not installed or not working properly")
            return False
    except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.SubprocessError):
        print("❌ Ollama is not installed")
        return False
def check_ollama_running():
    """Check if Ollama server is running"""
    try:
        response = requests.get("http://localhost:11434", timeout=5)
        if response.status_code == 200:
            print("✅ Ollama server is running")
            return True
        else:
            print("⚠️ Ollama server is not responding properly")
            return False
    except requests.exceptions.RequestException:
        print("❌ Ollama server is not running")
        return False
def start_ollama_server():
    """Start Ollama server"""
    try:
        print("🚀 Starting Ollama server...")
        # Start Ollama server in the background
        subprocess.Popen(['ollama', 'serve'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
        # Poll for up to ~10 seconds instead of relying on a fixed sleep
        for _ in range(10):
            time.sleep(1)
            try:
                if requests.get("http://localhost:11434", timeout=2).status_code == 200:
                    print("✅ Ollama server started successfully")
                    return True
            except requests.exceptions.RequestException:
                continue
        print("❌ Failed to start Ollama server")
        return False
    except Exception as e:
        print(f"❌ Error starting Ollama server: {e}")
        return False
def download_model(model_name="llama2"):
    """Download a model for Ollama"""
    try:
        print(f"📥 Downloading {model_name} model (this may take a while)...")
        # 10-minute timeout; large models on slow connections may need longer
        result = subprocess.run(['ollama', 'pull', model_name],
                                capture_output=True, text=True, timeout=600)
        if result.returncode == 0:
            print(f"✅ {model_name} model downloaded successfully")
            return True
        else:
            print(f"❌ Failed to download {model_name} model")
            print(f"Error: {result.stderr}")
            return False
    except subprocess.TimeoutExpired:
        print(f"⏰ Download timeout for {model_name} model")
        return False
    except Exception as e:
        print(f"❌ Error downloading {model_name} model: {e}")
        return False
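# Optional sketch, not part of the original setup flow: pull a model through
# Ollama's HTTP API (POST /api/pull) instead of the CLI, streaming progress
# lines so there is no fixed subprocess timeout. The endpoint and payload
# follow Ollama's documented REST API; verify against your Ollama version.
def download_model_via_api(model_name="llama2"):
    """Pull a model via Ollama's HTTP API, printing streamed status lines"""
    import json
    try:
        with requests.post("http://localhost:11434/api/pull",
                           json={"name": model_name}, stream=True) as response:
            response.raise_for_status()
            for line in response.iter_lines():
                if line:
                    # Each streamed line is a JSON object with a "status" field
                    print(f"   {json.loads(line).get('status', '')}")
        print(f"✅ {model_name} model downloaded via API")
        return True
    except requests.exceptions.RequestException as e:
        print(f"❌ API download failed: {e}")
        return False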
def list_available_models():
    """List downloaded models"""
    try:
        result = subprocess.run(['ollama', 'list'],
                                capture_output=True, text=True, timeout=10)
        if result.returncode == 0:
            print("📋 Available models:")
            print(result.stdout)
            return True
        else:
            print("❌ Failed to list models")
            return False
    except Exception as e:
        print(f"❌ Error listing models: {e}")
        return False
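# Optional sketch: the same model list is available programmatically through
# Ollama's REST API (GET /api/tags), which returns JSON instead of the CLI's
# formatted table — handy if you want to check for a model without parsing
# `ollama list` output.
def get_model_names_via_api():
    """Return installed model names via Ollama's /api/tags endpoint"""
    try:
        response = requests.get("http://localhost:11434/api/tags", timeout=5)
        response.raise_for_status()
        # Response shape: {"models": [{"name": "llama2:latest", ...}, ...]}
        return [m["name"] for m in response.json().get("models", [])]
    except requests.exceptions.RequestException:
        return []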
def test_ollama_chat(model_name="llama2"):
    """Test Ollama with a simple chat"""
    try:
        print(f"🧪 Testing {model_name} model...")
        test_prompt = "Hello, can you help me analyze data? Just say yes or no."
        # 30s is tight for a cold model load; increase if the test times out
        result = subprocess.run(['ollama', 'run', model_name, test_prompt],
                                capture_output=True, text=True, timeout=30)
        if result.returncode == 0:
            print("✅ Ollama model test successful!")
            print(f"Response: {result.stdout.strip()}")
            return True
        else:
            print("❌ Ollama model test failed")
            print(f"Error: {result.stderr}")
            return False
    except subprocess.TimeoutExpired:
        print("⏰ Ollama model test timeout")
        return False
    except Exception as e:
        print(f"❌ Error testing Ollama model: {e}")
        return False
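# Optional sketch: the same smoke test over Ollama's HTTP generate endpoint
# (POST /api/generate with "stream": false). This avoids spawning a CLI
# subprocess and is closer to how client libraries talk to the server.
def test_ollama_http(model_name="llama2"):
    """Test a model through Ollama's /api/generate endpoint"""
    try:
        response = requests.post(
            "http://localhost:11434/api/generate",
            json={"model": model_name,
                  "prompt": "Reply with the single word: yes",
                  "stream": False},
            timeout=60)
        response.raise_for_status()
        # Non-streaming responses carry the full completion in "response"
        print(f"Response: {response.json().get('response', '').strip()}")
        return True
    except requests.exceptions.RequestException as e:
        print(f"❌ HTTP model test failed: {e}")
        return False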
def setup_ollama():
    """Complete Ollama setup process"""
    print("🤖 OLLAMA SETUP FOR FREE AI ANALYSIS")
    print("=" * 40)
    # Step 1: Check installation
    if not check_ollama_installed():
        print("\n📋 Installation Instructions:")
        print("1. Go to https://ollama.ai/")
        print("2. Download the Windows installer")
        print("3. Run the installer")
        print("4. Restart your terminal/command prompt")
        print("5. Run this script again")
        return False
    # Step 2: Start server
    if not check_ollama_running():
        if not start_ollama_server():
            print("\n🔧 Manual server start:")
            print("Open a new terminal and run: ollama serve")
            return False
    # Step 3: Download model
    print("\n📋 Checking available models...")
    list_available_models()
    # Check if llama2 is already available
    result = subprocess.run(['ollama', 'list'],
                            capture_output=True, text=True, timeout=10)
    model_to_test = "llama2"
    if 'llama2' not in result.stdout:
        print("\n📥 Downloading llama2 model...")
        if not download_model("llama2"):
            # Fall back to a smaller model if llama2 fails
            print("🔄 Trying smaller model (phi)...")
            if not download_model("phi"):
                print("❌ Failed to download any model")
                return False
            # Test the model that actually downloaded, not llama2
            model_to_test = "phi"
    # Step 4: Test the setup
    print("\n🧪 Testing setup...")
    if test_ollama_chat(model_to_test):
        print("\n🎉 OLLAMA SETUP COMPLETE!")
        print("You can now use AI analysis in your upload.py script")
        return True
    else:
        print("❌ Setup incomplete - model test failed")
        return False
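# Optional sketch of what the docstring's "SmoLagents native Ollama support"
# looks like in practice, assuming the smolagents package is installed. It
# uses smolagents' LiteLLMModel wrapper pointed at the local Ollama server;
# the "ollama_chat/" model-id prefix and api_base follow smolagents'
# documented Ollama usage, but double-check them against your installed
# smolagents version.
def example_smolagents_usage(model_name="llama2"):
    """Build a smolagents model object backed by the local Ollama server"""
    from smolagents import LiteLLMModel  # requires: pip install smolagents[litellm]
    return LiteLLMModel(
        model_id=f"ollama_chat/{model_name}",  # LiteLLM route for Ollama chat
        api_base="http://localhost:11434",     # local Ollama server
    )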
if __name__ == "__main__": | |
success = setup_ollama() | |
if success: | |
print("\nπ Next Steps:") | |
print("1. Run: python upload.py") | |
print("2. Choose option 2 (Enhanced interactive mode)") | |
print("3. Use menu option 4 for AI analysis") | |
print("\nπ‘ Your script is already configured for Ollama!") | |
else: | |
print("\nπ§ Setup incomplete. Please follow the instructions above.") | |