"""
Ollama Setup Helper for Windows (SmoLagents Native)
===================================================
This script helps you set up Ollama for free AI analysis using SmoLagents' native Ollama support
"""
import subprocess
import time

import requests


def check_ollama_installed():
    """Check if Ollama is installed"""
    try:
        result = subprocess.run(['ollama', '--version'],
                                capture_output=True, text=True, timeout=10)
        if result.returncode == 0:
            print(f"✅ Ollama is installed: {result.stdout.strip()}")
            return True
        else:
            print("❌ Ollama is not installed or not working properly")
            return False
    except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.SubprocessError):
        print("❌ Ollama is not installed")
        return False


def check_ollama_running():
    """Check if Ollama server is running"""
    try:
        response = requests.get("http://localhost:11434", timeout=5)
        if response.status_code == 200:
            print("✅ Ollama server is running")
            return True
        else:
            print("⚠️ Ollama server is not responding properly")
            return False
    except requests.exceptions.RequestException:
        print("❌ Ollama server is not running")
        return False


def start_ollama_server():
    """Start Ollama server"""
    try:
        print("🚀 Starting Ollama server...")
        # Start Ollama server in the background; discard its output so the
        # unread pipes can't fill up and block the server process
        subprocess.Popen(['ollama', 'serve'],
                         stdout=subprocess.DEVNULL,
                         stderr=subprocess.DEVNULL)
        # Wait a bit for the server to start
        time.sleep(3)
        if check_ollama_running():
            print("✅ Ollama server started successfully")
            return True
        else:
            print("❌ Failed to start Ollama server")
            return False
    except Exception as e:
        print(f"❌ Error starting Ollama server: {e}")
        return False


def download_model(model_name="llama2"):
    """Download a model for Ollama"""
    try:
        print(f"📥 Downloading {model_name} model (this may take a while)...")
        result = subprocess.run(['ollama', 'pull', model_name],
                                capture_output=True, text=True, timeout=600)
        if result.returncode == 0:
            print(f"✅ {model_name} model downloaded successfully")
            return True
        else:
            print(f"❌ Failed to download {model_name} model")
            print(f"Error: {result.stderr}")
            return False
    except subprocess.TimeoutExpired:
        print(f"⏰ Download timeout for {model_name} model")
        return False
    except Exception as e:
        print(f"❌ Error downloading {model_name} model: {e}")
        return False


def list_available_models():
    """List downloaded models"""
    try:
        result = subprocess.run(['ollama', 'list'],
                                capture_output=True, text=True, timeout=10)
        if result.returncode == 0:
            print("📋 Available models:")
            print(result.stdout)
            return True
        else:
            print("❌ Failed to list models")
            return False
    except Exception as e:
        print(f"❌ Error listing models: {e}")
        return False


def test_ollama_chat(model_name="llama2"):
    """Test Ollama with a simple chat"""
    try:
        print(f"🧪 Testing {model_name} model...")
        test_prompt = "Hello, can you help me analyze data? Just say yes or no."
        result = subprocess.run(['ollama', 'run', model_name, test_prompt],
                                capture_output=True, text=True, timeout=30)
        if result.returncode == 0:
            print("✅ Ollama model test successful!")
            print(f"Response: {result.stdout.strip()}")
            return True
        else:
            print("❌ Ollama model test failed")
            print(f"Error: {result.stderr}")
            return False
    except subprocess.TimeoutExpired:
        print("⏰ Ollama model test timeout")
        return False
    except Exception as e:
        print(f"❌ Error testing Ollama model: {e}")
        return False


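# Hedged sketch of the same test over the REST API that SmoLagents/LiteLLM
# ultimately talk to: POST /api/generate with "stream": false returns the
# whole completion as a single JSON object. The function name is illustrative.
def test_ollama_api(model_name="llama2"):
    """Test a model through Ollama's HTTP API instead of the CLI"""
    try:
        response = requests.post(
            "http://localhost:11434/api/generate",
            json={"model": model_name,
                  "prompt": "Just say yes or no: can you analyze data?",
                  "stream": False},
            timeout=60,
        )
        response.raise_for_status()
        print(f"Response: {response.json().get('response', '').strip()}")
        return True
    except requests.exceptions.RequestException as e:
        print(f"❌ API test failed: {e}")
        return False

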
def setup_ollama():
    """Complete Ollama setup process"""
    print("🤖 OLLAMA SETUP FOR FREE AI ANALYSIS")
    print("=" * 40)

    # Step 1: Check installation
    if not check_ollama_installed():
        print("\n📋 Installation Instructions:")
        print("1. Go to https://ollama.ai/")
        print("2. Download the Windows installer")
        print("3. Run the installer")
        print("4. Restart your terminal/command prompt")
        print("5. Run this script again")
        return False

    # Step 2: Start server
    if not check_ollama_running():
        if not start_ollama_server():
            print("\n🔧 Manual server start:")
            print("Open a new terminal and run: ollama serve")
            return False

    # Step 3: Download model
    print("\n📋 Checking available models...")
    list_available_models()

    # Check whether llama2 is already available; track which model to test so
    # a model downloaded in this run is not missed by the stale `ollama list`
    # output captured before the download
    result = subprocess.run(['ollama', 'list'], capture_output=True, text=True, timeout=10)
    model_to_test = "llama2"
    if 'llama2' not in result.stdout:
        print("\n📥 Downloading llama2 model...")
        if not download_model("llama2"):
            # Fall back to a smaller model if llama2 fails
            print("🔄 Trying smaller model (phi)...")
            if not download_model("phi"):
                print("❌ Failed to download any model")
                return False
            model_to_test = "phi"

    # Step 4: Test the setup
    print("\n🧪 Testing setup...")
    if test_ollama_chat(model_to_test):
        print("\n🎉 OLLAMA SETUP COMPLETE!")
        print("You can now use AI analysis in your upload.py script")
        return True
    else:
        print("❌ Setup incomplete - model test failed")
        return False


if __name__ == "__main__":
success = setup_ollama()
if success:
print("\nπ Next Steps:")
print("1. Run: python upload.py")
print("2. Choose option 2 (Enhanced interactive mode)")
print("3. Use menu option 4 for AI analysis")
print("\nπ‘ Your script is already configured for Ollama!")
else:
print("\nπ§ Setup incomplete. Please follow the instructions above.")