# Singtel_Use_Case1 / setup_ollama.py
# Author: cosmoruler β€” commit c69ba8c ("problems fixed")
"""
Ollama Setup Helper for Windows (SmoLagents Native)
===================================================
This script helps you set up Ollama for free AI analysis using SmoLagents' native Ollama support
"""
import subprocess
import time
import requests
import os
def check_ollama_installed():
    """Return True if the `ollama` CLI is installed and answers `--version`.

    Prints a status line either way; never raises.
    """
    try:
        proc = subprocess.run(
            ['ollama', '--version'],
            capture_output=True, text=True, timeout=10,
        )
    except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.SubprocessError):
        # Binary missing, hung, or otherwise unrunnable.
        print("❌ Ollama is not installed")
        return False
    if proc.returncode != 0:
        print("❌ Ollama is not installed or not working properly")
        return False
    print(f"βœ… Ollama is installed: {proc.stdout.strip()}")
    return True
def check_ollama_running():
    """Return True if an Ollama server responds on localhost:11434.

    Probes the root endpoint with a short timeout; prints a status line
    for every outcome and never raises.
    """
    try:
        resp = requests.get("http://localhost:11434", timeout=5)
    except requests.exceptions.RequestException:
        # Connection refused / DNS / timeout β€” server not up at all.
        print("❌ Ollama server is not running")
        return False
    if resp.status_code != 200:
        print("⚠️ Ollama server is not responding properly")
        return False
    print("βœ… Ollama server is running")
    return True
def start_ollama_server():
    """Launch `ollama serve` in the background and verify it came up.

    Returns:
        True when the server answers shortly after launch, False on any
        failure (missing binary, server not responding, etc.). Never raises.
    """
    try:
        print("πŸš€ Starting Ollama server...")
        # Discard the child's output: the previous stdout/stderr=PIPE was
        # never read, which can deadlock `ollama serve` once the OS pipe
        # buffer fills up. DEVNULL keeps it detached and safe.
        subprocess.Popen(['ollama', 'serve'],
                         stdout=subprocess.DEVNULL,
                         stderr=subprocess.DEVNULL)
        # Give the server a moment to bind its port before probing it.
        time.sleep(3)
        if check_ollama_running():
            print("βœ… Ollama server started successfully")
            return True
        print("❌ Failed to start Ollama server")
        return False
    except Exception as e:
        print(f"❌ Error starting Ollama server: {e}")
        return False
def download_model(model_name="llama2"):
    """Pull *model_name* with `ollama pull`; return True on success.

    Allows up to 10 minutes for the download; prints progress/status
    lines and never raises.
    """
    print(f"πŸ“₯ Downloading {model_name} model (this may take a while)...")
    try:
        proc = subprocess.run(
            ['ollama', 'pull', model_name],
            capture_output=True, text=True, timeout=600,
        )
    except subprocess.TimeoutExpired:
        print(f"⏰ Download timeout for {model_name} model")
        return False
    except Exception as e:
        print(f"❌ Error downloading {model_name} model: {e}")
        return False
    if proc.returncode == 0:
        print(f"βœ… {model_name} model downloaded successfully")
        return True
    print(f"❌ Failed to download {model_name} model")
    print(f"Error: {proc.stderr}")
    return False
def list_available_models():
    """Print the output of `ollama list`; return True when it succeeds.

    Never raises β€” any failure is reported on stdout and returns False.
    """
    try:
        proc = subprocess.run(['ollama', 'list'],
                              capture_output=True, text=True, timeout=10)
    except Exception as e:
        print(f"❌ Error listing models: {e}")
        return False
    if proc.returncode != 0:
        print("❌ Failed to list models")
        return False
    print("πŸ“‹ Available models:")
    print(proc.stdout)
    return True
def test_ollama_chat(model_name="llama2"):
    """Smoke-test *model_name* by running one short prompt through it.

    Returns True when `ollama run` exits cleanly within 30 seconds;
    prints the model's response. Never raises.
    """
    print(f"πŸ§ͺ Testing {model_name} model...")
    test_prompt = "Hello, can you help me analyze data? Just say yes or no."
    try:
        proc = subprocess.run(['ollama', 'run', model_name, test_prompt],
                              capture_output=True, text=True, timeout=30)
    except subprocess.TimeoutExpired:
        print("⏰ Ollama model test timeout")
        return False
    except Exception as e:
        print(f"❌ Error testing Ollama model: {e}")
        return False
    if proc.returncode == 0:
        print("βœ… Ollama model test successful!")
        print(f"Response: {proc.stdout.strip()}")
        return True
    print("❌ Ollama model test failed")
    print(f"Error: {proc.stderr}")
    return False
def setup_ollama():
    """Complete Ollama setup: install check, server start, model pull, smoke test.

    Returns:
        True when a model is available and answers a test prompt,
        False at the first unrecoverable step (with instructions printed).
    """
    print("πŸ€– OLLAMA SETUP FOR FREE AI ANALYSIS")
    print("=" * 40)
    # Step 1: Check installation
    if not check_ollama_installed():
        print("\nπŸ“ Installation Instructions:")
        print("1. Go to https://ollama.ai/")
        print("2. Download the Windows installer")
        print("3. Run the installer")
        print("4. Restart your terminal/command prompt")
        print("5. Run this script again")
        return False
    # Step 2: Start server
    if not check_ollama_running():
        if not start_ollama_server():
            print("\nπŸ”§ Manual server start:")
            print("Open a new terminal and run: ollama serve")
            return False
    # Step 3: Download model
    print(f"\nπŸ“‹ Checking available models...")
    list_available_models()
    # Check if llama2 is available (timeout added: the previous call could hang forever)
    result = subprocess.run(['ollama', 'list'],
                            capture_output=True, text=True, timeout=30)
    # Track the model to test explicitly. The previous code re-read the stale
    # `result.stdout` captured *before* downloading, so a freshly pulled
    # llama2 was never selected and the smoke test ran against an absent "phi".
    model_to_test = "llama2"
    if 'llama2' not in result.stdout:
        print("\nπŸ“₯ Downloading llama2 model...")
        if not download_model("llama2"):
            # Try a smaller model if llama2 fails
            print("πŸ”„ Trying smaller model (phi)...")
            if not download_model("phi"):
                print("❌ Failed to download any model")
                return False
            model_to_test = "phi"
    # Step 4: Test the setup
    print(f"\nπŸ§ͺ Testing setup...")
    if test_ollama_chat(model_to_test):
        print("\nπŸŽ‰ OLLAMA SETUP COMPLETE!")
        print("You can now use AI analysis in your upload.py script")
        return True
    else:
        print("❌ Setup incomplete - model test failed")
        return False
if __name__ == "__main__":
    # Run the full setup and print follow-up instructions on success.
    if setup_ollama():
        for line in (
            "\nπŸš€ Next Steps:",
            "1. Run: python upload.py",
            "2. Choose option 2 (Enhanced interactive mode)",
            "3. Use menu option 4 for AI analysis",
            "\nπŸ’‘ Your script is already configured for Ollama!",
        ):
            print(line)
    else:
        print("\nπŸ”§ Setup incomplete. Please follow the instructions above.")