#!/bin/bash
# Container entrypoint: start the Ollama server, pre-pull required models,
# issue a warm-up generation request, then hand control to supervisord.
set -euo pipefail

# Start Ollama in the background and remember its PID so we can verify
# it is still alive after the readiness wait.
ollama serve &
ollama_pid=$!

# Wait for Ollama to initialize. A fixed sleep is racy; instead poll the
# REST API (the /api/tags endpoint responds once the server is up),
# bounded at ~60 seconds.
echo "Waiting for Ollama to start..."
for _ in $(seq 1 60); do
  if curl -fsS http://localhost:11434/api/tags >/dev/null 2>&1; then
    break
  fi
  # Bail out early if the server process already died.
  if ! kill -0 "$ollama_pid" 2>/dev/null; then
    echo "ERROR: ollama serve exited before becoming ready" >&2
    exit 1
  fi
  sleep 1
done

if ! curl -fsS http://localhost:11434/api/tags >/dev/null 2>&1; then
  echo "ERROR: Ollama did not become ready in time" >&2
  exit 1
fi

# Pull required models. Under 'set -e' a failed pull aborts startup,
# which is intentional: the container is useless without its model.
echo "Pulling Ollama models..."
ollama pull llama2
#ollama pull llama3.1
#ollama pull llama3.2
#ollama pull mistral

ollama list

# Best-effort warm-up request so the first real query doesn't pay the
# model-load cost. Output is discarded and failure is tolerated ('|| true')
# because warm-up is an optimization, not a startup requirement.
curl -fsS -X POST http://localhost:11434/api/generate \
  -H "Content-Type: application/json" \
  -d '{
    "model": "llama2",
    "prompt": "Hello, world!"
  }' >/dev/null || true

# Replace this shell with supervisord so it becomes PID 1's child and
# receives signals directly (proper container shutdown semantics).
echo "Starting supervisord..."
exec /usr/bin/supervisord -c /etc/supervisor/conf.d/supervisord.conf