#!/usr/bin/env bash
# Container entrypoint: start an Ollama server, fetch Llama 3.1 8B weights,
# register the model, then block so the container stays alive.
# Set environment variables for the ollama server
export OLLAMA_HOST=0.0.0.0                          # listen on all interfaces, not just loopback
export OLLAMA_ORIGINS=https://projects.blender.org  # CORS: allow requests from this origin only

# Start the Ollama service in the background
ollama serve &

# Wait until the server accepts connections (up to ~30s) instead of a blind sleep.
# Ollama answers plain HTTP on its default port 11434 — TODO confirm port if
# OLLAMA_HOST is ever changed to include an explicit port.
for _ in {1..30}; do
  if curl -fsS http://127.0.0.1:11434/ >/dev/null 2>&1; then
    break
  fi
  sleep 1
done

# Download the model weights.
# The URL is quoted: it contains '?', which the shell would otherwise treat as a glob.
curl -fsSL "https://huggingface.co/GPT4All-Community/Meta-Llama-3.1-8B-Instruct-128k-GGUF/resolve/main/Meta-Llama-3.1-8B-Instruct-128k-Q4_0.gguf?download=true" -o llama.gguf \
  || { echo "model download failed" >&2; exit 1; }

# Create the model using Ollama (Modelfile is expected to reference ./llama.gguf)
ollama create llama3.1 -f Modelfile

# Keep the container running indefinitely
tail -f /dev/null