File size: 822 Bytes
0879be9
 
fb249e4
 
 
0879be9
fb249e4
0879be9
 
 
 
 
 
 
 
 
fb249e4
 
 
 
 
 
0879be9
fb249e4
0879be9
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
#!/bin/bash

# Entrypoint: start the Ollama server, register the local GGUF model with it,
# then launch the Gradio front-end.
#
# Exit on error, on use of an unset variable, and on any failure in a pipeline.
set -euo pipefail

# Start the Ollama server in the background and remember its PID so the
# readiness loop below can detect an early crash (a failing background job
# is NOT caught by `set -e`).
# The OLLAMA_HOST environment variable is already set in the Dockerfile, so it
# will listen on all interfaces.
ollama serve &
ollama_pid=$!

# Wait for the Ollama server to become available, but give up after a bounded
# number of attempts instead of looping forever if the server never comes up.
echo "Waiting for Ollama to start..."
readonly max_attempts=60
attempt=0
until curl -s --max-time 2 http://localhost:11434 > /dev/null; do
  if ! kill -0 "$ollama_pid" 2>/dev/null; then
    echo "Error: Ollama server exited unexpectedly." >&2
    exit 1
  fi
  attempt=$((attempt + 1))
  if (( attempt >= max_attempts )); then
    echo "Error: Ollama did not become ready within ${max_attempts} attempts." >&2
    exit 1
  fi
  sleep 1
done
echo "Ollama started."

# Create the model from the Modelfile. This registers the downloaded GGUF file
# with Ollama. 'gemma-unsloth' is the name we will use to refer to this model
# in our application.
echo "Creating model 'gemma-unsloth'..."
ollama create gemma-unsloth -f Modelfile
echo "Model created."

# Run the Gradio application; it connects to the Ollama server on localhost.
# 'exec' replaces this shell with the app so signals (e.g. SIGTERM on
# container stop) reach it directly — important when this script is PID 1.
exec python app.py