Update run.sh
run.sh
CHANGED
@@ -1,17 +1,25 @@
 #!/bin/bash
 
+# Exit immediately if a command exits with a non-zero status.
+set -e
+
 # Start the Ollama server in the background.
-# The
+# The OLLAMA_HOST environment variable is already set in the Dockerfile, so it will listen on all interfaces.
 ollama serve &
 
 # Wait for the Ollama server to become available.
-# We loop and check the Ollama API endpoint until it responds.
 echo "Waiting for Ollama to start..."
 while ! curl -s http://localhost:11434 > /dev/null; do
   sleep 1
 done
 echo "Ollama started."
 
+# Create the model from the Modelfile. This registers the downloaded GGUF file with Ollama.
+# 'gemma-unsloth' is the name we will use to refer to this model in our application.
+echo "Creating model 'gemma-unsloth'..."
+ollama create gemma-unsloth -f Modelfile
+echo "Model created."
+
 # Run the Gradio application.
-#
+# It will connect to the Ollama server running on localhost.
 python app.py
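The Modelfile referenced by 'ollama create gemma-unsloth -f Modelfile' is not part of this change. A minimal sketch of what it might contain, assuming the Space downloads a single GGUF file; the filename and parameter below are placeholders, not the actual contents:

# Hypothetical Modelfile: the GGUF path is an assumed placeholder.
FROM ./model.gguf
PARAMETER temperature 0.7

Likewise, app.py is not shown in this commit. A minimal sketch of how a Gradio app might talk to the Ollama server started by this script, assuming the standard /api/generate HTTP endpoint and the 'gemma-unsloth' model name registered above; the function name, prompt handling, and generation options are assumptions, not the Space's actual code:

# Minimal sketch, not the Space's actual app.py.
import requests
import gradio as gr

OLLAMA_URL = "http://localhost:11434/api/generate"

def generate(prompt):
    # Request a single, non-streamed completion from the model that
    # run.sh registered as 'gemma-unsloth'.
    resp = requests.post(
        OLLAMA_URL,
        json={"model": "gemma-unsloth", "prompt": prompt, "stream": False},
        timeout=300,
    )
    resp.raise_for_status()
    return resp.json()["response"]

demo = gr.Interface(fn=generate, inputs="text", outputs="text")
demo.launch(server_name="0.0.0.0", server_port=7860)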