chatCPU / run.sh
#!/bin/bash
# Start the Ollama API server in the background.
# 'ollama serve' listens on http://localhost:11434 by default.
ollama serve &
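# Optional sketch (not in the original script): remember the server's PID
# and kill it when this script exits, so the background process does not
# linger after the Gradio app stops.
OLLAMA_PID=$!
trap 'kill "$OLLAMA_PID" 2>/dev/null' EXIT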
# Wait for the Ollama server to become available.
# We loop and check the Ollama API endpoint until it responds.
echo "Waiting for Ollama to start..."
while ! curl -s http://localhost:11434 > /dev/null; do
    sleep 1
done
echo "Ollama started."
# Create and register the custom model with Ollama from the Modelfile
# in this directory. 'gemma-unsloth' is the name the application will
# use to refer to this model.
ollama create gemma-unsloth -f Modelfile
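# Optional sketch (not in the original script): confirm the model was
# registered. 'ollama list' prints locally available models; warn if
# ours is absent.
if ! ollama list | grep -q "gemma-unsloth"; then
    echo "Warning: 'gemma-unsloth' does not appear to be registered." >&2
fi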
# Run the Gradio application in the foreground. The app communicates
# with the Ollama server over its local API at http://localhost:11434.
python app.py