Create Dockerfile
Dockerfile ADDED (+45 -0)
# Use a base Ubuntu image
FROM ubuntu:22.04

# Prevent interactive prompts during apt operations
ENV DEBIAN_FRONTEND=noninteractive

# Install system dependencies: curl for the Ollama installer, wget for the model download,
# git (optional, useful for debugging), and python3/pip for the application
RUN apt-get update && apt-get install -y curl wget git python3 python3-pip && \
    rm -rf /var/lib/apt/lists/*

# Install Ollama using its official installation script.
# The script detects the system architecture and installs the matching binary.
RUN curl -fsSL https://ollama.com/install.sh | sh

# Set the working directory inside the container
WORKDIR /app

# Define the model repository and filename as environment variables
# so the model can be changed without editing the RUN command below.
ENV MODEL_REPO="unsloth/gemma-3-4b-it-qat-GGUF"
ENV MODEL_FILENAME="gemma-3-4b-it-qat.Q4_K_M.gguf"

# Download the GGUF model file directly from the Hugging Face Hub.
# 'wget -O' saves the file under the desired name in the working directory,
# and the 'resolve/main' path returns the raw file content.
RUN wget -O ${MODEL_FILENAME} https://huggingface.co/${MODEL_REPO}/resolve/main/${MODEL_FILENAME}

# Copy the Modelfile, application script, Python requirements, and startup script into the container
COPY Modelfile .
COPY app.py .
COPY requirements.txt .
COPY run.sh .

# Install the Python dependencies required by the Gradio application
RUN pip install --no-cache-dir -r requirements.txt

# Make the startup script executable
RUN chmod +x run.sh

# Expose the port the Gradio application listens on.
# Hugging Face Spaces expects Gradio apps on port 7860.
EXPOSE 7860

# Use the startup script as the container command.
# It starts the Ollama server and then launches the application.
CMD ["./run.sh"]
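
The Modelfile copied above is not part of this diff. A minimal sketch, assuming it only needs to point Ollama at the GGUF file downloaded earlier (the PARAMETER line is illustrative, not taken from the commit):

# Register the locally downloaded GGUF weights with Ollama
FROM ./gemma-3-4b-it-qat.Q4_K_M.gguf

# Illustrative sampling parameter; adjust or remove as needed
PARAMETER temperature 0.7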
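
run.sh is also referenced but not shown. A plausible sketch of a startup script matching the CMD above, assuming the model is registered under the hypothetical name gemma3-4b and that app.py is the Gradio entry point:

#!/bin/bash
set -e

# Start the Ollama server in the background and give it a moment to come up
ollama serve &
sleep 5

# Create a local model from the downloaded GGUF via the Modelfile
# ("gemma3-4b" is a hypothetical name chosen for this sketch)
ollama create gemma3-4b -f Modelfile

# Run the Gradio app in the foreground so the container keeps running
python3 app.py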
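
Likewise, app.py and requirements.txt are not shown in this commit. Assuming requirements.txt lists at least gradio and the ollama Python client, a minimal Gradio app bound to port 7860 might look like this sketch:

import gradio as gr
import ollama  # assumes the "ollama" client package is listed in requirements.txt

MODEL_NAME = "gemma3-4b"  # hypothetical; must match the name used in run.sh

def chat(message, history):
    # Send the user's message to the local Ollama server and return the reply text
    response = ollama.chat(model=MODEL_NAME,
                           messages=[{"role": "user", "content": message}])
    return response["message"]["content"]

demo = gr.ChatInterface(chat)
# Spaces route traffic to port 7860, so bind to all interfaces on that port
demo.launch(server_name="0.0.0.0", server_port=7860)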