# NOTE(review): this file was recovered from a "Delete dockerfile" diff view
# (dockerfile, 32 lines removed); the diff-UI residue has been converted to
# comments so the remaining content parses as a Dockerfile.
# syntax=docker/dockerfile:1

# Official slim Python runtime as the base image.
FROM python:3.11-slim

# All application files live here; WORKDIR creates the directory if missing.
WORKDIR /app

# System dependencies:
#   curl            - fetches the Ollama install script; also used by HEALTHCHECK
#   ca-certificates - TLS verification for the HTTPS download below
# --no-install-recommends keeps the layer minimal; the apt lists are removed in
# the SAME layer so they never persist in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
      ca-certificates \
      curl \
    && rm -rf /var/lib/apt/lists/*

# Install Ollama.
# NOTE(review): piping a remote script straight to sh is unpinned and
# unverified — consider downloading a tagged release and checking its sha256.
RUN curl -fsSL https://ollama.com/install.sh | sh

# Copy only the dependency manifest first so the pip layer stays cached until
# requirements.txt itself changes (source edits won't trigger a reinstall).
COPY requirements.txt .

# Install Python dependencies; --no-cache-dir keeps pip's cache out of the layer.
RUN pip install --no-cache-dir -r requirements.txt

# Run as a non-root user. UID 1000 is the user Hugging Face Spaces runs
# containers as; Ollama stores pulled models under $HOME/.ollama.
RUN useradd -m -u 1000 user
ENV HOME=/home/user

# Copy the rest of the application code, owned by the runtime user.
COPY --chown=user:user . .

USER user

# Port Hugging Face Spaces expects the web app on (EXPOSE is documentation
# only; it does not publish the port).
EXPOSE 7860

# Make the embedded Ollama server reachable on all interfaces.
ENV OLLAMA_HOST=0.0.0.0 \
    OLLAMA_PORT=11434

# Let orchestrators detect a wedged container via Streamlit's built-in health
# endpoint; generous start-period because the model is pulled at startup.
HEALTHCHECK --interval=30s --timeout=5s --start-period=300s --retries=3 \
  CMD curl -fsS http://localhost:7860/_stcore/health || exit 1

# Start Ollama in the background, wait until its API actually answers (bounded
# poll instead of a fixed `sleep 10`, which raced on slow hosts), pull the
# model, then `exec` Streamlit so it becomes the shell's process and receives
# SIGTERM from `docker stop`. Exec-form CMD avoids an extra implicit sh -c.
CMD ["bash", "-c", "\
    ollama serve & \
    timeout 120 bash -c 'until curl -fsS http://127.0.0.1:11434/api/version >/dev/null 2>&1; do sleep 1; done' && \
    ollama pull hf.co/bartowski/Llama-3.2-1B-Instruct-GGUF:Llama-3.2-1B-Instruct-Q8_0.gguf && \
    exec streamlit run app.py --server.port 7860 --server.address 0.0.0.0"]