File size: 1,326 Bytes
9fbe5ee
 
7149301
 
 
 
 
 
 
 
 
 
 
 
 
9fbe5ee
7149301
 
 
 
 
9fbe5ee
7149301
 
 
 
 
 
 
 
 
9fbe5ee
7149301
 
 
 
 
 
 
 
 
9fbe5ee
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
# Use an official Python runtime matching Hugging Face's environment
FROM python:3.10-slim

# Set working directory (created automatically if it does not exist)
WORKDIR /app

# Install system dependencies: curl for the Ollama installer and health
# probes, git so pip can resolve VCS requirements.
# --no-install-recommends avoids pulling in unneeded packages; the apt
# list cache is removed in the same layer so it never ships in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    curl \
    git \
    && rm -rf /var/lib/apt/lists/*

# Install Ollama. Download then run (instead of `curl | sh`) so a failed
# download fails the build — /bin/sh has no pipefail, and piping would let
# sh exit 0 on an empty stream.
RUN curl -fsSL https://ollama.com/install.sh -o /tmp/ollama-install.sh \
    && sh /tmp/ollama-install.sh \
    && rm /tmp/ollama-install.sh

# Pre-pull llama3 during build to avoid runtime delays.
# Start a temporary server, wait for it with a bounded retry loop (so a
# broken install fails the build instead of hanging forever), pull the
# model, record the model list, then stop the server.
# Fix: the original `… || echo "Model pull failed"` swallowed pull errors,
# letting the build succeed with no model baked into the image.
RUN ollama serve & \
    SERVER_PID=$!; \
    tries=0; \
    until curl -s http://localhost:11434 > /dev/null; do \
        tries=$((tries + 1)); \
        [ "$tries" -le 60 ] || { echo 'Ollama never came up'; exit 1; }; \
        echo 'Waiting for Ollama...'; sleep 1; \
    done && \
    ollama pull llama3 && \
    echo "Model pulled successfully" && \
    ollama list > /app/models.txt && \
    cat /app/models.txt && \
    kill "$SERVER_PID"

# Copy the dependency manifest on its own first so the pip layer below is
# cached until requirements.txt itself changes (not on every source edit)
COPY requirements.txt ./

# Install Python dependencies; --no-cache-dir keeps pip's download cache
# out of the image layer
RUN pip install --no-cache-dir -r requirements.txt

# Copy just the application entry point, not the whole build context
COPY app.py ./

# Document the port Hugging Face Spaces expects (EXPOSE does not publish)
EXPOSE 7860

# Bind Ollama to all interfaces so the in-container app can reach it.
# OLLAMA_HOST with no explicit port defaults to 11434.
ENV OLLAMA_HOST=0.0.0.0
# NOTE(review): Ollama reads its port from OLLAMA_HOST ("host:port"), not
# from a separate OLLAMA_PORT variable. Kept for backward compatibility
# with anything else that may read it.
ENV OLLAMA_PORT=11434

# Let orchestrators detect a wedged container via Streamlit's health endpoint
HEALTHCHECK --interval=30s --timeout=5s --start-period=60s --retries=3 \
  CMD curl -fsS http://localhost:7860/_stcore/health || exit 1

# Start Ollama in the background, wait for it with a bounded retry loop
# (the original loop could spin forever), then `exec` Streamlit so it
# replaces the shell as PID 1 and receives stop signals directly.
# Exec-form CMD avoids an extra implicit `sh -c` wrapper.
CMD ["bash", "-c", "ollama serve & tries=0; until curl -s http://localhost:11434 > /dev/null; do tries=$((tries + 1)); [ \"$tries\" -le 120 ] || { echo 'Ollama never came up'; exit 1; }; echo 'Waiting for Ollama...'; sleep 1; done; exec streamlit run app.py --server.port 7860 --server.address 0.0.0.0"]