# Dockerfile for HuggingFace Spaces with Ollama
FROM python:3.11-slim

# Set environment variables
ENV PYTHONUNBUFFERED=1
ENV DEBIAN_FRONTEND=noninteractive
ENV OLLAMA_HOST=0.0.0.0
ENV OLLAMA_PORT=11434
ENV OLLAMA_HOME=/app/.ollama
ENV PYTHONPATH=/app
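# Note: Ollama reads its bind address and port from OLLAMA_HOST (a host:port
# value such as 0.0.0.0:11434 also works); OLLAMA_PORT is informational only.
# The variable Ollama documents for relocating the model store is
# OLLAMA_MODELS, so OLLAMA_HOME alone may not move it on every version.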

# Set working directory
WORKDIR /app

# Install system dependencies including Ollama requirements
RUN apt-get update && apt-get install -y \
    curl \
    wget \
    build-essential \
    git \
    && rm -rf /var/lib/apt/lists/*
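# (clearing /var/lib/apt/lists in the same RUN keeps this layer small)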

# Install Ollama
RUN curl -fsSL https://ollama.ai/install.sh | sh
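# The install script puts the `ollama` binary on PATH; GPU support is
# detected at runtime, so on CPU-only Spaces hardware it simply runs on CPU.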

# Copy requirements first for better Docker caching
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY . .

# Create necessary directories including Ollama data directory
RUN mkdir -p logs cache templates static $OLLAMA_HOME \
    && chown -R 1000:1000 /app \
    && chmod -R 755 /app
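# HuggingFace Spaces runs Docker containers as user ID 1000; the chown above
# is what lets that user write logs, cache, and Ollama data under /app.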

# Expose ports for both app and Ollama
EXPOSE 7860 11434
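# Only the Space's app_port (7860 here) is reachable from outside on Spaces;
# 11434 stays internal to the container.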

# Create startup script
RUN echo '#!/bin/bash\n\
echo "πŸš€ Starting HuggingFace Spaces Web3 Research Co-Pilot..."\n\
\n\
# Set Ollama environment variables\n\
export OLLAMA_HOME=/app/.ollama\n\
export OLLAMA_HOST=0.0.0.0\n\
export OLLAMA_PORT=11434\n\
\n\
# Create and set permissions for Ollama directory\n\
echo "πŸ—‚οΈ  Setting up Ollama data directory..."\n\
mkdir -p /app/.ollama\n\
chmod -R 755 /app/.ollama\n\
ls -la /app/.ollama\n\
\n\
# Start Ollama server in background with explicit data directory\n\
echo "πŸ“¦ Starting Ollama server..."\n\
OLLAMA_HOME=/app/.ollama ollama serve &\n\
OLLAMA_PID=$!\n\
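# (capture the PID so the cleanup trap below can stop the server)\n\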
\n\
# Wait for Ollama to be ready\n\
echo "⏳ Waiting for Ollama to be ready..."\n\
while ! curl -s http://localhost:11434/api/tags > /dev/null; do\n\
  sleep 2\n\
  echo "   ... still waiting for Ollama"\n\
done\n\
\n\
echo "βœ… Ollama server is ready!"\n\
\n\
# Pull the Llama 3.1 8B model\n\
echo "πŸ“₯ Pulling llama3.1:8b model (this may take a few minutes)..."\n\
OLLAMA_HOME=/app/.ollama ollama pull llama3.1:8b\n\
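# NOTE: llama3.1:8b is a multi-gigabyte download; unless persistent storage\n\
# is mounted at /app/.ollama, this pull repeats on every cold start\n\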
echo "βœ… Model llama3.1:8b ready!"\n\
\n\
# Start the main application\n\
echo "🌐 Starting Web3 Research Co-Pilot web application..."\n\
python app.py &\n\
APP_PID=$!\n\
\n\
# Function to handle shutdown\n\
cleanup() {\n\
    echo "πŸ›‘ Shutting down gracefully..."\n\
    kill $APP_PID $OLLAMA_PID 2>/dev/null || true\n\
    wait $APP_PID $OLLAMA_PID 2>/dev/null || true\n\
    echo "βœ… Shutdown complete"\n\
}\n\
\n\
# Set up signal handlers\n\
trap cleanup SIGTERM SIGINT\n\
\n\
# Wait for processes\n\
wait $APP_PID $OLLAMA_PID' > start.sh
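# A less brittle way to ship this script (assuming BuildKit with Dockerfile
# syntax >= 1.4, which is not guaranteed on every builder) is a heredoc COPY
# instead of the escaped echo above, e.g.:
#
#   COPY <<'EOF' /app/start.sh
#   #!/bin/bash
#   echo "🚀 Starting..."
#   ...
#   EOF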

# Make startup script executable
RUN chmod +x start.sh

# Health check with longer startup time for model download
HEALTHCHECK --interval=30s --timeout=10s --start-period=600s --retries=3 \
    CMD curl -f http://localhost:7860/health || exit 1
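# The check assumes app.py serves GET /health on port 7860 (an assumption
# about the application, not verified here); once the app is up:
#   curl -f http://localhost:7860/health   # exits 0 on any 2xx response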

# Start command
CMD ["./start.sh"]