# syntax=docker/dockerfile:1
# Dockerfile for HuggingFace Spaces with Ollama (single stage)
FROM python:3.11-slim

# Set environment variables
ENV PYTHONUNBUFFERED=1
ENV DEBIAN_FRONTEND=noninteractive
# Ollama reads its bind address and port from OLLAMA_HOST (host:port form);
# it does not consume a separate OLLAMA_PORT variable.
ENV OLLAMA_HOST=0.0.0.0:11434
# Ollama keeps its data under $HOME/.ollama, so HOME=/app places it at
# /app/.ollama; OLLAMA_HOME is kept as a shorthand for paths in this file.
ENV OLLAMA_HOME=/app/.ollama
ENV HOME=/app
ENV PYTHONPATH=/app

# Set working directory
WORKDIR /app

# Install system dependencies including Ollama requirements
RUN apt-get update && apt-get install -y \
    curl \
    wget \
    build-essential \
    git \
    && rm -rf /var/lib/apt/lists/*
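# (curl and wget serve the Ollama installer and health checks; build-essential
# and git let pip build any packages that compile from source)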

# Install Ollama
RUN curl -fsSL https://ollama.ai/install.sh | sh
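# The official install script detects the platform and installs the ollama
# binary; there is no systemd in the container, so the server is started
# manually by start.sh below.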

# Copy requirements first for better Docker caching
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY . .

# Create necessary directories including Ollama data directory
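# (HuggingFace Docker Spaces run the container as UID 1000, hence the chown)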
RUN mkdir -p logs cache templates static $OLLAMA_HOME \
    && chown -R 1000:1000 /app \
    && chmod -R 755 /app

# Expose ports for both app and Ollama
EXPOSE 7860 11434
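# Note: Spaces only routes the port declared as app_port in the Space's
# README metadata (7860 here); 11434 stays container-internal.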

# Create startup script (written via heredoc so it stays readable; the quoted
# 'EOF' delimiter prevents any variable expansion at build time)
COPY <<'EOF' /app/start.sh
#!/bin/bash
set -e
echo "🚀 Starting HuggingFace Spaces Web3 Research Co-Pilot..."

# Create Ollama data directory with proper permissions
echo "🗂️  Setting up Ollama data directory..."
mkdir -p /app/.ollama
chmod -R 755 /app/.ollama
chown -R "$(whoami):$(whoami)" /app/.ollama 2>/dev/null || true
echo "Directory created: $(ls -la /app/.ollama)"

# Start Ollama server; it derives its data directory from $HOME
echo "📦 Starting Ollama server with data directory /app/.ollama..."
export HOME=/app
export OLLAMA_HOME=/app/.ollama
cd /app
ollama serve &
OLLAMA_PID=$!

# Wait for Ollama to be ready, giving up after ~2 minutes
echo "⏳ Waiting for Ollama to be ready..."
tries=0
until curl -s http://localhost:11434/api/tags > /dev/null; do
    tries=$((tries + 1))
    if [ "$tries" -ge 60 ]; then
        echo "❌ Ollama did not become ready in time"
        exit 1
    fi
    sleep 2
    echo "   ... still waiting for Ollama"
done

echo "✅ Ollama server is ready!"

# Pull the Llama 3.1 8B model (HOME/OLLAMA_HOME exported above still apply)
echo "📥 Pulling llama3.1:8b model (this may take a few minutes)..."
ollama pull llama3.1:8b
echo "✅ Model llama3.1:8b ready!"

# Start the main application
echo "🌐 Starting Web3 Research Co-Pilot web application..."
echo "🔍 Running startup validation..."
python validate_startup.py || exit 1
python app.py &
APP_PID=$!

# Forward SIGTERM/SIGINT to both processes so the container stops cleanly
cleanup() {
    echo "🛑 Shutting down gracefully..."
    kill $APP_PID $OLLAMA_PID 2>/dev/null || true
    wait $APP_PID $OLLAMA_PID 2>/dev/null || true
    echo "✅ Shutdown complete"
}
trap cleanup SIGTERM SIGINT

# Block on both processes
wait $APP_PID $OLLAMA_PID
EOF

# Make startup script executable
RUN chmod +x start.sh

# Health check with longer startup time for model download
HEALTHCHECK --interval=30s --timeout=10s --start-period=600s --retries=3 \
    CMD curl -f http://localhost:7860/health || exit 1

# Start command
CMD ["./start.sh"]