version: '3.8'

services:
  ollama:
    image: ollama/ollama:0.9.2
    container_name: ollama-server
    restart: unless-stopped
    ports:
      - "11434:11434"
    volumes:
      - ollama_data:/root/.ollama
    environment:
      - OLLAMA_HOST=0.0.0.0:11434
      - OLLAMA_ORIGINS=*
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:11434/api/tags"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
    networks:
      - fashion-analyzer

  fastapi:
    build:
      context: .
      dockerfile: Dockerfile.fastapi
    container_name: fashion-analyzer-api
    restart: unless-stopped
    ports:
      - "7860:7860"
    environment:
      - OLLAMA_BASE_URL=http://ollama:11434
    depends_on:
      ollama:
        condition: service_healthy
    networks:
      - fashion-analyzer
    volumes:
      - ./logs:/app/logs
  model-loader:
    image: ollama/ollama:0.9.2
    container_name: model-loader
    restart: "no"
    environment:
      - OLLAMA_HOST=http://ollama:11434
    depends_on:
      ollama:
        condition: service_healthy
    networks:
      - fashion-analyzer
    # The ollama/ollama image's default entrypoint is /bin/ollama, so override
    # it with a shell; otherwise the script below would be passed to the
    # ollama CLI instead of being executed.
    entrypoint: ["/bin/sh", "-c"]
    command: >
      "echo 'Waiting for Ollama server to be ready...' &&
      sleep 10 &&
      echo 'Pulling LLaVA model for vision analysis...' &&
      ollama pull llava:7b &&
      echo 'Model pulled successfully!'"
volumes:
  ollama_data:
    driver: local

networks:
  fashion-analyzer:
    driver: bridge
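
# A minimal usage sketch, assuming Docker Compose v2 and that this file is
# saved as docker-compose.yml. The /api/tags endpoint is the same one the
# healthcheck above uses; the FastAPI routes depend on the app built by
# Dockerfile.fastapi and are not specified here.
#
#   docker compose up -d
#   docker compose logs -f model-loader    # watch the llava:7b pull complete
#   curl http://localhost:11434/api/tags   # Ollama should list llava:7b once pulled
#   curl http://localhost:7860             # FastAPI container, exposed on port 7860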