version: '3.8'

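# Quick start (a sketch; assumes Docker Compose v2 and a .env file in this
# directory supplying the API keys referenced below):
#   docker compose up -d theoremexplain
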
services:
  theoremexplain:
    build:
      context: .
      dockerfile: dockerfile
    container_name: theoremexplain-agent
    ports:
      - "7860:7860"
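      # The web UI (presumably a Gradio app, since 7860 is Gradio's default port)
      # should be reachable at http://localhost:7860 once the container is up.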
    volumes:
      # Mount output directory to persist generated videos
      - ./output:/app/output
      # Mount models directory if you want to use local models
      - ./models:/app/models
      # Mount data directory for RAG and datasets
      - ./data:/app/data
    environment:
      # API keys, read from the shell environment or a .env file next to this compose file
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - GEMINI_API_KEY=${GEMINI_API_KEY}
      # Kokoro TTS settings
      - KOKORO_MODEL_PATH=models/kokoro-v0_19.onnx
      - KOKORO_VOICES_PATH=models/voices.bin
      - KOKORO_DEFAULT_VOICE=af
      - KOKORO_DEFAULT_SPEED=1.0
      - KOKORO_DEFAULT_LANG=en-us
      # Python path
      - PYTHONPATH=/app
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "uv", "run", "manim", "checkhealth"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s

  # Optional: batch-generation service. It sits behind the "batch" profile, so it
  # only starts when that profile is explicitly requested (see the note at the end of this file).
  theoremexplain-batch:
    build:
      context: .
      dockerfile: dockerfile
    container_name: theoremexplain-batch
    profiles:
      - batch
    volumes:
      - ./output:/app/output
      - ./models:/app/models
      - ./data:/app/data
    environment:
      # Same environment variables as main service
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - GEMINI_API_KEY=${GEMINI_API_KEY}
      - KOKORO_MODEL_PATH=models/kokoro-v0_19.onnx
      - KOKORO_VOICES_PATH=models/voices.bin
      - KOKORO_DEFAULT_VOICE=af
      - KOKORO_DEFAULT_SPEED=1.0
      - KOKORO_DEFAULT_LANG=en-us
      - PYTHONPATH=/app
    command: >
      uv run python generate_video.py
      --model "openai/gpt-4o-mini"
      --helper_model "openai/gpt-4o-mini"
      --output_dir "output/batch_generation"
      --theorems_path "data/thb_easy/math.json"
      --max_scene_concurrency 3
      --max_topic_concurrency 5
    restart: "no"
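
# The batch service only starts when its profile is requested explicitly, e.g.:
#   docker compose --profile batch up theoremexplain-batch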