version: '3.8'
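# NOTE: the top-level `version` key is ignored (and reported as obsolete) by
# Docker Compose v2; it is kept only for compatibility with older docker-compose.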

services:
  theoremexplain:
    build:
      context: .
      dockerfile: dockerfile
    container_name: theoremexplain-agent
    ports:
      - "7860:7860"
    volumes:
      # Mount output directory to persist generated videos
      - ./output:/app/output
      # Mount models directory if you want to use local models
      - ./models:/app/models
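      #   (the Kokoro TTS files referenced below, kokoro-v0_19.onnx and voices.bin,
      #   are expected to be present in ./models on the host)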
      # Mount data directory for RAG and datasets
      - ./data:/app/data
    environment:
      # Substituted by Compose from the host environment or a .env file in the project directory
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - AZURE_API_KEY=${AZURE_API_KEY}
      - AZURE_API_BASE=${AZURE_API_BASE}
      - AZURE_API_VERSION=${AZURE_API_VERSION}
      - VERTEXAI_PROJECT=${VERTEXAI_PROJECT}
      - VERTEXAI_LOCATION=${VERTEXAI_LOCATION}
      - GOOGLE_APPLICATION_CREDENTIALS=${GOOGLE_APPLICATION_CREDENTIALS}
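      #   (must point to a path that exists inside the container, e.g. a credentials
      #   file placed under one of the mounted directories such as ./data)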
      - GEMINI_API_KEY=${GEMINI_API_KEY}
      # Kokoro TTS settings
      - KOKORO_MODEL_PATH=models/kokoro-v0_19.onnx
      - KOKORO_VOICES_PATH=models/voices.bin
      - KOKORO_DEFAULT_VOICE=af
      - KOKORO_DEFAULT_SPEED=1.0
      - KOKORO_DEFAULT_LANG=en-us
      # Python path (set explicitly: $PYTHONPATH here would be substituted from the
      # host shell at compose time, not appended inside the container)
      - PYTHONPATH=/app
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "conda", "run", "-n", "tea", "python", "-c", "import src; import manim; print('Health check passed')"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
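    # To run just this service: `docker compose up -d theoremexplain` exposes the
    # app on port 7860; the batch service below is skipped by default because it
    # is gated behind the `batch` profile.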

  # Optional: Add a service for running batch generation
  theoremexplain-batch:
    build:
      context: .
      dockerfile: dockerfile
    container_name: theoremexplain-batch
    profiles:
      - batch
    volumes:
      - ./output:/app/output
      - ./models:/app/models
      - ./data:/app/data
    environment:
      # Same environment variables as main service
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - AZURE_API_KEY=${AZURE_API_KEY}
      - AZURE_API_BASE=${AZURE_API_BASE}
      - AZURE_API_VERSION=${AZURE_API_VERSION}
      - VERTEXAI_PROJECT=${VERTEXAI_PROJECT}
      - VERTEXAI_LOCATION=${VERTEXAI_LOCATION}
      - GOOGLE_APPLICATION_CREDENTIALS=${GOOGLE_APPLICATION_CREDENTIALS}
      - GEMINI_API_KEY=${GEMINI_API_KEY}
      - KOKORO_MODEL_PATH=models/kokoro-v0_19.onnx
      - KOKORO_VOICES_PATH=models/voices.bin
      - KOKORO_DEFAULT_VOICE=af
      - KOKORO_DEFAULT_SPEED=1.0
      - KOKORO_DEFAULT_LANG=en-us
      - PYTHONPATH=/app
    command: >
      conda run --no-capture-output -n tea python generate_video.py
      --model "openai/gpt-4o-mini"
      --helper_model "openai/gpt-4o-mini"
      --output_dir "output/batch_generation"
      --theorems_path "data/thb_easy/math.json"
      --max_scene_concurrency 3
      --max_topic_concurrency 5
    restart: "no"
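    # To launch a one-off batch run (this service only starts when the `batch`
    # profile is active), e.g.:
    #   docker compose --profile batch up theoremexplain-batch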