name: Slow tests on main

on:
  push:
    branches:
      - main
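
# Shared environment for every job below. RUN_SLOW switches on the tests that the
# diffusers test suite marks as slow, and PYTEST_TIMEOUT is the per-test timeout
# in seconds enforced by pytest-timeout.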
env:
  DIFFUSERS_IS_CI: yes
  HF_HOME: /mnt/cache
  OMP_NUM_THREADS: 8
  MKL_NUM_THREADS: 8
  PYTEST_TIMEOUT: 600
  RUN_SLOW: yes
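
# One matrix job per framework/runner/image combination. max-parallel: 1 runs the
# configurations one at a time, presumably so the self-hosted runners are not
# oversubscribed, and fail-fast: false lets the remaining configs finish even if
# one of them fails.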
jobs:
  run_slow_tests:
    strategy:
      fail-fast: false
      max-parallel: 1
      matrix:
        config:
          - name: Slow PyTorch CUDA tests on Ubuntu
            framework: pytorch
            runner: docker-gpu
            image: diffusers/diffusers-pytorch-cuda
            report: torch_cuda
          - name: Slow Flax TPU tests on Ubuntu
            framework: flax
            runner: docker-tpu
            image: diffusers/diffusers-flax-tpu
            report: flax_tpu
          - name: Slow ONNXRuntime CUDA tests on Ubuntu
            framework: onnxruntime
            runner: docker-gpu
            image: diffusers/diffusers-onnxruntime-cuda
            report: onnx_cuda

    name: ${{ matrix.config.name }}

    runs-on: ${{ matrix.config.runner }}
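
    # The expression at the end of the container options picks --privileged for the
    # TPU runner (typically needed for direct TPU device access) and --gpus 0, i.e.
    # the first GPU, for everything else. Mounting /mnt/hf_cache onto /mnt/cache
    # matches HF_HOME above, so model downloads are shared between runs.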
    container:
      image: ${{ matrix.config.image }}
      options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ ${{ matrix.config.runner == 'docker-tpu' && '--privileged' || '--gpus 0'}}

    defaults:
      run:
        shell: bash

    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2
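
      # Print GPU info as a sanity check; the step is skipped on the TPU runner.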
      - name: NVIDIA-SMI
        if: ${{ matrix.config.runner == 'docker-gpu' }}
        run: |
          nvidia-smi
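
      # libsndfile1-dev and libgl1 are system libraries presumably needed by the audio
      # and OpenCV-based image tests; the package itself is installed in editable mode
      # with the quality and test extras.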
      - name: Install dependencies
        run: |
          apt-get update && apt-get install libsndfile1-dev libgl1 -y
          python -m pip install -e .[quality,test]

      - name: Environment
        run: |
          python utils/print_env.py
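
      # The -k expression keeps only the non-Flax, non-ONNX tests for this config.
      # CUBLAS_WORKSPACE_CONFIG=:16:8 is the setting PyTorch documents for making
      # cuBLAS kernels deterministic, which keeps the slow-test outputs reproducible.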
      - name: Run slow PyTorch CUDA tests
        if: ${{ matrix.config.framework == 'pytorch' }}
        env:
          HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
          CUBLAS_WORKSPACE_CONFIG: :16:8
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "not Flax and not Onnx" \
            --make-reports=tests_${{ matrix.config.report }} \
            tests/
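
      # -n 0 disables the pytest-xdist workers so the Flax tests run in the main
      # process, presumably because the TPU cannot be claimed from several worker
      # processes at once.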
      - name: Run slow Flax TPU tests
        if: ${{ matrix.config.framework == 'flax' }}
        env:
          HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
        run: |
          python -m pytest -n 0 \
            -s -v -k "Flax" \
            --make-reports=tests_${{ matrix.config.report }} \
            tests/
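
      # Mirror of the PyTorch step above, but keeping only the tests whose names match "Onnx".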
      - name: Run slow ONNXRuntime CUDA tests
        if: ${{ matrix.config.framework == 'onnxruntime' }}
        env:
          HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
            -s -v -k "Onnx" \
            --make-reports=tests_${{ matrix.config.report }} \
            tests/
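
      # --make-reports above writes per-run summaries under reports/; on failure the
      # short failure list is echoed into the log, and the whole directory is always
      # uploaded as an artifact.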
      - name: Failure short reports
        if: ${{ failure() }}
        run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v2
        with:
          name: ${{ matrix.config.report }}_test_reports
          path: reports
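
  # Second job: runs the example scripts' tests on a single CUDA GPU, outside the matrix.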
  run_examples_tests:
    name: Examples PyTorch CUDA tests on Ubuntu

    runs-on: docker-gpu

    container:
      image: diffusers/diffusers-pytorch-cuda
      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/

    steps:
      - name: Checkout diffusers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: NVIDIA-SMI
        run: |
          nvidia-smi
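
      # The training extra is added on top of quality and test because the example
      # scripts pull in extra training dependencies (see the extras in setup.py).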
      - name: Install dependencies
        run: |
          python -m pip install -e .[quality,test,training]

      - name: Environment
        run: |
          python utils/print_env.py
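
      # Single xdist worker with loadfile distribution, same as the slow-test job;
      # the examples_torch_cuda report name is what the failure step below reads back.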
      - name: Run example tests on GPU
        env:
          HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
        run: |
          python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/

      - name: Failure short reports
        if: ${{ failure() }}
        run: cat reports/examples_torch_cuda_failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v2
        with:
          name: examples_test_reports
          path: reports