Merge pull request #115 from OpenAccess-AI-Collective/docker-version-fixes
Files changed:
- .github/workflows/base.yml  +9 -1
- docker/Dockerfile-base  +2 -0
.github/workflows/base.yml CHANGED

@@ -16,12 +16,19 @@ jobs:
         include:
           - cuda: "118"
             cuda_version: 11.8.0
+            python_version: 3.9
+            axolotl_extras:
+          - cuda: "118"
+            cuda_version: 11.8.0
+            python_version: 3.10
             axolotl_extras:
           - cuda: "117"
             cuda_version: 11.7.0
+            python_version: 3.9
             pytorch: 1.13.1
             axolotl_extras:
           - cuda: "118"
+            python_version: 3.9
             cuda_version: 11.8.0
             pytorch: 2.0.0
             axolotl_extras: gptq
@@ -46,12 +53,13 @@ jobs:
           context: .
           file: ./docker/Dockerfile-base
           push: ${{ github.event_name != 'pull_request' }}
-          tags: ${{ steps.metadata.outputs.tags }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
+          tags: ${{ steps.metadata.outputs.tags }}-py${{ matrix.python_version }}-cu${{ matrix.cuda }}-${{ matrix.pytorch }}${{ matrix.axolotl_extras != '' && '-' || '' }}${{ matrix.axolotl_extras }}
           labels: ${{ steps.metadata.outputs.labels }}
           cache-from: type=gha
           cache-to: type=gha,mode=max
           build-args: |
             CUDA_VERSION=${{ matrix.cuda_version }}
             CUDA=${{ matrix.cuda }}
+            PYTHON_VERSION=${{ matrix.python_version }}
             PYTORCH_VERSION=${{ matrix.pytorch }}
             AXOLOTL_EXTRAS=${{ matrix.axolotl_extras }}
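For illustration (not part of the diff): the `${{ matrix.axolotl_extras != '' && '-' || '' }}` expression is the GitHub Actions ternary idiom, so a hyphen is appended only when `axolotl_extras` is set. Assuming the metadata step resolves the base tag to something like `main`, the two fully specified matrix entries would now push tags such as:

    main-py3.9-cu117-1.13.1
    main-py3.9-cu118-2.0.0-gptq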
docker/Dockerfile-base CHANGED

@@ -52,6 +52,8 @@ RUN git clone https://github.com/HazyResearch/flash-attention.git && \
 
 FROM base-builder AS deepspeed-builder
 
+ARG TORCH_CUDA_ARCH_LIST="7.0 7.5 8.0 8.6+PTX"
+
 WORKDIR /workspace
 
 RUN git clone https://github.com/microsoft/DeepSpeed.git && \
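A note on the Dockerfile change (explanation, not part of the diff): TORCH_CUDA_ARCH_LIST is the variable PyTorch's CUDA extension builder reads to decide which GPU architectures to compile kernels for, and an ARG declared in a stage is visible to that stage's later RUN steps. Pinning it to "7.0 7.5 8.0 8.6+PTX" makes the DeepSpeed build target Volta, Turing, and Ampere (plus forward-compatible PTX) rather than auto-detecting from whatever GPU, if any, the build machine exposes. The default can still be overridden at build time, for example (the override value here is only illustrative):

    docker build -f docker/Dockerfile-base --build-arg TORCH_CUDA_ARCH_LIST="8.0 9.0+PTX" .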