Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes.
- .gitattributes +2 -0
- visual-quality-inspection/transfer-learning/.dockerignore +4 -0
- visual-quality-inspection/transfer-learning/.github/pull_request_template.md +14 -0
- visual-quality-inspection/transfer-learning/.github/workflows/build-container.yaml +50 -0
- visual-quality-inspection/transfer-learning/.github/workflows/docs-test.yaml +34 -0
- visual-quality-inspection/transfer-learning/.github/workflows/integration.yaml +41 -0
- visual-quality-inspection/transfer-learning/.github/workflows/nightly-integration.yaml +25 -0
- visual-quality-inspection/transfer-learning/.github/workflows/nightly-notebook-test.yaml +25 -0
- visual-quality-inspection/transfer-learning/.github/workflows/notebook-test.yaml +41 -0
- visual-quality-inspection/transfer-learning/.github/workflows/style-test.yaml +30 -0
- visual-quality-inspection/transfer-learning/.github/workflows/unittest.yaml +30 -0
- visual-quality-inspection/transfer-learning/.gitignore +20 -0
- visual-quality-inspection/transfer-learning/CODEOWNERS +59 -0
- visual-quality-inspection/transfer-learning/DATASETS.md +30 -0
- visual-quality-inspection/transfer-learning/GetStarted.md +263 -0
- visual-quality-inspection/transfer-learning/LICENSE +202 -0
- visual-quality-inspection/transfer-learning/Legal.md +34 -0
- visual-quality-inspection/transfer-learning/MANIFEST.in +1 -0
- visual-quality-inspection/transfer-learning/Makefile +112 -0
- visual-quality-inspection/transfer-learning/Models.md +178 -0
- visual-quality-inspection/transfer-learning/README.md +98 -0
- visual-quality-inspection/transfer-learning/SECURITY.md +12 -0
- visual-quality-inspection/transfer-learning/_config.yml +1 -0
- visual-quality-inspection/transfer-learning/api.md +4 -0
- visual-quality-inspection/transfer-learning/bandit.yaml +11 -0
- visual-quality-inspection/transfer-learning/cli.md +4 -0
- visual-quality-inspection/transfer-learning/docker/Dockerfile +143 -0
- visual-quality-inspection/transfer-learning/docker/README.md +73 -0
- visual-quality-inspection/transfer-learning/docker/chart/.helmignore +23 -0
- visual-quality-inspection/transfer-learning/docker/chart/Chart.yaml +24 -0
- visual-quality-inspection/transfer-learning/docker/chart/README.md +31 -0
- visual-quality-inspection/transfer-learning/docker/chart/templates/mpijob.yaml +92 -0
- visual-quality-inspection/transfer-learning/docker/chart/templates/pvc.yaml +25 -0
- visual-quality-inspection/transfer-learning/docker/chart/values.yaml +28 -0
- visual-quality-inspection/transfer-learning/docker/docker-compose.yml +54 -0
- visual-quality-inspection/transfer-learning/docs/.gitignore +1 -0
- visual-quality-inspection/transfer-learning/docs/DATASETS.rst +4 -0
- visual-quality-inspection/transfer-learning/docs/GetStarted.rst +2 -0
- visual-quality-inspection/transfer-learning/docs/Legal.rst +2 -0
- visual-quality-inspection/transfer-learning/docs/Makefile +43 -0
- visual-quality-inspection/transfer-learning/docs/Models.rst +2 -0
- visual-quality-inspection/transfer-learning/docs/README.md +58 -0
- visual-quality-inspection/transfer-learning/docs/_static/tlt-custom.css +43 -0
- visual-quality-inspection/transfer-learning/docs/_static/tlt-custom.js +19 -0
- visual-quality-inspection/transfer-learning/docs/_templates/footer.html +5 -0
- visual-quality-inspection/transfer-learning/docs/api.rst +132 -0
- visual-quality-inspection/transfer-learning/docs/cli.rst +7 -0
- visual-quality-inspection/transfer-learning/docs/conf.py +111 -0
- visual-quality-inspection/transfer-learning/docs/distributed.rst +4 -0
- visual-quality-inspection/transfer-learning/docs/docbuild.rst +4 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+visual-quality-inspection/transfer-learning/workflows/vision_anomaly_detection/assets/Visual_quality_inspection_layered_architecture.JPG filter=lfs diff=lfs merge=lfs -text
+visual-quality-inspection/transfer-learning/workflows/vision_anomaly_detection/assets/visual_quality_inspection_pipeline.JPG filter=lfs diff=lfs merge=lfs -text
visual-quality-inspection/transfer-learning/.dockerignore
ADDED
@@ -0,0 +1,4 @@
+.git*
+**__pycache__**
+docker
+Dockerfile*
visual-quality-inspection/transfer-learning/.github/pull_request_template.md
ADDED
@@ -0,0 +1,14 @@
+<!-- Add a description of the changes made in the PR -->
+
+**Before requesting a review:**
+
+- [ ] I have ensured my PR title is accurate
+- [ ] I wrote a description of the changes being made, if it's not obvious
+- [ ] I have synced my branch with the base (i.e. `develop`)
+- [ ] I ran `make lint` on my branch and it passes
+- [ ] I ran the pytest tests that could reasonably be affected by my changes and they pass
+- [ ] I have performed a self code review of my own code on the "Files changed" tab of the pull request
+- [ ] I have commented my code in hard-to-understand areas
+- [ ] I have updated the documentation (in docstrings, notebooks, and .rst files)
+- [ ] I have added new tests that prove my fix is effective or that my feature works (or provide justification why not)
+- [ ] I have applied the appropriate labels to the PR (if your PR is not ready for review use "WIP")
visual-quality-inspection/transfer-learning/.github/workflows/build-container.yaml
ADDED
@@ -0,0 +1,50 @@
+name: TLT Containers Weekly Builder
+on:
+  workflow_dispatch: # Can be manually executed
+  schedule: # 1/week Sunday at 11:00PM
+    - cron: "5 23 * * 0"
+jobs:
+  build:
+    container: # MLOps Dev container for Compose Automation
+      image: ${{ vars.GHA_CONTAINER_IMAGE }}
+      env: # Add ENVS to control compose building
+        http_proxy: ${{ secrets.HTTP_PROXY }}
+        https_proxy: ${{ secrets.HTTPS_PROXY }}
+        no_proxy: ${{ secrets.NO_PROXY }}
+      credentials: # CAAS Registry Creds
+        username: ${{ secrets.REGISTRY_USER }}
+        password: ${{ secrets.REGISTRY_TOKEN }}
+    runs-on: [aia-devops] # Runner Label
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          submodules: true
+          set-safe-directory: true
+      - name: Build Container
+        run: docker compose build
+        working-directory: ./docker
+  push:
+    needs: [build]
+    strategy:
+      matrix:
+        container: ["tlt-devel", "tlt-prod", "tlt-dist-devel", "tlt-dist-prod"] # name of Compose container
+    container:
+      image: ${{ vars.GHA_CONTAINER_IMAGE }}
+      env: # Add ENVS to control compose building
+        http_proxy: ${{ secrets.HTTP_PROXY }}
+        https_proxy: ${{ secrets.HTTPS_PROXY }}
+        no_proxy: ${{ secrets.NO_PROXY }}
+      credentials: # CAAS Registry Creds
+        username: ${{ secrets.REGISTRY_USER }}
+        password: ${{ secrets.REGISTRY_TOKEN }}
+    runs-on: [aia-devops]
+    steps:
+      - uses: docker/login-action@v2
+        with: # CAAS Registry Creds
+          registry: ${{ vars.GHA_REGISTRY }}
+          username: ${{ secrets.REGISTRY_USER }}
+          password: ${{ secrets.REGISTRY_TOKEN }}
+      - name: Push Container # tlt-<num>-<container>
+        run: |
+          docker tag intel/ai-tools:${{ matrix.container }}-latest ${{ vars.GHA_REGISTRY_REPO }}:ww$(date +"%U")-${{ matrix.container }}
+          docker push ${{ vars.GHA_REGISTRY_REPO }}:ww$(date +"%U")-${{ matrix.container }}
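Aside: the push job above tags each image by the build's work week via `ww$(date +"%U")-<container>`. A minimal Python sketch of how that tag resolves (the registry path here is a placeholder, not from this upload):

```python
import datetime

# date +"%U" in the workflow is the zero-padded week of the year
# (Sunday as the first day of the week), matching Python's %U.
week = datetime.date.today().strftime("%U")
for container in ["tlt-devel", "tlt-prod", "tlt-dist-devel", "tlt-dist-prod"]:
    # e.g. registry.example.com/ai-tools:ww37-tlt-devel (placeholder registry)
    print(f"registry.example.com/ai-tools:ww{week}-{container}")
```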
visual-quality-inspection/transfer-learning/.github/workflows/docs-test.yaml
ADDED
@@ -0,0 +1,34 @@
+name: Build and Test docs
+on:
+  pull_request:
+    types: [submitted]
+  # run the workflow if changes pushed to main or release branches
+  push:
+    branches:
+      - '**'
+    tags:
+      - '**'
+    paths:
+      - '**'
+
+# installs dependencies, build the docs and push it to `gh-pages`
+jobs:
+  docs-test:
+    runs-on: [ aia-devops ]
+    container:
+      image: ${{ vars.GHA_IMAGE }}
+      env:
+        http_proxy: ${{ secrets.HTTP_PROXY }}
+        https_proxy: ${{ secrets.HTTPS_PROXY }}
+        no_proxy: ${{ secrets.NO_PROXY }}
+      # credentials:
+      #   username: ${{ secrets.REGISTRY_USER }}
+      #   password: ${{ secrets.REGISTRY_TOKEN }}
+      volumes:
+        - /tf_dataset/dataset/transfer_learning:/tmp/data
+    steps:
+      - uses: actions/checkout@v3
+      # Test the docs
+      - name: Run documentation tests
+        run: |
+          make test_docs
visual-quality-inspection/transfer-learning/.github/workflows/integration.yaml
ADDED
@@ -0,0 +1,41 @@
+name: Integration Test
+on:
+  pull_request_review:
+    types: [submitted]
+  # run the workflow if changes pushed to main or release branches
+  push:
+    branches:
+      - develop
+      - main
+      - r0.1
+      - r0.2
+      - r0.3
+      - r0.4
+      - r0.5
+    tags:
+      - '**'
+    paths:
+      - '**'
+jobs:
+  integration-test:
+    if: github.event.review.state == 'approved' ||
+      github.event.pull_request.merged == true ||
+      github.event_name == 'push'
+    runs-on: [ aia-devops ]
+    container:
+      image: ${{ vars.GHA_IMAGE }}
+      env:
+        http_proxy: ${{ secrets.HTTP_PROXY }}
+        https_proxy: ${{ secrets.HTTPS_PROXY }}
+        no_proxy: ${{ secrets.NO_PROXY }}
+      # credentials:
+      #   username: ${{ secrets.REGISTRY_USER }}
+      #   password: ${{ secrets.REGISTRY_TOKEN }}
+      volumes:
+        - /tf_dataset/dataset/transfer_learning:/tmp/data
+    steps:
+      - uses: actions/checkout@v3
+      - name: Run Integration Tests
+        shell: bash
+        continue-on-error: false
+        run: make integration
visual-quality-inspection/transfer-learning/.github/workflows/nightly-integration.yaml
ADDED
@@ -0,0 +1,25 @@
+name: Nightly Integration Test
+on:
+  workflow_dispatch: # Can be manually executed
+  schedule: # nightly at 10:00PM
+    - cron: "0 22 * * *"
+jobs:
+  nightly-test:
+    runs-on: [ aia-devops ]
+    container:
+      image: ${{ vars.GHA_IMAGE }}
+      env:
+        http_proxy: ${{ secrets.HTTP_PROXY }}
+        https_proxy: ${{ secrets.HTTPS_PROXY }}
+        no_proxy: ${{ secrets.NO_PROXY }}
+      # credentials:
+      #   username: ${{ secrets.REGISTRY_USER }}
+      #   password: ${{ secrets.REGISTRY_TOKEN }}
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: develop
+      - name: Run Integration Test
+        shell: bash
+        continue-on-error: false
+        run: make integration
visual-quality-inspection/transfer-learning/.github/workflows/nightly-notebook-test.yaml
ADDED
@@ -0,0 +1,25 @@
+name: Nightly Notebooks Test
+on:
+  workflow_dispatch: # Can be manually executed
+  schedule: # nightly at 11:00PM
+    - cron: "0 23 * * *"
+jobs:
+  notebook-test:
+    runs-on: [ aia-devops ]
+    container:
+      image: ${{ vars.GHA_IMAGE }}
+      env:
+        http_proxy: ${{ secrets.HTTP_PROXY }}
+        https_proxy: ${{ secrets.HTTPS_PROXY }}
+        no_proxy: ${{ secrets.NO_PROXY }}
+        DATASET_DIR: /tmp/data
+        OUTPUT_DIR: /tmp/output
+      # credentials:
+      #   username: ${{ secrets.REGISTRY_USER }}
+      #   password: ${{ secrets.REGISTRY_TOKEN }}
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: develop
+      - name: Run Notebook Tests
+        run: make test_notebook_catalog
visual-quality-inspection/transfer-learning/.github/workflows/notebook-test.yaml
ADDED
@@ -0,0 +1,41 @@
+name: Notebooks Test
+on:
+  pull_request_review:
+    types: [submitted]
+  # run the workflow if changes pushed to main or release branches
+  push:
+    branches:
+      - develop
+      - main
+      - r0.1
+      - r0.2
+      - r0.3
+      - r0.4
+      - r0.5
+    tags:
+      - '**'
+    paths:
+      - '**'
+jobs:
+  notebook-test:
+    if: github.event.review.state == 'approved' ||
+      github.event.pull_request.merged == true ||
+      github.event_name == 'push'
+    runs-on: [ aia-devops ]
+    container:
+      image: ${{ vars.GHA_IMAGE }}
+      env:
+        http_proxy: ${{ secrets.HTTP_PROXY }}
+        https_proxy: ${{ secrets.HTTPS_PROXY }}
+        no_proxy: ${{ secrets.NO_PROXY }}
+        DATASET_DIR: /tmp/data
+        OUTPUT_DIR: /tmp/output
+      # credentials:
+      #   username: ${{ secrets.REGISTRY_USER }}
+      #   password: ${{ secrets.REGISTRY_TOKEN }}
+      volumes:
+        - /tf_dataset/dataset/transfer_learning:/tmp/data
+    steps:
+      - uses: actions/checkout@v3
+      - name: Run Notebook Tests
+        run: make test_notebook_custom
visual-quality-inspection/transfer-learning/.github/workflows/style-test.yaml
ADDED
@@ -0,0 +1,30 @@
+name: Style Checks
+on:
+  pull_request:
+    types: [submitted]
+  # run the workflow if changes pushed to main or release branches
+  push:
+    branches:
+      - '**'
+    tags:
+      - '**'
+    paths:
+      - '**'
+# installs dependencies and runs the linter
+jobs:
+  style-test:
+    runs-on: [ aia-devops ]
+    container:
+      image: ${{ vars.GHA_IMAGE }}
+      env:
+        http_proxy: ${{ secrets.HTTP_PROXY }}
+        https_proxy: ${{ secrets.HTTPS_PROXY }}
+        no_proxy: ${{ secrets.NO_PROXY }}
+      # credentials:
+      #   username: ${{ secrets.REGISTRY_USER }}
+      #   password: ${{ secrets.REGISTRY_TOKEN }}
+    steps:
+      - uses: actions/checkout@v3
+      - name: Run linter
+        run: |
+          make lint
visual-quality-inspection/transfer-learning/.github/workflows/unittest.yaml
ADDED
@@ -0,0 +1,30 @@
+name: Unit Test
+on:
+  pull_request:
+    types: [submitted]
+  # run the workflow if changes pushed to main or release branches
+  push:
+    branches:
+      - '**'
+    tags:
+      - '**'
+    paths:
+      - '**'
+jobs:
+  unit-test:
+    runs-on: [ aia-devops ]
+    container:
+      image: ${{ vars.GHA_IMAGE }}
+      env:
+        http_proxy: ${{ secrets.HTTP_PROXY }}
+        https_proxy: ${{ secrets.HTTPS_PROXY }}
+        no_proxy: ${{ secrets.NO_PROXY }}
+      # credentials:
+      #   username: ${{ secrets.REGISTRY_USER }}
+      #   password: ${{ secrets.REGISTRY_TOKEN }}
+    steps:
+      - uses: actions/checkout@v3
+      - name: Run Unit Test
+        shell: bash
+        continue-on-error: false
+        run: make unittest
visual-quality-inspection/transfer-learning/.gitignore
ADDED
@@ -0,0 +1,20 @@
+_autosummary
+.coverage
+.DS_Store
+.idea*
+.ipynb_checkpoints
+.vscode
+*.egg-info/
+*.pyc
+**.log
+**/*.cache
+**/**.whl
+**/**/models/
+**/**venv
+**venv*
+build/
+data
+dist/
+docs/_build/
+nc_workspace
+output
visual-quality-inspection/transfer-learning/CODEOWNERS
ADDED
@@ -0,0 +1,59 @@
+# This is a comment.
+# Each line is a file pattern followed by one or more owners.
+
+# These owners will be the default owners for everything in
+# the repo. Unless a later match takes precedence,
+# @global-owner1 and @global-owner2 will be requested for
+# review when someone opens a pull request.
+* @ashahba @dmsuehir @etcylfleet @HarshaRamayanam @mhbuehler @okhleif-IL
+
+# Order is important; the last matching pattern takes the most
+# precedence. When someone opens a pull request that only
+# modifies JS files, only @js-owner and not the global
+# owner(s) will be requested for a review.
+# *.js @js-owner #This is an inline comment.
+
+# You can also use email addresses if you prefer. They'll be
+# used to look up users just like we do for commit author
+# emails.
+# *.go docs@example.com
+
+# Teams can be specified as code owners as well. Teams should
+# be identified in the format @org/team-name. Teams must have
+# explicit write access to the repository. In this example,
+# the octocats team in the octo-org organization owns all .txt files.
+# *.txt @octo-org/octocats
+
+# In this example, @doctocat owns any files in the build/logs
+# directory at the root of the repository and any of its
+# subdirectories.
+# /build/logs/ @doctocat
+
+# The `docs/*` pattern will match files like
+# `docs/getting-started.md` but not further nested files like
+# `docs/build-app/troubleshooting.md`.
+# docs/* docs@example.com
+
+# In this example, @octocat owns any file in an apps directory
+# anywhere in your repository.
+# apps/ @octocat
+
+# In this example, @doctocat owns any file in the `/docs`
+# directory in the root of your repository and any of its
+# subdirectories.
+# /docs/ @doctocat
+
+# In this example, any change inside the `/scripts` directory
+# will require approval from @doctocat or @octocat.
+# /scripts/ @doctocat @octocat
+
+# In this example, @octocat owns any file in a `/logs` directory such as
+# `/build/logs`, `/scripts/logs`, and `/deeply/nested/logs`. Any changes
+# in a `/logs` directory will require approval from @octocat.
+# **/logs @octocat
+
+# In this example, @octocat owns any file in the `/apps`
+# directory in the root of your repository except for the `/apps/github`
+# subdirectory, as its owners are left empty.
+# /apps/ @octocat
+# /apps/github
visual-quality-inspection/transfer-learning/DATASETS.md
ADDED
@@ -0,0 +1,30 @@
+# Datasets
+
+This is a comprehensive list of public datasets used by this repository.
+
+| Name (Link/Source) | Framework | Use Case |
+|--------------------| --------- | -------- |
+| [AG News (Hugging Face)](https://huggingface.co/datasets/ag_news) | PyTorch | Text Classification |
+| [AG News (TFDS)](https://www.tensorflow.org/datasets/catalog/ag_news_subset) | TensorFlow | Text Classification |
+| [Food101 (Torchvision)](https://pytorch.org/vision/stable/generated/torchvision.datasets.Food101.html#torchvision.datasets.Food101) | PyTorch | Image Classification |
+| [Food101 (TFDS)](https://www.tensorflow.org/datasets/catalog/food101) | TensorFlow | Image Classification |
+| [SMS Spam Collection](https://archive.ics.uci.edu/dataset/228/sms+spam+collection) | PyTorch & TensorFlow | Text Classification |
+| [TF Flowers (TFDS)](https://www.tensorflow.org/datasets/catalog/tf_flowers) | PyTorch & TensorFlow | Image Classification |
+| [Cats vs. Dogs (TFDS)](https://www.tensorflow.org/datasets/catalog/cats_vs_dogs) | TensorFlow | Image Classification |
+| [Country211 (Torchvision)](https://pytorch.org/vision/stable/generated/torchvision.datasets.Country211.html#torchvision.datasets.Country211) | PyTorch | Image Classification |
+| [DTD (Torchvision)](https://pytorch.org/vision/stable/generated/torchvision.datasets.DTD.html#torchvision.datasets.DTD) | PyTorch | Image Classification |
+| [FGVCAircraft (Torchvision)](https://pytorch.org/vision/stable/generated/torchvision.datasets.FGVCAircraft.html#torchvision.datasets.FGVCAircraft) | PyTorch | Image Classification |
+| [RenderedSST2 (Torchvision)](https://pytorch.org/vision/stable/generated/torchvision.datasets.RenderedSST2.html#torchvision.datasets.RenderedSST2) | PyTorch | Image Classification |
+| [Rock Paper Scissors (TFDS)](https://www.tensorflow.org/datasets/catalog/rock_paper_scissors) | TensorFlow | Image Classification |
+| [Rotten Tomatoes (Hugging Face)](https://huggingface.co/datasets/rotten_tomatoes) | PyTorch | Text Classification |
+| [TweetEval (Hugging Face)](https://huggingface.co/datasets/tweet_eval) | PyTorch | Text Classification |
+| [CIFAR10 (Torchvision)](https://pytorch.org/vision/stable/generated/torchvision.datasets.CIFAR10.html#torchvision.datasets.CIFAR10) | PyTorch | Image Classification |
+| [IMDB Reviews (Hugging Face)](https://huggingface.co/datasets/imdb) | PyTorch | Text Classification |
+| [IMDB Reviews (TFDS)](https://www.tensorflow.org/datasets/catalog/imdb_reviews) | TensorFlow | Text Classification |
+| [GLUE/SST2 (TFDS)](https://www.tensorflow.org/datasets/catalog/glue#gluesst2) | TensorFlow | Text Classification |
+| [GLUE/COLA (TFDS)](https://www.tensorflow.org/datasets/catalog/glue#gluecola_default_config) | TensorFlow | Text Classification |
+| [Colorectal Histology (TFDS)](https://www.tensorflow.org/datasets/catalog/colorectal_histology) | TensorFlow | Image Classification |
+| [RESISC45 (TFDS)](https://www.tensorflow.org/datasets/catalog/resisc45) | TensorFlow | Image Classification |
+| [CDD-CESM](https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=109379611) | PyTorch & TensorFlow | Image & Text Classification |
+| [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) | PyTorch & TensorFlow | Text Classification |
+| [MVTec](https://www.mvtec.com/company/research/datasets/mvtec-ad) | PyTorch | Anomaly Detection |
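Aside: the TFDS entries in this table can also be pulled directly with `tensorflow_datasets`, outside the tool. A minimal sketch, assuming the package is installed and `/tmp/data` is a writable cache directory:

```python
import tensorflow_datasets as tfds

# Download TF Flowers into a local data dir; any TFDS name from the
# table above (e.g. "food101", "cats_vs_dogs") loads the same way.
ds, info = tfds.load("tf_flowers", split="train", data_dir="/tmp/data",
                     as_supervised=True, with_info=True)
print(info.features["label"].names)  # the five flower class names

# Images in tf_flowers vary in size, so inspect examples individually
# (resize before batching if you need fixed-shape batches).
for image, label in ds.take(3):
    print(image.shape, int(label))
```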
visual-quality-inspection/transfer-learning/GetStarted.md
ADDED
@@ -0,0 +1,263 @@
+# Get Started
+
+This is a guide for getting started with Intel® Transfer Learning Tool and will
+walk you through the steps to check system requirements, install, and then run
+the tool with a couple of examples showing no-code CLI and low-code API
+approaches.
+
+<p align="center"><b>Intel Transfer Learning Tool Get Started Flow</b></p>
+
+<img alt="Intel Transfer Learning Tool Get Started Flow" title="Intel Transfer Learning Tool Get Started Flow" src="images/TLT-GSG_flow.svg" width="800">
+
+## ① Check System Requirements
+
+| Recommended Hardware | Precision |
+| ---------------------------- | ---------- |
+| Intel® 4th Gen Xeon® Scalable Performance processors | BF16 |
+| Intel® 1st, 2nd, 3rd, and 4th Gen Xeon® Scalable Performance processors | FP32 |
+
+| Resource | Minimum |
+| ---------------------------- | ---------- |
+| CPU Cores | 8 (16+ recommended) |
+| RAM | 16 GB (24-32+ GB recommended) |
+| Disk space | 10 GB minimum (can vary based on datasets downloaded) |
+
+| Required Software |
+| ------------------------- |
+| Linux\* system (validated on Ubuntu\* 20.04/22.04 LTS) |
+| Python (3.8, 3.9, or 3.10) |
+| Pip |
+| Conda or Python virtualenv |
+| git (only required for advanced installation) |
+
+## ② Install
+
+1. **Install Dependencies**
+
+   Install required packages using:
+
+   ```
+   sudo apt-get install build-essential python3-dev libgl1 libglib2.0-0
+   ```
+
+2. **Create and activate a Python3 virtual environment**
+
+   We encourage you to use a Python virtual environment (virtualenv or conda)
+   for consistent package management. There are two ways to do this:
+
+   a. Use `virtualenv`:
+
+      ```
+      virtualenv -p python3 tlt_dev_venv
+      source tlt_dev_venv/bin/activate
+      ```
+
+   b. Or use `conda`:
+
+      ```
+      conda create --name tlt_dev_venv python=3.9
+      conda activate tlt_dev_venv
+      ```
+
+3. **Install Intel Transfer Learning Tool**
+
+   Use the Basic Installation instructions unless you plan on making code changes.
+
+   a. **Basic Installation**
+
+      ```
+      pip install intel-transfer-learning-tool
+      ```
+
+   b. **Advanced Installation**
+
+      Clone the repo:
+
+      ```
+      git clone https://github.com/IntelAI/transfer-learning.git
+      cd transfer-learning
+      ```
+
+      Then either do an editable install to avoid a rebuild and
+      install after each code change (preferred):
+
+      ```
+      pip install --editable .
+      ```
+
+      or build and install a wheel:
+
+      ```
+      python setup.py bdist_wheel
+      pip install dist/intel_transfer_learning_tool-0.5.0-py3-none-any.whl
+      ```
+
+4. **Additional Feature-Specific Steps**
+
+   * For distributed/multinode training, follow these additional
+     [distributed training instructions](tlt/distributed/README.md).
+
+5. **Verify Installation**
+
+   Verify that your installation was successful by using the following
+   command, which displays help information about the Intel Transfer Learning Tool:
+
+   ```
+   tlt --help
+   ```
+
+## ③ Run the Intel Transfer Learning Tool
+
+With the Intel Transfer Learning Tool, you can train AI models with TensorFlow or
+PyTorch using either no-code CLI commands at a bash prompt, or low-code API
+calls from a Python script. Both approaches provide the same opportunities for
+training, evaluation, optimization, and benchmarking. With the CLI, no
+programming experience is required; the API needs only basic Python knowledge.
+Choose the approach that works best for you.
+
+### Run Using the No-Code CLI
+
+Continuing from the installation step, let's train a model using CLI
+commands. This example uses the CLI to train an image
+classifier to identify different types of flowers. You can see a list of all
+available image classifier models using the command:
+
+```
+tlt list models --use-case image_classification
+```
+
+**Train a Model**
+
+In this example, we'll use the `tlt train` command to retrain the TensorFlow
+ResNet50v1.5 model using a flowers dataset from the
+[TensorFlow Datasets catalog](https://www.tensorflow.org/datasets/catalog/tf_flowers).
+The `--dataset-dir` and `--output-dir` paths need to point to writable folders on your system.
+```
+# Use the following environment variable setting to reduce the warnings and log output from TensorFlow
+export TF_CPP_MIN_LOG_LEVEL="2"
+
+tlt train -f tensorflow --model-name resnet_v1_50 --dataset-name tf_flowers --dataset-dir "/tmp/data-${USER}" --output-dir "/tmp/output-${USER}"
+```
+```
+Model name: resnet_v1_50
+Framework: tensorflow
+Dataset name: tf_flowers
+Training epochs: 1
+Dataset dir: /tmp/data-user
+Output directory: /tmp/output-user
+...
+Model: "sequential"
+_________________________________________________________________
+ Layer (type)                Output Shape              Param #
+=================================================================
+ keras_layer (KerasLayer)    (None, 2048)              23561152
+ dense (Dense)               (None, 5)                 10245
+=================================================================
+Total params: 23,571,397
+Trainable params: 10,245
+Non-trainable params: 23,561,152
+_________________________________________________________________
+Checkpoint directory: /tmp/output-user/resnet_v1_50_checkpoints
+86/86 [==============================] - 24s 248ms/step - loss: 0.4600 - acc: 0.8438
+Saved model directory: /tmp/output-user/resnet_v1_50/1
+```
+
+After training completes, the `tlt train` command evaluates the model. The loss and
+accuracy values are printed toward the end of the console output. The model is
+exported to the output directory you specified in a numbered folder created for
+each training run.
+
+**Next Steps**
+
+That ends this Get Started CLI example. As a next step, you can also follow the
+[Beyond Get Started CLI Example](examples/cli/README.md) for a complete example
+that includes evaluation, benchmarking, and quantization.
+
+Read about all the CLI commands in the [CLI reference](/cli.md).
+Find more examples in our list of [Examples](examples/README.md).
+
+### Run Using the Low-Code API
+
+The following Python code example trains an image classification model with the TensorFlow
+flowers dataset using API calls from Python. The model is
+benchmarked and quantized to INT8 precision for improved inference performance.
+
+You can run the API example using a Jupyter notebook. See the [notebook setup
+instructions](/notebooks/setup.md) for more details on preparing the Jupyter
+notebook environment.
+
+```python
+import os
+
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
+
+from tlt.datasets import dataset_factory
+from tlt.models import model_factory
+from tlt.utils.types import FrameworkType, UseCaseType
+
+username = os.getenv('USER', 'user')
+
+# Specify a writable directory for the dataset to be downloaded
+dataset_dir = '/tmp/data-{}'.format(username)
+if not os.path.exists(dataset_dir):
+    os.makedirs(dataset_dir)
+
+# Specify a writable directory for output (such as saved model files)
+output_dir = '/tmp/output-{}'.format(username)
+if not os.path.exists(output_dir):
+    os.makedirs(output_dir)
+
+# Get the model
+model = model_factory.get_model(model_name="resnet_v1_50", framework=FrameworkType.TENSORFLOW)
+
+# Download and preprocess the flowers dataset from the TensorFlow datasets catalog
+dataset = dataset_factory.get_dataset(dataset_dir=dataset_dir,
+                                      dataset_name='tf_flowers',
+                                      use_case=UseCaseType.IMAGE_CLASSIFICATION,
+                                      framework=FrameworkType.TENSORFLOW,
+                                      dataset_catalog='tf_datasets')
+dataset.preprocess(image_size=model.image_size, batch_size=32)
+dataset.shuffle_split(train_pct=.75, val_pct=.25)
+
+# Train the model using the dataset
+model.train(dataset, output_dir=output_dir, epochs=1)
+
+# Evaluate the trained model
+metrics = model.evaluate(dataset)
+for metric_name, metric_value in zip(model._model.metrics_names, metrics):
+    print("{}: {}".format(metric_name, metric_value))
+
+# Export the model
+saved_model_dir = model.export(output_dir=output_dir)
+
+# Quantize the trained model
+quantization_output = os.path.join(output_dir, "quantized_model")
+model.quantize(quantization_output, dataset, overwrite_model=True)
+
+# Benchmark the trained model using the Intel Neural Compressor config file
+model.benchmark(dataset, saved_model_dir=quantization_output)
+
+# Do graph optimization on the trained model
+optimization_output = os.path.join(output_dir, "optimized_model")
+model.optimize_graph(optimization_output, overwrite_model=True)
+```
+
+For more information on the API, see the [API Documentation](/api.md).
+
+## Summary and Next Steps
+
+The Intel Transfer Learning Tool can be used to develop an AI model and export
+an Intel-optimized saved model for deployment. The sample CLI and API commands
+we've presented show how to execute end-to-end transfer learning workflows.
+
+For the no-code CLI, you can follow a
+complete example that includes training, evaluation, benchmarking, and quantization,
+as well as some additional models, in the [Beyond Get Started
+CLI example](examples/cli/README.md) documentation. You can also read about all the
+CLI commands in the [CLI reference](/cli.md).
+
+For the low-code API, read about the API in the [API Documentation](/api.md).
+
+Find more CLI and API examples in our list of [Examples](examples/README.md).
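Aside: `model.export()` in the example writes a standard TensorFlow SavedModel, so stock TensorFlow can reload it for inference. A minimal sketch, assuming the export path printed above, a default serving signature, and a 224x224 input for `resnet_v1_50` (all assumptions, not confirmed by this upload):

```python
import tensorflow as tf

# Path printed by the export step in the example above (assumed).
saved_model_dir = "/tmp/output-user/resnet_v1_50/1"

# Load the exported SavedModel and run its default serving signature
# on one dummy image; the input tensor name is read from the signature
# itself rather than hard-coded.
loaded = tf.saved_model.load(saved_model_dir)
infer = loaded.signatures["serving_default"]
input_name = list(infer.structured_input_signature[1].keys())[0]
outputs = infer(**{input_name: tf.random.uniform([1, 224, 224, 3])})
print({name: tuple(t.shape) for name, t in outputs.items()})  # e.g. (1, 5) scores
```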
visual-quality-inspection/transfer-learning/LICENSE
ADDED
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2018 Intel Corporation
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
visual-quality-inspection/transfer-learning/Legal.md
ADDED
@@ -0,0 +1,34 @@
+# Legal Information
+
+## Disclaimer
+
+Intel® Transfer Learning Tool scripts are not intended for benchmarking Intel® platforms. For any
+performance and/or benchmarking information on specific Intel platforms, visit
+https://www.intel.ai/blog.
+
+Intel is committed to the respect of human rights and avoiding complicity in
+human rights abuses, a policy reflected in the Intel Global Human Rights
+Principles. Accordingly, by accessing the Intel material on this platform you
+agree that you will not use the material in a product or application that causes
+or contributes to a violation of an internationally recognized human right.
+
+## License
+
+Intel® Transfer Learning Tool, documentation, and example code are all licensed
+under Apache License Version 2.0.
+
+## Datasets
+
+To the extent that any [public datasets](DATASETS.md) are referenced by Intel or accessed using
+tools or code on this site those datasets are provided by the third party
+indicated as the data source. Intel does not create the data, or datasets, and
+does not warrant their accuracy or quality. By accessing the public dataset(s)
+you agree to the terms associated with those datasets and that your use complies
+with the applicable license.
+
+Intel expressly disclaims the accuracy, adequacy, or completeness of any public
+datasets, and is not liable for any errors, omissions, or defects in the data,
+or for any reliance on the data. Intel is not liable for any liability or
+damages relating to your use of public datasets.
+
+\*Other names and brands may be claimed as the property of others. [Trademarks](http://www.intel.com/content/www/us/en/legal/trademarks.html)
visual-quality-inspection/transfer-learning/MANIFEST.in
ADDED
@@ -0,0 +1 @@
+recursive-include tlt *
visual-quality-inspection/transfer-learning/Makefile
ADDED
@@ -0,0 +1,112 @@
+#
+# Copyright (c) 2022 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+# Note: These are just placeholders for future additions to Makefile.
+# You can remove these comments later.
+ACTIVATE_TLT_VENV = "tlt_dev_venv/bin/activate"
+ACTIVATE_NOTEBOOK_VENV = "tlt_notebook_venv/bin/activate"
+ACTIVATE_TEST_VENV = "tlt_test_venv/bin/activate"
+ACTIVATE_DOCS_VENV = $(ACTIVATE_TEST_VENV)
+
+# Customize sample test run commands
+# PY_TEST_EXTRA_ARGS="'-vvv -k test_platform_util_with_no_args'" make test
+# PY_TEST_EXTRA_ARGS="'--collect-only'" make test
+PY_TEST_EXTRA_ARGS ?= "--durations=0"
+
+tlt_test_venv: $(CURDIR)/tests/requirements-test.txt
+	@echo "Creating a virtualenv tlt_test_venv..."
+	@test -d tlt_test_venv || virtualenv -p python3 tlt_test_venv
+
+	@echo "Building the TLT API in tlt_test_venv env..."
+	@. $(ACTIVATE_TEST_VENV) && pip install --editable .
+
+	@echo "Installing test dependencies..."
+	@. $(ACTIVATE_TEST_VENV) && pip install -r $(CURDIR)/tests/requirements-test.txt
+
+tlt_notebook_venv: $(CURDIR)/notebooks/requirements.txt
+	@echo "Creating a virtualenv tlt_notebook_venv..."
+	@test -d tlt_notebook_venv || virtualenv -p python3 tlt_notebook_venv
+
+	@echo "Installing TF & PYT notebook dependencies..."
+	@. $(ACTIVATE_NOTEBOOK_VENV) && pip install -r $(CURDIR)/notebooks/requirements.txt
+
+test: unittest integration
+
+unittest: tlt_test_venv
+	@echo "Testing unit test API..."
+	@. $(ACTIVATE_TEST_VENV) && PYTHONPATH=$(CURDIR)/tests py.test -vvv -s $(PY_TEST_EXTRA_ARGS) "-k not integration and not skip"
+
+integration: tlt_test_venv
+	@echo "Testing integration test API..."
+	@. $(ACTIVATE_TEST_VENV) && PYTHONPATH=$(CURDIR)/tests py.test -vvv -s $(PY_TEST_EXTRA_ARGS) "-k integration and not skip"
+
+lint: tlt_test_venv
+	@echo "Style checks..."
+	@. $(ACTIVATE_TEST_VENV) && flake8 tlt tests downloader
+
+clean:
+	rm -rf tlt_test_venv
+
+tlt_docs_venv: tlt_test_venv $(CURDIR)/docs/requirements-docs.txt
+	@echo "Installing docs dependencies..."
+	@. $(ACTIVATE_DOCS_VENV) && pip install -r $(CURDIR)/docs/requirements-docs.txt
+
+html: tlt_docs_venv
+	@echo "Building Sphinx documentation..."
+	@. $(ACTIVATE_DOCS_VENV) && $(MAKE) -C docs clean html
+
+test_docs: html
+	@echo "Testing Sphinx documentation..."
+	@. $(ACTIVATE_DOCS_VENV) && $(MAKE) -C docs doctest
+
+tlt_notebook_venv: tlt_test_venv
+	@echo "Installing notebook dependencies..."
+	@. $(ACTIVATE_TEST_VENV) && pip install -r $(CURDIR)/notebooks/requirements.txt
+
+test_notebook_custom: tlt_notebook_venv
+	@echo "Testing Jupyter notebooks with custom datasets..."
+	@. $(ACTIVATE_TEST_VENV) && \
+	bash run_notebooks.sh $(CURDIR)/notebooks/image_classification/tlt_api_tf_image_classification/TLT_TF_Image_Classification_Transfer_Learning.ipynb remove_for_custom_dataset && \
+	bash run_notebooks.sh $(CURDIR)/notebooks/image_classification/tlt_api_pyt_image_classification/TLT_PyTorch_Image_Classification_Transfer_Learning.ipynb remove_for_custom_dataset && \
+	bash run_notebooks.sh $(CURDIR)/notebooks/text_classification/tlt_api_tf_text_classification/TLT_TF_Text_Classification.ipynb remove_for_custom_dataset && \
+	bash run_notebooks.sh $(CURDIR)/notebooks/text_classification/tlt_api_pyt_text_classification/TLT_PYT_Text_Classification.ipynb remove_for_custom_dataset
+
+test_notebook_catalog: tlt_notebook_venv
+	@echo "Testing Jupyter notebooks with public catalog datasets..."
+	@. $(ACTIVATE_TEST_VENV) && \
+	bash run_notebooks.sh $(CURDIR)/notebooks/image_classification/tlt_api_tf_image_classification/TLT_TF_Image_Classification_Transfer_Learning.ipynb remove_for_tf_dataset && \
+	bash run_notebooks.sh $(CURDIR)/notebooks/image_classification/tlt_api_pyt_image_classification/TLT_PyTorch_Image_Classification_Transfer_Learning.ipynb remove_for_tv_dataset && \
+	bash run_notebooks.sh $(CURDIR)/notebooks/text_classification/tlt_api_tf_text_classification/TLT_TF_Text_Classification.ipynb remove_for_tf_dataset && \
+	bash run_notebooks.sh $(CURDIR)/notebooks/text_classification/tlt_api_pyt_text_classification/TLT_PYT_Text_Classification.ipynb remove_for_hf_dataset
+
+test_tf_notebook: tlt_notebook_venv
+	@. $(ACTIVATE_TEST_VENV) && bash run_notebooks.sh tensorflow
+
+test_pyt_notebook: tlt_notebook_venv
+	@. $(ACTIVATE_TEST_VENV) && bash run_notebooks.sh pytorch
+
+dist: tlt_docs_venv
+	@echo "Create binary wheel..."
+	@. $(ACTIVATE_DOCS_VENV) && python setup.py bdist_wheel
+
+check_dist: dist
+	@echo "Testing the wheel..."
+	@. $(ACTIVATE_DOCS_VENV) && \
+	pip install twine && \
+	python setup.py bdist_wheel && \
+	twine check dist/*
visual-quality-inspection/transfer-learning/Models.md
ADDED
@@ -0,0 +1,178 @@
# Intel® Transfer Learning Tool Supported Models

## Image Classification

| Model name | Framework | Model Hub |
|------------|-----------|-----------|
| alexnet | PyTorch* | Torchvision* |
| convnext_base | PyTorch | Torchvision |
| convnext_large | PyTorch | Torchvision |
| convnext_small | PyTorch | Torchvision |
| convnext_tiny | PyTorch | Torchvision |
| densenet121 | PyTorch | Torchvision |
| densenet161 | PyTorch | Torchvision |
| densenet169 | PyTorch | Torchvision |
| densenet201 | PyTorch | Torchvision |
| efficientnetv2-b0 | TensorFlow* | TensorFlow Hub* |
| efficientnetv2-b1 | TensorFlow | TensorFlow Hub |
| efficientnetv2-b2 | TensorFlow | TensorFlow Hub |
| efficientnetv2-b3 | TensorFlow | TensorFlow Hub |
| efficientnetv2-s | TensorFlow | TensorFlow Hub |
| efficientnet_b0 | TensorFlow | TensorFlow Hub |
| efficientnet_b0 | PyTorch | Torchvision |
| efficientnet_b1 | TensorFlow | TensorFlow Hub |
| efficientnet_b1 | PyTorch | Torchvision |
| efficientnet_b2 | TensorFlow | TensorFlow Hub |
| efficientnet_b2 | PyTorch | Torchvision |
| efficientnet_b3 | TensorFlow | TensorFlow Hub |
| efficientnet_b3 | PyTorch | Torchvision |
| efficientnet_b4 | TensorFlow | TensorFlow Hub |
| efficientnet_b4 | PyTorch | Torchvision |
| efficientnet_b5 | TensorFlow | TensorFlow Hub |
| efficientnet_b5 | PyTorch | Torchvision |
| efficientnet_b6 | TensorFlow | TensorFlow Hub |
| efficientnet_b6 | PyTorch | Torchvision |
| efficientnet_b7 | TensorFlow | TensorFlow Hub |
| efficientnet_b7 | PyTorch | Torchvision |
| googlenet | PyTorch | Torchvision |
| inception_v3 | TensorFlow | TensorFlow Hub |
| mnasnet0_5 | PyTorch | Torchvision |
| mnasnet1_0 | PyTorch | Torchvision |
| mobilenet_v2 | PyTorch | Torchvision |
| mobilenet_v2_100_224 | TensorFlow | TensorFlow Hub |
| mobilenet_v3_large | PyTorch | Torchvision |
| mobilenet_v3_small | PyTorch | Torchvision |
| nasnet_large | TensorFlow | TensorFlow Hub |
| proxyless_cpu | PyTorch | PyTorch Hub* |
| regnet_x_16gf | PyTorch | Torchvision |
| regnet_x_1_6gf | PyTorch | Torchvision |
| regnet_x_32gf | PyTorch | Torchvision |
| regnet_x_3_2gf | PyTorch | Torchvision |
| regnet_x_400mf | PyTorch | Torchvision |
| regnet_x_800mf | PyTorch | Torchvision |
| regnet_x_8gf | PyTorch | Torchvision |
| regnet_y_16gf | PyTorch | Torchvision |
| regnet_y_1_6gf | PyTorch | Torchvision |
| regnet_y_32gf | PyTorch | Torchvision |
| regnet_y_3_2gf | PyTorch | Torchvision |
| regnet_y_400mf | PyTorch | Torchvision |
| regnet_y_800mf | PyTorch | Torchvision |
| regnet_y_8gf | PyTorch | Torchvision |
| resnet101 | PyTorch | Torchvision |
| resnet152 | PyTorch | Torchvision |
| resnet18 | PyTorch | Torchvision |
| resnet18_ssl | PyTorch | PyTorch Hub |
| resnet18_swsl | PyTorch | PyTorch Hub |
| resnet34 | PyTorch | Torchvision |
| resnet50 | PyTorch | Torchvision |
| resnet50_ssl | PyTorch | PyTorch Hub |
| resnet50_swsl | PyTorch | PyTorch Hub |
| resnet_v1_50 | TensorFlow | TensorFlow Hub |
| resnet_v2_101 | TensorFlow | TensorFlow Hub |
| resnet_v2_50 | TensorFlow | TensorFlow Hub |
| resnext101_32x16d_ssl | PyTorch | PyTorch Hub |
| resnext101_32x16d_swsl | PyTorch | PyTorch Hub |
| resnext101_32x16d_wsl | PyTorch | PyTorch Hub |
| resnext101_32x32d_wsl | PyTorch | PyTorch Hub |
| resnext101_32x48d_wsl | PyTorch | PyTorch Hub |
| resnext101_32x4d_ssl | PyTorch | PyTorch Hub |
| resnext101_32x4d_swsl | PyTorch | PyTorch Hub |
| resnext101_32x8d | PyTorch | Torchvision |
| resnext101_32x8d_ssl | PyTorch | PyTorch Hub |
| resnext101_32x8d_swsl | PyTorch | PyTorch Hub |
| resnext101_32x8d_wsl | PyTorch | PyTorch Hub |
| resnext50_32x4d | PyTorch | Torchvision |
| resnext50_32x4d_ssl | PyTorch | PyTorch Hub |
| resnext50_32x4d_swsl | PyTorch | PyTorch Hub |
| shufflenet_v2_x0_5 | PyTorch | Torchvision |
| shufflenet_v2_x1_0 | PyTorch | Torchvision |
| vgg11 | PyTorch | Torchvision |
| vgg11_bn | PyTorch | Torchvision |
| vgg13 | PyTorch | Torchvision |
| vgg13_bn | PyTorch | Torchvision |
| vgg16 | PyTorch | Torchvision |
| vgg16_bn | PyTorch | Torchvision |
| vgg19 | PyTorch | Torchvision |
| vgg19_bn | PyTorch | Torchvision |
| vit_b_16 | PyTorch | Torchvision |
| vit_b_32 | PyTorch | Torchvision |
| vit_l_16 | PyTorch | Torchvision |
| vit_l_32 | PyTorch | Torchvision |
| wide_resnet101_2 | PyTorch | Torchvision |
| wide_resnet50_2 | PyTorch | Torchvision |
| ConvNeXtBase | TensorFlow | Keras* |
| ConvNeXtLarge | TensorFlow | Keras |
| ConvNeXtSmall | TensorFlow | Keras |
| ConvNeXtTiny | TensorFlow | Keras |
| ConvNeXtXLarge | TensorFlow | Keras |
| DenseNet121 | TensorFlow | Keras |
| DenseNet169 | TensorFlow | Keras |
| DenseNet201 | TensorFlow | Keras |
| EfficientNetV2B0 | TensorFlow | Keras |
| EfficientNetV2B1 | TensorFlow | Keras |
| EfficientNetV2B2 | TensorFlow | Keras |
| EfficientNetV2B3 | TensorFlow | Keras |
| EfficientNetV2L | TensorFlow | Keras |
| EfficientNetV2M | TensorFlow | Keras |
| EfficientNetV2S | TensorFlow | Keras |
| InceptionResNetV2 | TensorFlow | Keras |
| InceptionV3 | TensorFlow | Keras |
| MobileNet | TensorFlow | Keras |
| MobileNetV2 | TensorFlow | Keras |
| NASNetLarge | TensorFlow | Keras |
| NASNetMobile | TensorFlow | Keras |
| ResNet101 | TensorFlow | Keras |
| ResNet101V2 | TensorFlow | Keras |
| ResNet152 | TensorFlow | Keras |
| ResNet152V2 | TensorFlow | Keras |
| ResNet50 | TensorFlow | Keras |
| ResNet50V2 | TensorFlow | Keras |
| VGG16 | TensorFlow | Keras |
| VGG19 | TensorFlow | Keras |
| Xception | TensorFlow | Keras |

## Text Classification

| Model name | Framework | Model Hub |
|------------|-----------|-----------|
| bert-base-cased | PyTorch | Hugging Face* |
| bert-base-uncased | TensorFlow | Hugging Face |
| bert-large-uncased | TensorFlow | Hugging Face |
| bert-large-uncased | PyTorch | Hugging Face |
| clinical-bert | PyTorch | Hugging Face |
| distilbert-base-uncased | PyTorch | Hugging Face |
| google/bert_uncased_L-10_H-128_A-2 | TensorFlow | Hugging Face |
| google/bert_uncased_L-10_H-256_A-4 | TensorFlow | Hugging Face |
| google/bert_uncased_L-10_H-512_A-8 | TensorFlow | Hugging Face |
| google/bert_uncased_L-10_H-768_A-12 | TensorFlow | Hugging Face |
| google/bert_uncased_L-12_H-128_A-2 | TensorFlow | Hugging Face |
| google/bert_uncased_L-12_H-256_A-4 | TensorFlow | Hugging Face |
| google/bert_uncased_L-12_H-512_A-8 | TensorFlow | Hugging Face |
| google/bert_uncased_L-12_H-768_A-12 | TensorFlow | Hugging Face |
| google/bert_uncased_L-2_H-128_A-2 | TensorFlow | Hugging Face |
| google/bert_uncased_L-2_H-256_A-4 | TensorFlow | Hugging Face |
| google/bert_uncased_L-2_H-512_A-8 | TensorFlow | Hugging Face |
| google/bert_uncased_L-2_H-768_A-12 | TensorFlow | Hugging Face |
| google/bert_uncased_L-4_H-128_A-2 | TensorFlow | Hugging Face |
| google/bert_uncased_L-4_H-256_A-4 | TensorFlow | Hugging Face |
| google/bert_uncased_L-4_H-512_A-8 | TensorFlow | Hugging Face |
| google/bert_uncased_L-4_H-768_A-12 | TensorFlow | Hugging Face |
| google/bert_uncased_L-6_H-128_A-2 | TensorFlow | Hugging Face |
| google/bert_uncased_L-6_H-256_A-4 | TensorFlow | Hugging Face |
| google/bert_uncased_L-6_H-512_A-8 | TensorFlow | Hugging Face |
| google/bert_uncased_L-6_H-768_A-12 | TensorFlow | Hugging Face |
| google/bert_uncased_L-8_H-128_A-2 | TensorFlow | Hugging Face |
| google/bert_uncased_L-8_H-256_A-4 | TensorFlow | Hugging Face |
| google/bert_uncased_L-8_H-512_A-8 | TensorFlow | Hugging Face |
| google/bert_uncased_L-8_H-768_A-12 | TensorFlow | Hugging Face |

## Image Anomaly Detection

| Model name | Framework | Model Hub |
|------------|-----------|-----------|
| resnet101 | PyTorch | Torchvision |
| resnet152 | PyTorch | Torchvision |
| resnet18 | PyTorch | Torchvision |
| resnet34 | PyTorch | Torchvision |
| resnet50 | PyTorch | Torchvision |
visual-quality-inspection/transfer-learning/README.md
ADDED
@@ -0,0 +1,98 @@
*Note: You may find it easier to read about the Intel Transfer Learning Tool, follow the Get
Started guide, and browse the API material from our published documentation site
https://intelai.github.io/transfer-learning.*

<!-- SkipBadges -->

# Intel® Transfer Learning Tool

Transfer learning workflows use the knowledge learned by a pre-trained model on
a large dataset to improve the performance of a related problem with a smaller
dataset.

## What is Intel® Transfer Learning Tool

Intel® Transfer Learning Tool makes it easier and faster for you to
create transfer learning workflows across a variety of AI use cases. Its
open-source Python\* library leverages public pretrained model hubs,
Intel-optimized deep learning frameworks, and your custom dataset to efficiently
generate new models optimized for Intel hardware.

This project documentation provides information, resource links, and instructions for the Intel
Transfer Learning Tool as well as Jupyter\* notebooks and examples that
demonstrate its usage.

**Features:**
* Supports PyTorch\* and TensorFlow\*
* Select from over [100 image classification and text classification models](Models.md) from
  Torchvision, PyTorch Hub, TensorFlow Hub, Keras, and Hugging Face
* Use your own custom dataset or get started quickly with built-in datasets
* Automatically create a trainable classification layer customized for your dataset
* Pre-process your dataset using scaling, cropping, batching, and splitting
* Use APIs for prediction, evaluation, and benchmarking
* Export your model for deployment or resume training from checkpoints

**Intel Optimizations:**
* Boost performance with Intel® Optimization for TensorFlow and Intel® Extension for PyTorch
* Quantize to INT8 to reduce model size and speed up inference using Intel® Neural Compressor
* Optimize your model for FP32 inference using Intel Neural Compressor
* Reduce training time with auto-mixed precision for select hardware platforms
* Further reduce training time with multinode training for PyTorch

## How the Intel Transfer Learning Tool Works

The Intel Transfer Learning Tool lets you train AI models with TensorFlow or
PyTorch using either no-code command line interface (CLI) commands at a bash
prompt, or low-code application programming interface (API) calls from a Python
script.

Use your own dataset or select an existing image or text classification dataset listed in the
[public datasets](DATASETS.md) documentation. Construct your own CLI or API commands for training, evaluation,
and optimization using the TensorFlow or PyTorch framework, and finally export
your saved model optimized for inference on Intel CPUs.
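For example, here is a minimal low-code sketch of that flow using the dataset and model
factory methods from the [API Reference](api.md). The argument and method names below are
illustrative assumptions, not the documented signatures; check the published API
documentation for the exact calls:

```
from tlt.datasets.dataset_factory import get_dataset
from tlt.models.model_factory import get_model

# Illustrative arguments and method names -- not the documented signatures.
model = get_model('resnet_v1_50', 'tensorflow')
dataset = get_dataset('/tmp/data', 'image_classification', 'tensorflow',
                      dataset_name='tf_flowers')
model.train(dataset, output_dir='/tmp/output', epochs=1)
model.export('/tmp/output')
```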
An overview of the Intel Transfer Learning Tool flow is shown in this
figure:

<p align="center"><b>Intel Transfer Learning Tool Flow</b></p>

<img alt="Intel Transfer Learning Tool Flow" title="Intel Transfer Learning Tool Flow" src="images/TLT-tool_flow.svg" width="600">

## Get Started

The [Get Started](GetStarted.md) guide walks you through the steps to check
system requirements, install, and then run the tool with a couple of examples
showing no-code CLI and low-code API approaches. After that, you can check out
these additional CLI and API [Examples](examples/README.md).

<!-- ExpandGetStarted-Start -->
As described in the [Get Started](GetStarted.md) guide, once you have a Python
3.9 environment set up, you can do a basic install of the Intel Transfer Learning
Tool using:

```
pip install intel-transfer-learning-tool
```

Then you can use the Transfer Learning Tool CLI (`tlt`) to train a
TensorFlow image classification model (resnet_v1_50), download and use an
existing built-in dataset (tf_flowers), and save the trained model to
`/tmp/output` using this one command:

```
tlt train --framework tensorflow --model-name resnet_v1_50 --dataset-name tf_flowers \
    --output-dir /tmp/output --dataset-dir /tmp/data
```

Use `tlt --help` to see the list of CLI commands. More detailed help for each
command can be found using, for example, `tlt train --help`.

<!-- ExpandGetStarted-End -->

## Support

The Intel Transfer Learning Tool team tracks bugs and enhancement requests using
[GitHub issues](https://github.com/IntelAI/transfer-learning-tool/issues). Before submitting a
suggestion or bug report, search the existing GitHub issues to see if your issue has already been reported.

See [Legal Information](Legal.md) for Disclaimers, Trademark, and Licensing information.
visual-quality-inspection/transfer-learning/SECURITY.md
ADDED
@@ -0,0 +1,12 @@
# Security Policy

## Report a Vulnerability

Please report security issues or vulnerabilities to the [Intel® Security Center].

For more information on how Intel® works to resolve security issues, see
[Vulnerability Handling Guidelines].

[Intel® Security Center]: https://www.intel.com/content/www/us/en/security-center/default.html

[Vulnerability Handling Guidelines]: https://www.intel.com/content/www/us/en/security-center/vulnerability-handling-guidelines.html
visual-quality-inspection/transfer-learning/_config.yml
ADDED
@@ -0,0 +1 @@
theme: jekyll-theme-minimal
visual-quality-inspection/transfer-learning/api.md
ADDED
@@ -0,0 +1,4 @@
# API Reference

Low-code Python\* API documentation is automatically generated from the code and
appears in the Transfer Learning Tool documentation website's [API](https://intelai.github.io/transfer-learning/main/api.html) page.
visual-quality-inspection/transfer-learning/bandit.yaml
ADDED
@@ -0,0 +1,11 @@
# FILE: bandit.yaml
exclude_dirs: [ '.venv', '.tox', 'tlt_test_venv', 'tlt_notebook_venv', 'tests' ]
skips: [ 'B301', 'B311', 'B403', 'B404' ]
# B301 - the dill usage scope is different from what's described in CWE-502.
#        dill is mostly used for dumping/saving models to disk (serialization).
#        When loading previously saved models from disk (deserialization),
#        either the Keras or the PyTorch model loader is used first to verify the model,
#        and then a copy is created and passed to dill for loading.
# B311 - the random usage scope is different from what's described in CWE-330.
# B403 - reported every time 'dill' is imported, so it's covered by the B301 justification.
# B404 - reported every time 'subprocess' is imported, but the module is not used as described in CWE-78.
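To make the B301 justification above concrete, here is a minimal Python sketch of the load
pattern it describes. The helper name and the use of `torch.load` as the verifying framework
loader are illustrative assumptions, not the repository's actual code:

```python
import dill  # noqa: B403 -- see the justification above
import torch


def load_model_safely(path):
    """Hypothetical helper illustrating the pattern described above."""
    # Step 1: a framework loader (PyTorch here; Keras in the TF case)
    # parses and verifies the saved artifact first.
    verified_model = torch.load(path, map_location="cpu")
    # Step 2: only the verified object is round-tripped through dill,
    # e.g. to create a deep copy of the model.
    return dill.loads(dill.dumps(verified_model))
```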
visual-quality-inspection/transfer-learning/cli.md
ADDED
@@ -0,0 +1,4 @@
# CLI Reference

No-code bash CLI documentation is automatically generated from the code and
appears in the Transfer Learning Tool documentation website's [CLI](https://intelai.github.io/transfer-learning/main/cli.html) page.
visual-quality-inspection/transfer-learning/docker/Dockerfile
ADDED
@@ -0,0 +1,143 @@
#
# Copyright (c) 2023 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0

ARG IMAGE_NAME=ubuntu
ARG IMAGE_TAG=22.04
FROM ${IMAGE_NAME}:${IMAGE_TAG} as base

# TLT base target
FROM base as tlt-base

ARG PYTHON=python3

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && apt-get install -y --no-install-recommends --fix-missing \
    libgl1 \
    libglib2.0-0 \
    ${PYTHON} \
    python3-pip && \
    apt-get clean autoclean && \
    apt-get autoremove -y && \
    rm -rf /var/lib/apt/lists/*

RUN ln -sf "$(which ${PYTHON})" /usr/bin/python

# TLT target for GitHub actions
FROM tlt-base as tlt-ci

ENV DEBIAN_FRONTEND=noninteractive

ENV LANG C.UTF-8
ARG PYTHON=python3

RUN apt-get update && apt-get install -y --no-install-recommends --fix-missing \
    ${PYTHON}-dev \
    ${PYTHON}-distutils \
    build-essential \
    ca-certificates \
    make \
    pandoc && \
    apt-get clean autoclean && \
    apt-get autoremove -y && \
    rm -rf /var/lib/apt/lists/*

RUN ${PYTHON} -m pip install virtualenv

# TLT target for development
FROM tlt-ci as tlt-devel

COPY . /tmp/intel-transfer-learning

WORKDIR /tmp/intel-transfer-learning

RUN ${PYTHON} setup.py bdist_wheel && \
    pip install --no-cache-dir -f https://download.pytorch.org/whl/cpu/torch_stable.html dist/*.whl

# TLT target for deployment
FROM tlt-base as tlt-prod

COPY --from=tlt-devel /usr/local/lib/python3.10/dist-packages /usr/local/lib/python3.10/dist-packages
COPY --from=tlt-devel /usr/local/bin /usr/local/bin

ENV DATASET_DIR=/tmp/data
ENV OUTPUT_DIR=/tmp/output

# TLT target for running with MPI
FROM tlt-prod as tlt-mpi

RUN apt-get update && apt-get install -y --no-install-recommends --fix-missing \
    libopenmpi-dev \
    openmpi-bin \
    openmpi-common \
    openssh-client \
    openssh-server && \
    apt-get clean autoclean && \
    apt-get autoremove -y && \
    rm -rf /var/lib/apt/lists/*

# Create a wrapper for OpenMPI to allow running as root by default
RUN mv /usr/bin/mpirun /usr/bin/mpirun.real && \
    echo '#!/bin/bash' > /usr/bin/mpirun && \
    echo 'mpirun.real --allow-run-as-root "$@"' >> /usr/bin/mpirun && \
    chmod a+x /usr/bin/mpirun

# Configure OpenMPI with good defaults
RUN echo "btl_tcp_if_exclude = lo,docker0" >> /etc/openmpi/openmpi-mca-params.conf

# Install OpenSSH for MPI to communicate between containers and allow OpenSSH to
# talk to containers without asking for confirmation
RUN mkdir -p /var/run/sshd && \
    cat /etc/ssh/ssh_config | grep -v StrictHostKeyChecking > /etc/ssh/ssh_config.new && \
    echo "    StrictHostKeyChecking no" >> /etc/ssh/ssh_config.new && \
    mv /etc/ssh/ssh_config.new /etc/ssh/ssh_config

# TLT target with MPI, Horovod, and all development tools
FROM tlt-mpi as tlt-dist-devel

ARG HOROVOD_WITH_PYTORCH=1
ARG HOROVOD_WITHOUT_MXNET=1
ARG HOROVOD_WITH_TENSORFLOW=1
ARG HOROVOD_VERSION

ARG PYTHON=python3

RUN apt-get update && apt-get install -y --no-install-recommends --fix-missing \
    build-essential \
    cmake \
    g++ \
    gcc \
    git \
    ${PYTHON}-dev && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

RUN python -m pip install --no-cache-dir horovod==${HOROVOD_VERSION}

ARG ONECCL_VERSION
ARG ONECCL_URL=https://developer.intel.com/ipex-whl-stable-cpu

RUN python -m pip install --no-cache-dir oneccl_bind_pt==${ONECCL_VERSION} -f ${ONECCL_URL}

COPY . /tmp/intel-transfer-learning

WORKDIR /tmp/intel-transfer-learning

FROM tlt-mpi as tlt-dist-prod

# Redeclare PYTHON so the build arg (e.g. python3.10 from docker-compose) is in
# scope for this stage; otherwise ${PYTHON} would expand to an empty string.
ARG PYTHON=python3.10

COPY --from=tlt-dist-devel /usr/local/lib/${PYTHON}/dist-packages /usr/local/lib/python3.10/dist-packages
COPY --from=tlt-dist-devel /usr/local/bin /usr/local/bin
visual-quality-inspection/transfer-learning/docker/README.md
ADDED
@@ -0,0 +1,73 @@
# Docker
Follow these instructions to set up and run our provided Docker image.

## Set Up Docker Engine and Docker Compose
You'll need to install Docker Engine on your development system. Note that while **Docker Engine** is free to use, **Docker Desktop** may require you to purchase a license. See the [Docker Engine Server installation instructions](https://docs.docker.com/engine/install/#server) for details.

To build and run this workload inside a Docker container, ensure you have Docker Compose installed on your machine. If you don't have this tool installed, consult the official [Docker Compose installation documentation](https://docs.docker.com/compose/install/linux/#install-the-plugin-manually).

```bash
DOCKER_CONFIG=${DOCKER_CONFIG:-$HOME/.docker}
mkdir -p $DOCKER_CONFIG/cli-plugins
curl -SL https://github.com/docker/compose/releases/download/v2.7.0/docker-compose-linux-x86_64 -o $DOCKER_CONFIG/cli-plugins/docker-compose
chmod +x $DOCKER_CONFIG/cli-plugins/docker-compose
docker compose version
```

## Set Up Docker Image
Build or pull the provided Docker images.

```bash
cd docker
docker compose build
```
OR
```bash
docker pull intel/ai-tools:tlt-0.5.0
docker pull intel/ai-tools:tlt-devel-0.5.0
docker pull intel/ai-tools:tlt-dist-0.5.0
docker pull intel/ai-tools:tlt-dist-devel-0.5.0
```

## Use Docker Image
Use the TLT CLI without installing it by running the provided Docker image with Docker Compose.

```bash
docker compose run tlt-prod
# OR
docker compose run tlt-prod tlt --help
```

## Kubernetes
### 1. Install Helm
- Install [Helm](https://helm.sh/docs/intro/install/)
```bash
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 && \
chmod 700 get_helm.sh && \
./get_helm.sh
```
### 2. Set Up the Training Operator
Install the standalone operator from GitHub or use a pre-existing Kubeflow configuration.
```bash
kubectl apply -k "github.com/kubeflow/training-operator/manifests/overlays/standalone"
```
OR
```bash
helm repo add cowboysysop https://cowboysysop.github.io/charts/
helm install <release name> cowboysysop/training-operator
```
### 3. Deploy the TLT Distributed Job
For more customization information, see the chart [README](./chart/README.md).
```bash
export NAMESPACE=kubeflow
helm install --namespace ${NAMESPACE} --set ... tlt-distributed ./docker/chart
```
### 4. View
To view your workflow's progress:
```bash
kubectl get -o yaml mpijob tf-tlt-distributed -n ${NAMESPACE}
```
OR
```bash
kubectl logs tf-tlt-distributed-launcher -n ${NAMESPACE}
```
visual-quality-inspection/transfer-learning/docker/chart/.helmignore
ADDED
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
visual-quality-inspection/transfer-learning/docker/chart/Chart.yaml
ADDED
@@ -0,0 +1,24 @@
apiVersion: v2
name: tlt-tf-distributed-training  # chart names must be lowercase with no spaces
description: A Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.2.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"
visual-quality-inspection/transfer-learning/docker/chart/README.md
ADDED
@@ -0,0 +1,31 @@
# TLT TF Distributed Training

A Helm chart for Kubernetes

## Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| batchDenom | int | `1` | Batch denominator used to divide the global batch size |
| batchSize | int | `128` | Global batch size to distribute data |
| datasetName | string | `"cifar10"` | Dataset name to load from tfds |
| epochs | int | `1` | Total epochs to train the model |
| imageName | string | `"intel/ai-tools"` | |
| imageTag | string | `"0.5.0-dist-devel"` | |
| metadata.name | string | `"tlt-distributed"` | |
| metadata.namespace | string | `"kubeflow"` | |
| modelName | string | `"https://tfhub.dev/google/efficientnet/b1/feature-vector/1"` | TF Hub or Hugging Face model URL |
| pvcName | string | `"tlt"` | |
| pvcResources.data | string | `"2Gi"` | Amount of storage for the dataset |
| pvcResources.output | string | `"1Gi"` | Amount of storage for the output directory |
| pvcScn | string | `"nil"` | PVC `StorageClassName` |
| resources.cpu | int | `2` | Number of CPUs for the launcher |
| resources.memory | string | `"4Gi"` | Amount of memory for the launcher |
| scaling | string | `"strong"` | For `weak` scaling, `lr` is scaled by a factor of `sqrt(batch_size/batch_denom)` and the global batch size is used by every process. For `strong` scaling, `lr` is scaled by the world size and the global batch size is divided by the world size (see the sketch after this table) |
| slotsPerWorker | int | `1` | Number of processes per worker |
| useCase | string | `"image_classification"` | Use case (`image_classification` or `text_classification`) |
| workerResources.cpu | int | `4` | Number of CPUs per worker |
| workerResources.memory | string | `"8Gi"` | Amount of memory per worker |
| workers | int | `4` | Number of workers |
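As a reading aid for the `scaling` value above, here is a small Python sketch, illustrative
only and not the training script's actual code, of the learning-rate adjustment each mode
implies:

```python
import math


def scale_lr(lr, batch_size, batch_denom, world_size, scaling="strong"):
    """Illustrative sketch of the scaling rules described in the table above."""
    if scaling == "weak":
        # weak: lr grows with sqrt(batch_size / batch_denom);
        # every process keeps the global batch size
        return lr * math.sqrt(batch_size / batch_denom)
    # strong: lr is multiplied by the world size;
    # the global batch size is divided across the processes
    return lr * world_size
```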
visual-quality-inspection/transfer-learning/docker/chart/templates/mpijob.yaml
ADDED
@@ -0,0 +1,92 @@
apiVersion: kubeflow.org/v1
kind: MPIJob
metadata:
  name: tf-{{ .Values.metadata.name }}
  namespace: {{ .Values.metadata.namespace }}
spec:
  slotsPerWorker: {{ .Values.slotsPerWorker }}
  runPolicy:
    cleanPodPolicy: None
  mpiReplicaSpecs:
    Launcher:
      replicas: 1
      template:
        spec:
          hostIPC: true
          containers:
          - image: "{{ .Values.imageName }}:{{ .Values.imageTag }}"
            name: mpi
            command:
            - horovodrun
            args:
            - --verbose
            - -np
            - "{{ .Values.workers }}"
            - --hostfile
            - /etc/mpi/hostfile
            - python
            - /tmp/intel-transfer-learning/tlt/distributed/tensorflow/run_train_tf.py
            - --batch_denom
            - "{{ .Values.batchDenom }}"
            - --batch_size
            - "{{ .Values.batchSize }}"
            - --dataset-dir
            - /tmp/data
            - --dataset-name
            - {{ .Values.datasetName }}
            - --epochs
            - "{{ .Values.epochs }}"
            - --model-name
            - {{ .Values.modelName }}
            - --output-dir
            - /tmp/output
            - --scaling
            - "{{ .Values.scaling }}"
            - --shuffle
            - --use-case
            - {{ .Values.useCase }}
            resources:
              limits:
                cpu: {{ .Values.resources.cpu }}
                memory: {{ .Values.resources.memory }}
            volumeMounts:
            - name: dataset-dir
              mountPath: /tmp/data
            - name: output-dir
              mountPath: /tmp/output
          volumes:
          - name: dshm
            emptyDir:
              medium: Memory
          - name: dataset-dir
            persistentVolumeClaim:
              claimName: "{{ .Values.pvcName }}-data"
          - name: output-dir
            persistentVolumeClaim:
              claimName: "{{ .Values.pvcName }}-output"
    Worker:
      replicas: {{ .Values.workers }}
      template:
        spec:
          containers:
          - image: "{{ .Values.imageName }}:{{ .Values.imageTag }}"
            name: mpi
            resources:
              limits:
                cpu: {{ .Values.workerResources.cpu }}
                memory: {{ .Values.workerResources.memory }}
            volumeMounts:
            - name: dataset-dir
              mountPath: /tmp/data
            - name: output-dir
              mountPath: /tmp/output
          volumes:
          - name: dshm
            emptyDir:
              medium: Memory
          - name: dataset-dir
            persistentVolumeClaim:
              claimName: "{{ .Values.pvcName }}-data"
          - name: output-dir
            persistentVolumeClaim:
              claimName: "{{ .Values.pvcName }}-output"
visual-quality-inspection/transfer-learning/docker/chart/templates/pvc.yaml
ADDED
@@ -0,0 +1,25 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ .Values.pvcName }}-data
  namespace: {{ .Values.metadata.namespace }}
spec:
  storageClassName: {{ .Values.pvcScn }}
  accessModes:
  - "ReadWriteOnce"
  resources:
    requests:
      storage: {{ .Values.pvcResources.data }}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ .Values.pvcName }}-output
  namespace: {{ .Values.metadata.namespace }}
spec:
  storageClassName: {{ .Values.pvcScn }}
  accessModes:
  - "ReadWriteOnce"
  resources:
    requests:
      storage: {{ .Values.pvcResources.output }}
visual-quality-inspection/transfer-learning/docker/chart/values.yaml
ADDED
@@ -0,0 +1,28 @@
metadata:
  name: tlt-distributed
  namespace: kubeflow

imageName: intel/ai-tools
imageTag: 0.5.0-dist-devel

batchDenom: 1
batchSize: 128
datasetName: cifar10
epochs: 1
modelName: https://tfhub.dev/google/efficientnet/b1/feature-vector/1
scaling: strong
slotsPerWorker: 1
useCase: image_classification
workers: 4

pvcName: tlt
pvcScn: nil
pvcResources:
  data: 2Gi
  output: 1Gi
resources:
  cpu: 2
  memory: 4Gi
workerResources:
  cpu: 4
  memory: 8Gi
visual-quality-inspection/transfer-learning/docker/docker-compose.yml
ADDED
@@ -0,0 +1,54 @@
version: "3"
services:
  tlt-devel:
    build:
      args:
        http_proxy: ${http_proxy}
        https_proxy: ${https_proxy}
        no_proxy: ""
        IMAGE_NAME: ubuntu
        IMAGE_TAG: 22.04
        PYTHON: python3.10  # Version must be specified for prod
      context: ../
      dockerfile: ./docker/Dockerfile
      target: tlt-devel
    image: intel/ai-tools:tlt-devel-latest
    pull_policy: always
  tlt-prod:
    extends:
      service: tlt-devel
    build:
      args:
        DATASET_DIR: /tmp/data
        OUTPUT_DIR: /tmp/output
      target: tlt-prod
    image: intel/ai-tools:tlt-prod-latest
    volumes:
    - /${DATASET_DIR:-$PWD/../data}:/tmp/data
    - /${OUTPUT_DIR:-$PWD/../output}:/tmp/output
  tlt-dist-devel:
    extends:
      service: tlt-prod
    build:
      args:
        HOROVOD_VERSION: 0.28.0
        ONECCL_VERSION: 2.0.0
        ONECCL_URL: https://developer.intel.com/ipex-whl-stable-cpu
      target: tlt-dist-devel
    image: intel/ai-tools:tlt-dist-devel-latest
  tlt-dist-prod:
    extends:
      service: tlt-dist-devel
    build:
      target: tlt-dist-prod
    command: |
      tlt train -f tensorflow
      --dataset-name cifar10
      --model-name resnet_v1_50
      --dataset-dir /tmp/data
      --output-dir /tmp/output
    environment:
      http_proxy: ${http_proxy}
      https_proxy: ${https_proxy}
      no_proxy: ${no_proxy}
    image: intel/ai-tools:tlt-dist-prod-latest
visual-quality-inspection/transfer-learning/docs/.gitignore
ADDED
@@ -0,0 +1 @@
markdown
visual-quality-inspection/transfer-learning/docs/DATASETS.rst
ADDED
@@ -0,0 +1,4 @@
:orphan:

.. include:: ../DATASETS.md
   :parser: myst_parser.sphinx_
visual-quality-inspection/transfer-learning/docs/GetStarted.rst
ADDED
@@ -0,0 +1,2 @@
.. include:: ../GetStarted.md
   :parser: myst_parser.sphinx_
visual-quality-inspection/transfer-learning/docs/Legal.rst
ADDED
@@ -0,0 +1,2 @@
.. include:: ../Legal.md
   :parser: myst_parser.sphinx_
visual-quality-inspection/transfer-learning/docs/Makefile
ADDED
@@ -0,0 +1,43 @@
#
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#

# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS  ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR   = .
BUILDDIR    = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

LISTEN_IP ?= 127.0.0.1
LISTEN_PORT ?= 9999
serve:
	@python -m http.server --directory ./_build/html ${LISTEN_PORT} --bind ${LISTEN_IP}
visual-quality-inspection/transfer-learning/docs/Models.rst
ADDED
@@ -0,0 +1,2 @@
.. include:: ../Models.md
   :parser: myst_parser.sphinx_
visual-quality-inspection/transfer-learning/docs/README.md
ADDED
@@ -0,0 +1,58 @@
# Building Documentation

## Sphinx Documentation

Install `tlt` and its dependencies for developers as described in the [Get Started](/GetStarted) guide.
```bash
# Run these commands from the root of the project
python3 -m virtualenv tlt_dev_venv
source tlt_dev_venv/bin/activate
python -m pip install --editable .
```

Install Pandoc, Sphinx, and a few other tools required to build the docs:
```bash
sudo apt-get install pandoc
pip install -r docs/requirements-docs.txt
```

Navigate to the `docs` directory and run the doctests to ensure all tests pass:
```bash
# Run this command from within the docs directory
make doctest
```

This should produce output similar to:
```bash
Doctest summary
===============
    6 tests
    0 failures in tests
    0 failures in setup code
    0 failures in cleanup code
build succeeded.
```

Finally, generate the HTML docs (from within the `docs` directory):
```bash
make clean html
```

The output HTML files will be located in `transfer-learning/docs/_build/html`.

To start a local HTTP server and view the docs locally, try:
```bash
make serve
Serving HTTP on 127.0.1.1 port 9999 (http://127.0.1.1:9999/) ...
```

If you need to view the docs from another machine, either use port forwarding or
provide appropriate values for the `LISTEN_IP`/`LISTEN_PORT` arguments.
For example:
```bash
LISTEN_IP=0.0.0.0 make serve
Serving HTTP on 0.0.0.0 port 9999 (http://0.0.0.0:9999/) ...
```

This runs the docs server on the host while listening on all interfaces.
You can then navigate to `HOSTNAME:9999` to view the docs.
visual-quality-inspection/transfer-learning/docs/_static/tlt-custom.css
ADDED
@@ -0,0 +1,43 @@
/* allow the page to use more of the window width */
.wy-nav-content {
    max-width: 1100px;
}

/* allow text wrapping in a table to avoid table horizontal scrolling */
.wy-table-responsive table td, .wy-table-responsive table th {
    white-space: normal !important;
}

/* center all images */
.rst-content img {
    margin-left: auto;
    margin-right: auto;
    display: block;
}

/* add an underline to title headings and wrap long API headings
 * Note: we use JavaScript to add a zero-width space (&#8203;) after the dot
 * in really long H1 titles created automatically from the code */
.rst-content h1,h2,h3,h4,h5 {
    text-decoration: underline;
    word-wrap: break-word;
}

/* add link color to module xref generated by autodoc */
.rst-content a.internal code.xref span.pre {
    color: #2980b9;
}

/* change red text color to dark gray in code literals */
.rst-content code.literal, .rst-content tt.literal {
    color: #404040;
}

/* change background color of search area/site title to increase contrast */
.wy-side-nav-search {
    background-color: #2f71b2;
}
/* change href link color to increase contrast */
a {
    color: #2f71b2;
}
visual-quality-inspection/transfer-learning/docs/_static/tlt-custom.js
ADDED
@@ -0,0 +1,19 @@
/* Extra tlt-specific javascript */

$(document).ready(function(){

    /* open external links in a new tab */
    $('a[class*=external]').attr({target: '_blank', rel: 'noopener'});

    /* add word break points (zero-width space) after a period in really long titles */
    $('h1').html(function(index, html){
        return html.replace(/\./g, '.\u200B');
    });

    /* copy each image's alt tag to its title so browsers show hover-text tooltips
     * (Looks like the myst-parser isn't passing the title tag through to Sphinx,
     * but is passing the alt tag) */
    $("img[alt]").each(function(){
        $(this).attr('title', $(this).attr('alt'));
    });
});
visual-quality-inspection/transfer-learning/docs/_templates/footer.html
ADDED
@@ -0,0 +1,5 @@
{% extends '!footer.html' %}
{% block extrafooter %}
*Other names and brands may be claimed as the property of others.
<a href="http://www.intel.com/content/www/us/en/legal/trademarks.html">Trademarks</a>
{% endblock %}
visual-quality-inspection/transfer-learning/docs/api.rst
ADDED
@@ -0,0 +1,132 @@
API Reference
=============

Datasets
--------

.. currentmodule:: tlt.datasets

The simplest way to create datasets is with the dataset factory methods :meth:`load_dataset`, for using a
custom dataset, and :meth:`get_dataset`, for downloading and using a third-party dataset from a catalog such as TensorFlow
Datasets or Torchvision.

Factory Methods
***************

.. automodule:: tlt.datasets.dataset_factory
   :members: load_dataset, get_dataset
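For example, a minimal sketch of fetching a catalog dataset with :meth:`get_dataset`; the
argument names here are illustrative assumptions, so consult the generated reference above
for the actual signature:

.. code-block:: python

   from tlt.datasets.dataset_factory import get_dataset

   # Illustrative arguments only -- not the documented signature.
   dataset = get_dataset('/tmp/data', 'image_classification', 'tensorflow',
                         dataset_name='tf_flowers')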
Class Reference
***************

Image Classification
^^^^^^^^^^^^^^^^^^^^

.. currentmodule:: tlt.datasets.image_classification

.. autosummary::
   :toctree: _autosummary
   :nosignatures:

   tfds_image_classification_dataset.TFDSImageClassificationDataset
   torchvision_image_classification_dataset.TorchvisionImageClassificationDataset
   tf_custom_image_classification_dataset.TFCustomImageClassificationDataset
   pytorch_custom_image_classification_dataset.PyTorchCustomImageClassificationDataset
   image_classification_dataset.ImageClassificationDataset

Text Classification
^^^^^^^^^^^^^^^^^^^

.. currentmodule:: tlt.datasets.text_classification

.. autosummary::
   :toctree: _autosummary
   :nosignatures:

   tfds_text_classification_dataset.TFDSTextClassificationDataset
   hf_text_classification_dataset.HFTextClassificationDataset
   tf_custom_text_classification_dataset.TFCustomTextClassificationDataset
   hf_custom_text_classification_dataset.HFCustomTextClassificationDataset
   text_classification_dataset.TextClassificationDataset

Base Classes
^^^^^^^^^^^^

.. note:: Users should rarely need to interact directly with these.

.. currentmodule:: tlt.datasets

.. autosummary::
   :toctree: _autosummary
   :nosignatures:

   pytorch_dataset.PyTorchDataset
   tf_dataset.TFDataset
   hf_dataset.HFDataset
   dataset.BaseDataset

Models
------

.. currentmodule:: tlt.models

Discover and work with available models by using model factory methods. The :meth:`get_model` function will download
third-party models, while the :meth:`load_model` function will load a custom model, from either a path location or a
model object in memory. The model discovery and inspection methods are :meth:`get_supported_models` and
:meth:`print_supported_models`.

Factory Methods
***************

.. automodule:: tlt.models.model_factory
   :members: get_model, load_model, get_supported_models, print_supported_models
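Similarly, a minimal sketch of discovering and fetching a model; again, the argument names
are illustrative assumptions rather than the documented signature:

.. code-block:: python

   from tlt.models.model_factory import get_model, print_supported_models

   print_supported_models()  # inspect the supported model list
   # Illustrative arguments only -- not the documented signature.
   model = get_model('resnet_v1_50', 'tensorflow')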
Class Reference
***************

Image Classification
^^^^^^^^^^^^^^^^^^^^

.. currentmodule:: tlt.models.image_classification

.. autosummary::
   :toctree: _autosummary
   :nosignatures:

   tfhub_image_classification_model.TFHubImageClassificationModel
   tf_image_classification_model.TFImageClassificationModel
   keras_image_classification_model.KerasImageClassificationModel
   torchvision_image_classification_model.TorchvisionImageClassificationModel
   pytorch_image_classification_model.PyTorchImageClassificationModel
   pytorch_hub_image_classification_model.PyTorchHubImageClassificationModel
   image_classification_model.ImageClassificationModel

Text Classification
^^^^^^^^^^^^^^^^^^^

.. currentmodule:: tlt.models.text_classification

.. autosummary::
   :toctree: _autosummary
   :nosignatures:

   tf_text_classification_model.TFTextClassificationModel
   pytorch_hf_text_classification_model.PyTorchHFTextClassificationModel
   tf_hf_text_classification_model.TFHFTextClassificationModel
   text_classification_model.TextClassificationModel

Base Classes
^^^^^^^^^^^^

.. note:: Users should rarely need to interact directly with these.

.. currentmodule:: tlt.models

.. autosummary::
   :toctree: _autosummary
   :nosignatures:

   pytorch_model.PyTorchModel
   tf_model.TFModel
   hf_model.HFModel
   model.BaseModel
visual-quality-inspection/transfer-learning/docs/cli.rst
ADDED
@@ -0,0 +1,7 @@
CLI Reference
=============

.. click:: tlt.tools.cli.main:cli_group
   :prog: tlt
   :nested: full
visual-quality-inspection/transfer-learning/docs/conf.py
ADDED
@@ -0,0 +1,111 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
#

# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
sys.setrecursionlimit(1500)
import sphinx_rtd_theme
from datetime import datetime

# -- Project information -----------------------------------------------------

project = 'Intel® Transfer Learning Tool'
author = 'Intel Corporation'
copyright = '2022-' + str(datetime.now().year) + u', ' + author

# The full version, including alpha/beta/rc tags
release = '0.2.0'


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'myst_parser',
    'nbsphinx',
    'nbsphinx_link',
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.napoleon',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    'sphinx_click',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.venv3', 'README.md']

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

html_last_updated_fmt = '%b %d, %Y'
html_show_sphinx = False
html_favicon = 'images/favicon-intel-32x32.png'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']


def setup(app):
    app.add_css_file("tlt-custom.css")
    app.add_js_file("tlt-custom.js")


autodoc_member_order = 'bysource'
nbsphinx_execute = 'never'
nbsphinx_prolog = """
:orphan:

"""
myst_heading_anchors = 2
suppress_warnings = ["myst.xref_missing", "myst.header"]

# ask the myst parser to process <img> tags so Sphinx can handle them properly
myst_enable_extensions = [ "html_image" ]
visual-quality-inspection/transfer-learning/docs/distributed.rst
ADDED
@@ -0,0 +1,4 @@
:orphan:

.. include:: ../tlt/distributed/README.md
   :parser: myst_parser.sphinx_
visual-quality-inspection/transfer-learning/docs/docbuild.rst
ADDED
@@ -0,0 +1,4 @@
:orphan:

.. include:: README.md
   :parser: myst_parser.sphinx_
|