diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/README.md b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ab93218765d04ff6b64a348019b64ae0b76145d5 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/README.md @@ -0,0 +1,19 @@ +# Diffusion Model Details + +This directory contains four stable diffusion projects. Each folder contains detailed instructions on how to use them. The stable-diffusion-v-2-1 project is specifically designed for inference, the stable-diffusion project is suitable for both inference and training and stable-diffusion-finetuning is for only training. + +Since Stable-diffusion-v-2-1 is newer version, it is recommended to consider it as the option for running `inference`. To run `training`, you can use the stable-diffusion project. + +### Overview: + +* stable-diffusion-v-2-1: is the latest Habana-optimized version of stable diffusion (v2.1) and is based on https://github.com/Stability-AI/stablediffusion/tree/d55bcd4d31d0316fcbdf552f2fd2628fdc812500. +* stable-diffusion: is designed for both inference and training, based on the first version of stable diffusion https://github.com/pesser/stable-diffusion/tree/a166aa7fbf578f41f855efeab2e14001d6732563. 
+* stable-diffusion-finetuning: is designed for training on stable diffusion (v2.1) and is based on https://github.com/cloneofsimo/lora/tree/bdd51b04c49fa90a88919a19850ec3b4cf3c5ecd + +### Supported Configuration +| Project | SynapseAI Version | Mode | +|:---------|-------------------|-------| +| stable-diffusion-v-2-1 | 1.14.0 | Inference | +| stable-diffusion | 1.14.0 | Training | +| stable-diffusion | 1.7.1 | Inference | +| stable-diffusion-finetuning | 1.14.0 | Training | diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/.gitignore b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..54742cbf0b8953a1b6e07508986bf7d7ed3d2975 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/.gitignore @@ -0,0 +1,165 @@ +# Generated by project +outputs/ + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# General MacOS +.DS_Store +.AppleDouble +.LSOverride + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# IDEs +.idea/ +.vscode/ diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/LICENSE b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..33d67dc6292a37458fac17ca301732e58472a981 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (C) 2022 Habana Labs, Ltd. an Intel Company +Copyright (c) 2022 Stability AI + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/LICENSE-MODEL b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/LICENSE-MODEL new file mode 100644 index 0000000000000000000000000000000000000000..dcd5fa1fd3342668f0e352238304c046da991ec4 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/LICENSE-MODEL @@ -0,0 +1,85 @@ +Copyright (C) 2022 Habana Labs, Ltd. an Intel Company +Copyright (c) 2022 Stability AI and contributors + +CreativeML Open RAIL++-M License +dated November 24, 2022 + +Section I: PREAMBLE + +Multimodal generative models are being widely adopted and used, and have the potential to transform the way artists, among other individuals, conceive and benefit from AI or ML technologies as a tool for content creation. + +Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations. + +In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the Model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. 
At the same time, we strive to promote open and responsible research on generative models for art and content generation. + +Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this License aims to strike a balance between both in order to enable responsible open-science in the field of AI. + +This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model. + +NOW THEREFORE, You and Licensor agree as follows: + +1. Definitions + +- "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document. +- "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License. +- "Output" means the results of operating a Model as embodied in informational content resulting therefrom. +- "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material. 
+- "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model. +- "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any. +- "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access. +- "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model. +- "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator. +- "Third Parties" means individuals or legal entities that are not under common control with Licensor or You. 
+- "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." +- "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model. + +Section II: INTELLECTUAL PROPERTY RIGHTS + +Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model. +3. Grant of Patent License. 
Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed. + +Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION + +4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions: +Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material. 
+You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License; +You must cause any modified files to carry prominent notices stating that You changed the files; +You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model. +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License. +5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5). +6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License. + +Section IV: OTHER PROVISIONS + +7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License. +8. Trademarks and related. 
Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors. +9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License. +10. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. +11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. 
However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. +12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein. + +END OF TERMS AND CONDITIONS + + + + +Attachment A + +Use Restrictions + +You agree not to use the Model or Derivatives of the Model: + +- In any way that violates any applicable national, federal, state, local or international law or regulation; +- For the purpose of exploiting, harming or attempting to exploit or harm minors in any way; +- To generate or disseminate verifiably false information and/or content with the purpose of harming others; +- To generate or disseminate personal identifiable information that can be used to harm an individual; +- To defame, disparage or otherwise harass others; +- For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation; +- For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics; +- To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm; +- For any use intended to or which has the effect of discriminating against individuals or groups based 
on legally protected characteristics or categories; +- To provide medical advice and medical results interpretation; +- To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use). + diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/README.md b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c524f4b4d0343c04bde717eb417533c2e68041bb --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/README.md @@ -0,0 +1,105 @@ +# Stable Diffusion 2.1 for PyTorch + +This directory provides scripts to perform text-to-image inference on a stable diffusion 2.1 model and is tested and maintained by Habana. + +For more information on training and inference of deep learning models using Gaudi, refer to [developer.habana.ai](https://developer.habana.ai/resources/). + +## Table of Contents + +* [Model-References](../../../README.md) +* [Model Overview](#model-overview) +* [Setup](#setup) +* [Model Checkpoint](#model-checkpoint) +* [Inference and Examples](#inference-and-examples) +* [Supported Configuration](#supported-configuration) +* [Changelog](#changelog) +* [Known Issues](#known-issues) + +## Model Overview +This implementation is based on the following paper - [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752). + +### How to use +Users acknowledge and understand that the models referenced by Habana are mere examples for models that can be run on Gaudi. 
+Users bear sole liability and responsibility to follow and comply with any third party licenses pertaining to such models, +and Habana Labs disclaims and will bear no any warranty or liability with respect to users' use or compliance with such third party licenses. + +## Setup +Please follow the instructions provided in the [Gaudi Installation Guide](https://docs.habana.ai/en/latest/Installation_Guide/index.html) +to set up the environment including the `$PYTHON` environment variable. To achieve the best performance, please follow the methods outlined in the [Optimizing Training Platform guide](https://docs.habana.ai/en/latest/PyTorch/Model_Optimization_PyTorch/Optimization_in_Training_Platform.html). +The guides will walk you through the process of setting up your system to run the model on Gaudi. + +### Clone Habana Model-References +In the docker container, clone this repository and switch to the branch that matches your SynapseAI version. +You can run the [`hl-smi`](https://docs.habana.ai/en/latest/System_Management_Tools_Guide/System_Management_Tools.html#hl-smi-utility-options) utility to determine the SynapseAI version. +```bash +git clone -b [SynapseAI version] https://github.com/HabanaAI/Model-References +``` + +### Install Model Requirements +1. In the docker container, go to the model directory: +```bash +cd Model-References/PyTorch/generative_models/stable-diffusion-v-2-1 +``` + +2. Install the required packages using pip. +```bash +pip install -r requirements.txt --user +``` + +## Model Checkpoint +### Text-to-Image +Download the pre-trained weights for 768x768 images (4.9GB) +```bash +wget https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.ckpt +``` +and/or 512x512 images (4.9GB). 
+```bash +wget https://huggingface.co/stabilityai/stable-diffusion-2-1-base/resolve/main/v2-1_512-ema-pruned.ckpt +``` + +## Inference and Examples +The following command generates a total of 3 images of size 768x768 and saves each sample individually as well as a grid of size `n_iter` x `n_samples` at the specified output location (default: `outputs/txt2img-samples`). + +```bash +$PYTHON scripts/txt2img.py --prompt "a professional photograph of an astronaut riding a horse" --ckpt v2-1_768-ema-pruned.ckpt --config configs/stable-diffusion/v2-inference-v.yaml --H 768 --W 768 --n_samples 1 --n_iter 3 --use_hpu_graph +``` +To generate 3 images of a 512x512 size using a k-diffusion dpmpp_2m sampler with 35 steps, use the command: +```bash +$PYTHON scripts/txt2img.py --prompt "a professional photograph of an astronaut riding a horse" --ckpt v2-1_512-ema-pruned.ckpt --config configs/stable-diffusion/v2-inference.yaml --H 512 --W 512 --n_samples 1 --n_iter 3 --steps 35 --k_sampler dpmpp_2m --use_hpu_graph +``` + +For a more detailed description of parameters, please use the following command to see a help message: +```bash +$PYTHON scripts/txt2img.py -h +``` + +## Performance +The first two batches of images generate a performance penalty. +All subsequent batches will be generated much faster. + +## Supported Configuration +| Validated on | SynapseAI Version | PyTorch Version | Mode | +|---------|-------------------|-----------------|----------------| +| Gaudi | 1.14.0 | 2.1.1 | Inference | +| Gaudi2 | 1.14.0 | 2.1.1 | Inference | + +## Changelog +### 1.8.0 +Initial release. + +### 1.10.0 +Decreased host overhead to minimum by rewriting samplers and the main sampling loop. + +### Script Modifications +Major changes done to the original model from [Stability-AI/stablediffusion](https://github.com/Stability-AI/stablediffusion/tree/d55bcd4d31d0316fcbdf552f2fd2628fdc812500) repository: +* Changed README. +* Added HPU support. 
+* Modified configs/stable-diffusion/v2-inference-v.yaml and configs/stable-diffusion/v2-inference.yaml +* Changed code around einsum operation in ldm/modules/attention.py +* randn moved to cpu in scripts/txt2img.py and ldm/models/diffusion/ddim.py +* sampling is rewritten in an accelerator-friendly way + +## Known Issues +* Initial random noise generation has been moved to CPU. +Contrary to when noise is generated on Gaudi, CPU-generated random noise produces consistent output regardless of whether HPU Graph API is used or not. +* The model supports batch sizes up to 16 on Gaudi and up to 8 on Gaudi2 for output images 512x512px, and batch size 1 for images 768x768px on Gaudi and Gaudi2. diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/environment.yaml b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/environment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..15d014186dd3bfee12a6951f7aee3e4951108674 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/environment.yaml @@ -0,0 +1,19 @@ +name: ldm +channels: + - pytorch + - defaults +dependencies: + - python=3.8.5 + - pip=20.3 + - cudatoolkit=11.3 + - pytorch=1.12.1 + - torchvision=0.13.1 + - numpy=1.23.1 + - pip: + - pytorch-lightning==1.7.7 + - omegaconf==2.1.1 + - test-tube>=0.7.5 + - einops==0.3.0 + - transformers==4.19.2 + - open_clip_torch==2.7.0 + - -e . 
diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/data/__init__.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/autoencoder.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/autoencoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..d122549995ce2cd64092c81a58419ed4a15a02fd
--- /dev/null
+++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/autoencoder.py
@@ -0,0 +1,219 @@
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
from contextlib import contextmanager

from ldm.modules.diffusionmodules.model import Encoder, Decoder
from ldm.modules.distributions.distributions import DiagonalGaussianDistribution

from ldm.util import instantiate_from_config
from ldm.modules.ema import LitEma


class AutoencoderKL(pl.LightningModule):
    # KL-regularized autoencoder used as the first stage of latent diffusion:
    # images are encoded to a diagonal Gaussian posterior over latents and
    # decoded back; trained adversarially via a two-optimizer Lightning loop.
    def __init__(self,
                 ddconfig,
                 lossconfig,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=[],  # NOTE(review): mutable default; only read here, never mutated
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 ema_decay=None,
                 learn_logvar=False
                 ):
        super().__init__()
        self.learn_logvar = learn_logvar
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        # double_z: encoder emits mean and logvar channels, hence the 2* factors below
        assert ddconfig["double_z"]
        self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        self.embed_dim = embed_dim
        if colorize_nlabels is not None:
            assert type(colorize_nlabels)==int
            # random projection used by to_rgb() to visualize segmentation inputs
            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            self.monitor = monitor

        self.use_ema = ema_decay is not None
        if self.use_ema:
            self.ema_decay = ema_decay
            assert 0. < ema_decay < 1.
            self.model_ema = LitEma(self, decay=ema_decay)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)

    def init_from_ckpt(self, path, ignore_keys=list()):
        # Load weights from a Lightning checkpoint, dropping any entry whose key
        # starts with one of ignore_keys; strict=False tolerates missing keys.
        sd = torch.load(path, map_location="cpu")["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        self.load_state_dict(sd, strict=False)
        print(f"Restored from {path}")

    @contextmanager
    def ema_scope(self, context=None):
        # Temporarily swap the live weights for their EMA shadow copies; the
        # training weights are always restored on exit (even on exception).
        if self.use_ema:
            self.model_ema.store(self.parameters())
            self.model_ema.copy_to(self)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    def on_train_batch_end(self, *args, **kwargs):
        # Advance the EMA shadow weights after every optimizer step.
        if self.use_ema:
            self.model_ema(self)

    def encode(self, x):
        # Returns the diagonal Gaussian posterior q(z|x).
        h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        return posterior

    def decode(self, z):
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        return dec

    def forward(self, input, sample_posterior=True):
        # Full autoencode pass; sample_posterior chooses sampling vs. posterior mode.
        posterior = self.encode(input)
        if sample_posterior:
            z = posterior.sample()
        else:
            z = posterior.mode()
        dec = self.decode(z)
        return dec, posterior

    def get_input(self, batch, k):
        # batch[k] is NHWC (or NHW) image data; returns contiguous NCHW float.
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
        return x

    def training_step(self, batch, batch_idx, optimizer_idx):
        # Two-optimizer GAN-style training: idx 0 = autoencoder, idx 1 = discriminator.
        inputs = self.get_input(batch, self.image_key)
        reconstructions, posterior = self(inputs)

        if optimizer_idx == 0:
            # train encoder+decoder+logvar
            aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train")
            self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
            return aeloss

        if optimizer_idx == 1:
            # train the discriminator
            discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
                                                last_layer=self.get_last_layer(), split="train")

            self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
            return discloss

    def validation_step(self, batch, batch_idx):
        log_dict = self._validation_step(batch, batch_idx)
        # NOTE(review): EMA metrics are logged inside _validation_step via
        # self.log_dict; the returned log_dict_ema is intentionally unused here.
        with self.ema_scope():
            log_dict_ema = self._validation_step(batch, batch_idx, postfix="_ema")
        return log_dict

    def _validation_step(self, batch, batch_idx, postfix=""):
        # Shared validation body; postfix distinguishes EMA ("_ema") metrics.
        inputs = self.get_input(batch, self.image_key)
        reconstructions, posterior = self(inputs)
        aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
                                        last_layer=self.get_last_layer(), split="val"+postfix)

        discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
                                            last_layer=self.get_last_layer(), split="val"+postfix)

        self.log(f"val{postfix}/rec_loss", log_dict_ae[f"val{postfix}/rec_loss"])
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def configure_optimizers(self):
        # Separate Adam optimizers for autoencoder and discriminator, matching
        # the optimizer_idx branches in training_step.
        lr = self.learning_rate
        ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(
            self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())
        if self.learn_logvar:
            print(f"{self.__class__.__name__}: Learning logvar")
            ae_params_list.append(self.loss.logvar)
        opt_ae = torch.optim.Adam(ae_params_list,
                                  lr=lr, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
                                    lr=lr, betas=(0.5, 0.9))
        return [opt_ae, opt_disc], []

    def get_last_layer(self):
        # Used by the loss to balance adversarial vs. reconstruction gradients.
        return self.decoder.conv_out.weight

    @torch.no_grad()
    def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):
        # Collect input/reconstruction/sample images (and EMA variants) for logging.
        log = dict()
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        if not only_inputs:
            xrec, posterior = self(x)
            if x.shape[1] > 3:
                # colorize with random projection
                assert xrec.shape[1] > 3
                x = self.to_rgb(x)
                xrec = self.to_rgb(xrec)
            log["samples"] = self.decode(torch.randn_like(posterior.sample()))
            log["reconstructions"] = xrec
            if log_ema or self.use_ema:
                with self.ema_scope():
                    xrec_ema, posterior_ema = self(x)
                    if x.shape[1] > 3:
                        # colorize with random projection
                        assert xrec_ema.shape[1] > 3
                        xrec_ema = self.to_rgb(xrec_ema)
                    log["samples_ema"] = self.decode(torch.randn_like(posterior_ema.sample()))
                    log["reconstructions_ema"] = xrec_ema
        log["inputs"] = x
        return log

    def to_rgb(self, x):
        # Project a multi-channel segmentation map to 3 channels via the fixed
        # random "colorize" buffer, then normalize to [-1, 1] for display.
        assert self.image_key == "segmentation"
        if not hasattr(self, "colorize"):
            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
        return x


class IdentityFirstStage(torch.nn.Module):
    # No-op stand-in for a first-stage model: encode/decode/quantize all return
    # their input unchanged (optionally mimicking the VQ interface tuple shape).
    def __init__(self, *args, vq_interface=False, **kwargs):
        self.vq_interface = vq_interface
        super().__init__()

    def encode(self, x, *args, **kwargs):
        return x

    def decode(self, x, *args, **kwargs):
        return x

    def quantize(self, x, *args, **kwargs):
        if self.vq_interface:
            # match the (quantized, diff, indices) tuple returned by VQ models
            return x, None, [None, None, None]
        return x

    def forward(self, x, *args, **kwargs):
        return x

diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/__init__.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/ddim.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/ddim.py
new file mode 100644
index 0000000000000000000000000000000000000000..bdda1fa932327d6a68d806ddc0285a1974c5aea2
--- /dev/null
+++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/ddim.py
@@ -0,0 +1,158 @@
###############################################################################
# Copyright (C) 2023 Habana Labs, Ltd.
# an Intel Company
###############################################################################
import torch
import numpy as np

from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
from ldm.models.diffusion.sampler import Sampler


class DDIMSampler(Sampler):
    # Habana-optimized DDIM sampler: the timestep schedule and all per-step
    # coefficients are precomputed in compile(), so the per-iteration work
    # (one_step) is a fixed computation suitable for graph replay on HPU.
    def __init__(self, model, schedule="linear", **kwargs):
        super().__init__()
        self.model = model
        self.model_wrap = model.apply_model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule
        self.rand_scale = 1.0

    def register_buffer(self, name, attr):
        # Plain attribute assignment (NOT nn.Module.register_buffer); tensors
        # are moved to CUDA only when the wrapped model reports a CUDA device.
        if self.model.device == "cuda":
            if type(attr) == torch.Tensor:
                if attr.device != torch.device("cuda"):
                    attr = attr.to(torch.device("cuda"))
        setattr(self, name, attr)

    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
        # Precompute the DDIM timestep subset and the alpha/sigma coefficient
        # tables (float32, on the model's device) used by one_step().
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'

        def to_torch(x): return x.clone().detach().to(
            torch.float32).to(self.model.device)

        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(
            self.model.alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod',
                             to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(
            np.sqrt(1. - alphas_cumprod.cpu())))
        self.register_buffer('log_one_minus_alphas_cumprod',
                             to_torch(np.log(1. - alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recip_alphas_cumprod',
                             to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(
            np.sqrt(1. / alphas_cumprod.cpu() - 1)))

        # ddim sampling parameters
        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
                                                                                   ddim_timesteps=self.ddim_timesteps,
                                                                                   eta=ddim_eta, verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas',
                             np.sqrt(1. - ddim_alphas))
        sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
            (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
                1 - self.alphas_cumprod / self.alphas_cumprod_prev))
        self.register_buffer('ddim_sigmas_for_original_num_steps',
                             sigmas_for_original_sampling_steps)

    @torch.no_grad()
    def compile(self,
                S,
                shape,
                batch_size=1,
                eta=0.,
                temperature=1.,
                verbose=False,
                unconditional_guidance_scale=1.,
                use_original_steps=False,
                **kwargs
                ):
        # One-time setup for a fixed (steps, shape, batch) configuration: builds
        # the flipped timestep list and a stacked per-step coefficient table so
        # sampler_step() only shifts/reads precomputed values.
        self.steps = S
        self.batch_size = batch_size
        self.shape = shape
        self.eta = eta
        self.temperature = temperature
        self.cond_scale = unconditional_guidance_scale
        self.x_shape = (self.batch_size,
                        self.shape[0], self.shape[1], self.shape[2])

        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)
        print(f'Data shape for DDIM sampling is {size}, eta {eta}')

        # timesteps flipped so index 0 is the most-noised step
        self.ts_list = torch.Tensor(
            np.expand_dims(self.ddim_timesteps, axis=0))
        self.ts_list = self.ts_list.fliplr().to(torch.int32).to(self.model.device)

        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
        alphas_prev = torch.Tensor(alphas_prev)

        # (name, per-step values) pairs; order fixed, one_step indexes by it
        self.params_init = [
            ('alpha', alphas),
            ('alpha_prev', alphas_prev),
            ('rsqrt(alpha)', alphas.rsqrt()),
            ('sqrt(alpha_prev)', alphas_prev.sqrt()),
            ('sqrt(1-alpha)', sqrt_one_minus_alphas),
            ('sigma', torch.Tensor(sigmas)),
            ('dir', torch.sqrt(1. - alphas_prev - sigmas**2))
        ]

        self.params = torch.stack(list(map(lambda x: x[1], self.params_init)))
        self.params = self.params.fliplr().to(
            self.model.betas.dtype).to(self.model.device)

    def one_step(self, x, c_in, ts_t, param_t):
        # Single DDIM update x_t -> x_{t-1} using the precompiled coefficients.
        ts = ts_t[0].broadcast_to((self.batch_size)).contiguous()

        param = {}
        for idx, val in enumerate(self.params_init):
            param[val[0]] = param_t[idx].broadcast_to(
                (self.batch_size, 1, 1, 1)).contiguous()

        model_output = self.run_model(x, c_in, ts)

        if self.model.parameterization == "v":
            e_t = self.model.predict_eps_from_z_and_v(x, ts, model_output)
        else:
            e_t = model_output

        # current prediction for x_0
        if self.model.parameterization != "v":
            pred_x0 = (x - param['sqrt(1-alpha)'] *
                       e_t) * param['rsqrt(alpha)']
        else:
            pred_x0 = self.model.predict_start_from_z_and_v(
                x, ts, model_output)

        # direction pointing to x_t
        dir_xt = param['dir'] * e_t
        noise = param['sigma'] * \
            noise_like(x.shape, self.model.device, False) * self.temperature
        x = param['sqrt(alpha_prev)'] * pred_x0 + dir_xt + noise
        return x

    def sampler_step(self, arg):
        # Loop body: consume column 0, then rotate the schedules left so the
        # next call sees the following timestep in column 0.
        x, c_in, ts, params = arg
        x = self.one_step(x, c_in, ts[:, 0], params[:, 0])
        ts = torch.roll(ts, shifts=-1, dims=1)
        params = torch.roll(params, shifts=-1, dims=1)
        return [x, c_in, ts, params]

    def init_loop(self, x, c_in):
        # Initial loop state; clones keep the compiled schedules reusable.
        return [x, c_in, self.ts_list.clone(), self.params.clone()]
diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/ddpm.py
b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/ddpm.py new file mode 100644 index 0000000000000000000000000000000000000000..fbbfeca3fd8380c645d642f2193e3cc79839ab2f --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/ddpm.py @@ -0,0 +1,1795 @@ +""" +wild mixture of +https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py +https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py +https://github.com/CompVis/taming-transformers +-- merci +""" + +import torch +import torch.nn as nn +import numpy as np +import pytorch_lightning as pl +from torch.optim.lr_scheduler import LambdaLR +from einops import rearrange, repeat +from contextlib import contextmanager, nullcontext +from functools import partial +import itertools +from tqdm import tqdm +from torchvision.utils import make_grid +from pytorch_lightning.utilities.rank_zero import rank_zero_only +from omegaconf import ListConfig + +from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config +from ldm.modules.ema import LitEma +from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution +from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL +from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like +from ldm.models.diffusion.ddim import DDIMSampler + + +__conditioning_keys__ = {'concat': 'c_concat', + 'crossattn': 'c_crossattn', + 'adm': 'y'} + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +def uniform_on_device(r1, r2, shape, device): + 
return (r1 - r2) * torch.rand(*shape, device=device) + r2 + + +class DDPM(pl.LightningModule): + # classic DDPM with Gaussian diffusion, in image space + def __init__(self, + unet_config, + timesteps=1000, + beta_schedule="linear", + loss_type="l2", + ckpt_path=None, + ignore_keys=[], + load_only_unet=False, + monitor="val/loss", + use_ema=True, + first_stage_key="image", + image_size=256, + channels=3, + log_every_t=100, + clip_denoised=True, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + given_betas=None, + original_elbo_weight=0., + v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta + l_simple_weight=1., + conditioning_key=None, + parameterization="eps", # all assuming fixed variance schedules + scheduler_config=None, + use_positional_encodings=False, + learn_logvar=False, + logvar_init=0., + make_it_fit=False, + ucg_training=None, + reset_ema=False, + reset_num_ema_updates=False, + ): + super().__init__() + assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' + self.parameterization = parameterization + print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") + self.cond_stage_model = None + self.clip_denoised = clip_denoised + self.log_every_t = log_every_t + self.first_stage_key = first_stage_key + self.image_size = image_size # try conv? 
+ self.channels = channels + self.use_positional_encodings = use_positional_encodings + self.model = DiffusionWrapper(unet_config, conditioning_key) + count_params(self.model, verbose=True) + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self.model) + print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + self.use_scheduler = scheduler_config is not None + if self.use_scheduler: + self.scheduler_config = scheduler_config + + self.v_posterior = v_posterior + self.original_elbo_weight = original_elbo_weight + self.l_simple_weight = l_simple_weight + + if monitor is not None: + self.monitor = monitor + self.make_it_fit = make_it_fit + if reset_ema: assert exists(ckpt_path) + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) + if reset_ema: + assert self.use_ema + print(f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") + self.model_ema = LitEma(self.model) + if reset_num_ema_updates: + print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") + assert self.use_ema + self.model_ema.reset_num_updates() + + self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, + linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) + + self.loss_type = loss_type + + self.learn_logvar = learn_logvar + self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) + if self.learn_logvar: + self.logvar = nn.Parameter(self.logvar, requires_grad=True) + + self.ucg_training = ucg_training or dict() + if self.ucg_training: + self.ucg_prng = np.random.RandomState() + + def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + if exists(given_betas): + betas = given_betas + else: + betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, 
linear_end=linear_end, + cosine_s=cosine_s) + alphas = 1. - betas + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) + + timesteps, = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' + + to_torch = partial(torch.tensor, dtype=torch.float32) + + self.register_buffer('betas', to_torch(betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( + 1. - alphas_cumprod) + self.v_posterior * betas + # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) + self.register_buffer('posterior_variance', to_torch(posterior_variance)) + # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain + self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) + self.register_buffer('posterior_mean_coef1', to_torch( + betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) + self.register_buffer('posterior_mean_coef2', to_torch( + (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. 
- alphas_cumprod))) + + if self.parameterization == "eps": + lvlb_weights = self.betas ** 2 / ( + 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) + elif self.parameterization == "x0": + lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) + elif self.parameterization == "v": + lvlb_weights = torch.ones_like(self.betas ** 2 / ( + 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) + else: + raise NotImplementedError("mu not supported") + lvlb_weights[0] = lvlb_weights[1] + self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) + assert not torch.isnan(self.lvlb_weights).all() + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.model.parameters()) + self.model_ema.copy_to(self.model) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.model.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + @torch.no_grad() + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + if self.make_it_fit: + n_params = len([name for name, _ in + itertools.chain(self.named_parameters(), + self.named_buffers())]) + for name, param in tqdm( + itertools.chain(self.named_parameters(), + self.named_buffers()), + desc="Fitting old weights to new weights", + total=n_params + ): + if not name in sd: + continue + old_shape = sd[name].shape + new_shape = param.shape + assert len(old_shape) == len(new_shape) + if len(new_shape) > 2: + # we only modify first two axes + assert new_shape[2:] == old_shape[2:] + # assumes first 
axis corresponds to output dim + if not new_shape == old_shape: + new_param = param.clone() + old_param = sd[name] + if len(new_shape) == 1: + for i in range(new_param.shape[0]): + new_param[i] = old_param[i % old_shape[0]] + elif len(new_shape) >= 2: + for i in range(new_param.shape[0]): + for j in range(new_param.shape[1]): + new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] + + n_used_old = torch.ones(old_shape[1]) + for j in range(new_param.shape[1]): + n_used_old[j % old_shape[1]] += 1 + n_used_new = torch.zeros(new_shape[1]) + for j in range(new_param.shape[1]): + n_used_new[j] = n_used_old[j % old_shape[1]] + + n_used_new = n_used_new[None, :] + while len(n_used_new.shape) < len(new_shape): + n_used_new = n_used_new.unsqueeze(-1) + new_param /= n_used_new + + sd[name] = new_param + + missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( + sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys:\n {missing}") + if len(unexpected) > 0: + print(f"\nUnexpected Keys:\n {unexpected}") + + def q_mean_variance(self, x_start, t): + """ + Get the distribution q(x_t | x_0). + :param x_start: the [N x C x ...] tensor of noiseless inputs. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. + :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
+ """ + mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) + variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) + log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) + return mean, variance, log_variance + + def predict_start_from_noise(self, x_t, t, noise): + return ( + extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - + extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise + ) + + def predict_start_from_z_and_v(self, x_t, t, v): + # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) + # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) + return ( + extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v + ) + + def predict_eps_from_z_and_v(self, x_t, t, v): + return ( + extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t + ) + + def q_posterior(self, x_start, x_t, t): + posterior_mean = ( + extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t + ) + posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) + return posterior_mean, posterior_variance, posterior_log_variance_clipped + + def p_mean_variance(self, x, t, clip_denoised: bool): + model_out = self.model(x, t) + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + if clip_denoised: + x_recon.clamp_(-1., 1.) 
+ + model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): + b, *_, device = *x.shape, x.device + model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) + noise = noise_like(x.shape, device, repeat_noise) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def p_sample_loop(self, shape, return_intermediates=False): + device = self.betas.device + b = shape[0] + img = torch.randn(shape, device=device) + intermediates = [img] + for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): + img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), + clip_denoised=self.clip_denoised) + if i % self.log_every_t == 0 or i == self.num_timesteps - 1: + intermediates.append(img) + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample(self, batch_size=16, return_intermediates=False): + image_size = self.image_size + channels = self.channels + return self.p_sample_loop((batch_size, channels, image_size, image_size), + return_intermediates=return_intermediates) + + def q_sample(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) + + def get_v(self, x, noise, t): + return ( + extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x + ) + + def get_loss(self, pred, target, mean=True): + if 
self.loss_type == 'l1': + loss = (target - pred).abs() + if mean: + loss = loss.mean() + elif self.loss_type == 'l2': + if mean: + loss = torch.nn.functional.mse_loss(target, pred) + else: + loss = torch.nn.functional.mse_loss(target, pred, reduction='none') + else: + raise NotImplementedError("unknown loss type '{loss_type}'") + + return loss + + def p_losses(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_out = self.model(x_noisy, t) + + loss_dict = {} + if self.parameterization == "eps": + target = noise + elif self.parameterization == "x0": + target = x_start + elif self.parameterization == "v": + target = self.get_v(x_start, noise, t) + else: + raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") + + loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) + + log_prefix = 'train' if self.training else 'val' + + loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) + loss_simple = loss.mean() * self.l_simple_weight + + loss_vlb = (self.lvlb_weights[t] * loss).mean() + loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) + + loss = loss_simple + self.original_elbo_weight * loss_vlb + + loss_dict.update({f'{log_prefix}/loss': loss}) + + return loss, loss_dict + + def forward(self, x, *args, **kwargs): + # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size + # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' + t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() + return self.p_losses(x, t, *args, **kwargs) + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = rearrange(x, 'b h w c -> b c h w') + x = x.to(memory_format=torch.contiguous_format).float() + return x + + def shared_step(self, batch): + x = self.get_input(batch, self.first_stage_key) + loss, loss_dict = 
self(x) + return loss, loss_dict + + def training_step(self, batch, batch_idx): + for k in self.ucg_training: + p = self.ucg_training[k]["p"] + val = self.ucg_training[k]["val"] + if val is None: + val = "" + for i in range(len(batch[k])): + if self.ucg_prng.choice(2, p=[1 - p, p]): + batch[k][i] = val + + loss, loss_dict = self.shared_step(batch) + + self.log_dict(loss_dict, prog_bar=True, + logger=True, on_step=True, on_epoch=True) + + self.log("global_step", self.global_step, + prog_bar=True, logger=True, on_step=True, on_epoch=False) + + if self.use_scheduler: + lr = self.optimizers().param_groups[0]['lr'] + self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) + + return loss + + @torch.no_grad() + def validation_step(self, batch, batch_idx): + _, loss_dict_no_ema = self.shared_step(batch) + with self.ema_scope(): + _, loss_dict_ema = self.shared_step(batch) + loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} + self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) + self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self.model) + + def _get_rows_from_list(self, samples): + n_imgs_per_row = len(samples) + denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') + denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): + log = dict() + x = self.get_input(batch, self.first_stage_key) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + x = x.to(self.device)[:N] + log["inputs"] = x + + # get diffusion row + diffusion_row = list() + x_start = x[:n_row] + + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == 
self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(x_start) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + diffusion_row.append(x_noisy) + + log["diffusion_row"] = self._get_rows_from_list(diffusion_row) + + if sample: + # get denoise row + with self.ema_scope("Plotting"): + samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) + + log["samples"] = samples + log["denoise_row"] = self._get_rows_from_list(denoise_row) + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.learn_logvar: + params = params + [self.logvar] + opt = torch.optim.AdamW(params, lr=lr) + return opt + + +class LatentDiffusion(DDPM): + """main class""" + + def __init__(self, + first_stage_config, + cond_stage_config, + num_timesteps_cond=None, + cond_stage_key="image", + cond_stage_trainable=False, + concat_mode=True, + cond_stage_forward=None, + conditioning_key=None, + scale_factor=1.0, + scale_by_std=False, + force_null_conditioning=False, + *args, **kwargs): + self.force_null_conditioning = force_null_conditioning + self.num_timesteps_cond = default(num_timesteps_cond, 1) + self.scale_by_std = scale_by_std + assert self.num_timesteps_cond <= kwargs['timesteps'] + # for backwards compatibility after implementation of DiffusionWrapper + if conditioning_key is None: + conditioning_key = 'concat' if concat_mode else 'crossattn' + if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: + conditioning_key = None + ckpt_path = kwargs.pop("ckpt_path", None) + reset_ema = kwargs.pop("reset_ema", False) + reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) + ignore_keys = kwargs.pop("ignore_keys", []) + 
        super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
        self.concat_mode = concat_mode
        self.cond_stage_trainable = cond_stage_trainable
        self.cond_stage_key = cond_stage_key
        try:
            self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
        except:
            # NOTE(review): bare except — silently falls back to 0 downsampling
            # levels whenever the config lacks params.ddconfig.ch_mult.
            self.num_downs = 0
        if not scale_by_std:
            self.scale_factor = scale_factor
        else:
            self.register_buffer('scale_factor', torch.tensor(scale_factor))
        self.instantiate_first_stage(first_stage_config)
        self.instantiate_cond_stage(cond_stage_config)
        self.cond_stage_forward = cond_stage_forward
        self.clip_denoised = False
        self.bbox_tokenizer = None

        self.restarted_from_ckpt = False
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys)
            self.restarted_from_ckpt = True
            if reset_ema:
                assert self.use_ema
                print(
                    f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
                self.model_ema = LitEma(self.model)
        if reset_num_ema_updates:
            print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
            assert self.use_ema
            self.model_ema.reset_num_updates()

    def make_cond_schedule(self, ):
        # Map each of the num_timesteps diffusion steps onto one of
        # num_timesteps_cond conditioning steps (last step used as filler).
        self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
        ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
        self.cond_ids[:self.num_timesteps_cond] = ids

    @rank_zero_only
    @torch.no_grad()
    def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
        # only for very first batch
        if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
            assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
            # set rescale weight to 1./std of encodings
            print("### USING STD-RESCALING ###")
            x = super().get_input(batch, self.first_stage_key)
            x = x.to(self.device)
            encoder_posterior = self.encode_first_stage(x)
            z = self.get_first_stage_encoding(encoder_posterior).detach()
            # re-register scale_factor as a buffer so it is checkpointed
            del self.scale_factor
            self.register_buffer('scale_factor', 1. / z.flatten().std())
            print(f"setting self.scale_factor to {self.scale_factor}")
            print("### USING STD-RESCALING ###")

    def register_schedule(self,
                          given_betas=None, beta_schedule="linear", timesteps=1000,
                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)

        self.shorten_cond_schedule = self.num_timesteps_cond > 1
        if self.shorten_cond_schedule:
            self.make_cond_schedule()

    def instantiate_first_stage(self, config):
        # First stage (autoencoder) is frozen: eval mode pinned via
        # disabled_train and all parameters excluded from optimization.
        model = instantiate_from_config(config)
        self.first_stage_model = model.eval()
        self.first_stage_model.train = disabled_train
        for param in self.first_stage_model.parameters():
            param.requires_grad = False

    def instantiate_cond_stage(self, config):
        # Conditioning model: frozen unless cond_stage_trainable; supports the
        # sentinel configs '__is_first_stage__' and '__is_unconditional__'.
        if not self.cond_stage_trainable:
            if config == "__is_first_stage__":
                print("Using first stage also as cond stage.")
                self.cond_stage_model = self.first_stage_model
            elif config == "__is_unconditional__":
                print(f"Training {self.__class__.__name__} as an unconditional model.")
                self.cond_stage_model = None
                # self.be_unconditional = True
            else:
                model = instantiate_from_config(config)
                self.cond_stage_model = model.eval()
                self.cond_stage_model.train = disabled_train
                for param in self.cond_stage_model.parameters():
                    param.requires_grad = False
        else:
            assert config != '__is_first_stage__'
            assert config != '__is_unconditional__'
            model = instantiate_from_config(config)
            self.cond_stage_model = model

    def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
        # Decode a list of latents and arrange them into a single image grid.
        denoise_row = []
        for zd in tqdm(samples, desc=desc):
            denoise_row.append(self.decode_first_stage(zd.to(self.device),
                                                       force_not_quantize=force_no_decoder_quantization))
        n_imgs_per_row = len(denoise_row)
        denoise_row = torch.stack(denoise_row)  # n_log_step, n_row, C, H, W
        denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid

    def get_first_stage_encoding(self, encoder_posterior):
        # Sample (or pass through) the first-stage latent and apply scale_factor.
        if isinstance(encoder_posterior, DiagonalGaussianDistribution):
            z = encoder_posterior.sample()
        elif isinstance(encoder_posterior, torch.Tensor):
            z = encoder_posterior
        else:
            raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
        return self.scale_factor * z

    def get_learned_conditioning(self, c):
        # Run the conditioning model (or its configured forward method) on c.
        if self.cond_stage_forward is None:
            if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
                c = self.cond_stage_model.encode(c)
                if isinstance(c, DiagonalGaussianDistribution):
                    c = c.mode()
            else:
                c = self.cond_stage_model(c)
        else:
            assert hasattr(self.cond_stage_model, self.cond_stage_forward)
            c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
        return c

    def meshgrid(self, h, w):
        # (h, w, 2) grid of integer (y, x) coordinates.
        y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
        x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)

        arr = torch.cat([y, x], dim=-1)
        return arr

    def delta_border(self, h, w):
        """
        :param h: height
        :param w: width
        :return: normalized distance to image border,
         with min distance = 0 at border and max dist = 0.5 at image center
        """
        lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
        arr = self.meshgrid(h, w) / lower_right_corner
        dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
        dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
        edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
        return edge_dist

    def get_weighting(self, h, w, Ly, Lx, device):
        # Border-distance based weighting for patch-wise decoding (definition
        # continues beyond this chunk).
        weighting = self.delta_border(h, w)
        weighting = torch.clip(weighting,
self.split_input_params["clip_min_weight"], + self.split_input_params["clip_max_weight"], ) + weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) + + if self.split_input_params["tie_braker"]: + L_weighting = self.delta_border(Ly, Lx) + L_weighting = torch.clip(L_weighting, + self.split_input_params["clip_min_tie_weight"], + self.split_input_params["clip_max_tie_weight"]) + + L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) + weighting = weighting * L_weighting + return weighting + + def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code + """ + :param x: img of size (bs, c, h, w) + :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) + """ + bs, nc, h, w = x.shape + + # number of crops in image + Ly = (h - kernel_size[0]) // stride[0] + 1 + Lx = (w - kernel_size[1]) // stride[1] + 1 + + if uf == 1 and df == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) + + weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) + + elif uf > 1 and df == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), + dilation=1, padding=0, + stride=(stride[0] * uf, stride[1] * uf)) + fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) + + weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0] 
* uf, kernel_size[1] * uf, Ly * Lx)) + + elif df > 1 and uf == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), + dilation=1, padding=0, + stride=(stride[0] // df, stride[1] // df)) + fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) + + weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) + + else: + raise NotImplementedError + + return fold, unfold, normalization, weighting + + @torch.no_grad() + def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, + cond_key=None, return_original_cond=False, bs=None, return_x=False): + x = super().get_input(batch, k) + if bs is not None: + x = x[:bs] + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + + if self.model.conditioning_key is not None and not self.force_null_conditioning: + if cond_key is None: + cond_key = self.cond_stage_key + if cond_key != self.first_stage_key: + if cond_key in ['caption', 'coordinates_bbox', "txt"]: + xc = batch[cond_key] + elif cond_key in ['class_label', 'cls']: + xc = batch + else: + xc = super().get_input(batch, cond_key).to(self.device) + else: + xc = x + if not self.cond_stage_trainable or force_c_encode: + if isinstance(xc, dict) or isinstance(xc, list): + c = self.get_learned_conditioning(xc) + else: + c = self.get_learned_conditioning(xc.to(self.device)) + else: + c = xc + if bs is not None: + c = c[:bs] + + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + ckey = 
__conditioning_keys__[self.model.conditioning_key] + c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} + + else: + c = None + xc = None + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + c = {'pos_x': pos_x, 'pos_y': pos_y} + out = [z, c] + if return_first_stage_outputs: + xrec = self.decode_first_stage(z) + out.extend([x, xrec]) + if return_x: + out.extend([x]) + if return_original_cond: + out.append(xc) + return out + + @torch.no_grad() + def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, 'b h w c -> b c h w').contiguous() + + z = 1. / self.scale_factor * z + return self.first_stage_model.decode(z) + + @torch.no_grad() + def encode_first_stage(self, x): + return self.first_stage_model.encode(x) + + def shared_step(self, batch, **kwargs): + x, c = self.get_input(batch, self.first_stage_key) + loss = self(x, c) + return loss + + def forward(self, x, c, *args, **kwargs): + t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() + if self.model.conditioning_key is not None: + assert c is not None + if self.cond_stage_trainable: + c = self.get_learned_conditioning(c) + if self.shorten_cond_schedule: # TODO: drop this option + tc = self.cond_ids[t].to(self.device) + c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) + return self.p_losses(x, c, t, *args, **kwargs) + + def apply_model(self, x_noisy, t, cond, return_ids=False): + if isinstance(cond, dict): + # hybrid case, cond is expected to be a dict + pass + else: + if not isinstance(cond, list): + cond = [cond] + key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' + cond = {key: cond} + + x_recon = self.model(x_noisy, t, **cond) + + if isinstance(x_recon, tuple) and not return_ids: + return x_recon[0] + else: + 
return x_recon + + def _predict_eps_from_xstart(self, x_t, t, pred_xstart): + return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ + extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + + def _prior_bpd(self, x_start): + """ + Get the prior KL term for the variational lower-bound, measured in + bits-per-dim. + This term can't be optimized, as it only depends on the encoder. + :param x_start: the [N x C x ...] tensor of inputs. + :return: a batch of [N] KL values (in bits), one per batch element. + """ + batch_size = x_start.shape[0] + t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) + qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) + kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) + return mean_flat(kl_prior) / np.log(2.0) + + def p_losses(self, x_start, cond, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_output = self.apply_model(x_noisy, t, cond) + + loss_dict = {} + prefix = 'train' if self.training else 'val' + + if self.parameterization == "x0": + target = x_start + elif self.parameterization == "eps": + target = noise + elif self.parameterization == "v": + target = self.get_v(x_start, noise, t) + else: + raise NotImplementedError() + + loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) + loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) + + logvar_t = self.logvar[t].to(self.device) + loss = loss_simple / torch.exp(logvar_t) + logvar_t + # loss = loss_simple / torch.exp(self.logvar) + self.logvar + if self.learn_logvar: + loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) + loss_dict.update({'logvar': self.logvar.data.mean()}) + + loss = self.l_simple_weight * loss.mean() + + loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) + loss_vlb = 
(self.lvlb_weights[t] * loss_vlb).mean() + loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) + loss += (self.original_elbo_weight * loss_vlb) + loss_dict.update({f'{prefix}/loss': loss}) + + return loss, loss_dict + + def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, + return_x0=False, score_corrector=None, corrector_kwargs=None): + t_in = t + model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) + + if score_corrector is not None: + assert self.parameterization == "eps" + model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) + + if return_codebook_ids: + model_out, logits = model_out + + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + else: + raise NotImplementedError() + + if clip_denoised: + x_recon.clamp_(-1., 1.) + if quantize_denoised: + x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) + model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) + if return_codebook_ids: + return model_mean, posterior_variance, posterior_log_variance, logits + elif return_x0: + return model_mean, posterior_variance, posterior_log_variance, x_recon + else: + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, + return_codebook_ids=False, quantize_denoised=False, return_x0=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): + b, *_, device = *x.shape, x.device + outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, + return_codebook_ids=return_codebook_ids, + quantize_denoised=quantize_denoised, + return_x0=return_x0, + score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) + if return_codebook_ids: + raise 
DeprecationWarning("Support dropped.") + model_mean, _, model_log_variance, logits = outputs + elif return_x0: + model_mean, _, model_log_variance, x0 = outputs + else: + model_mean, _, model_log_variance = outputs + + noise = noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + + if return_codebook_ids: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) + if return_x0: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 + else: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, + img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., + score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, + log_every_t=None): + if not log_every_t: + log_every_t = self.log_every_t + timesteps = self.num_timesteps + if batch_size is not None: + b = batch_size if batch_size is not None else shape[0] + shape = [batch_size] + list(shape) + else: + b = batch_size = shape[0] + if x_T is None: + img = torch.randn(shape, device=self.device) + else: + img = x_T + intermediates = [] + if cond is not None: + if isinstance(cond, dict): + cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else + list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + else: + cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', + total=timesteps) if verbose else reversed( + range(0, timesteps)) + if type(temperature) == float: + 
temperature = [temperature] * timesteps + + for i in iterator: + ts = torch.full((b,), i, device=self.device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != 'hybrid' + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) + + img, x0_partial = self.p_sample(img, cond, ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised, return_x0=True, + temperature=temperature[i], noise_dropout=noise_dropout, + score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) + if mask is not None: + assert x0 is not None + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1. - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(x0_partial) + if callback: callback(i) + if img_callback: img_callback(img, i) + return img, intermediates + + @torch.no_grad() + def p_sample_loop(self, cond, shape, return_intermediates=False, + x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, start_T=None, + log_every_t=None): + + if not log_every_t: + log_every_t = self.log_every_t + device = self.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + intermediates = [img] + if timesteps is None: + timesteps = self.num_timesteps + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( + range(0, timesteps)) + + if mask is not None: + assert x0 is not None + assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match + + for i in iterator: + ts = torch.full((b,), i, device=device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != 'hybrid' + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, 
noise=torch.randn_like(cond)) + + img = self.p_sample(img, cond, ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised) + if mask is not None: + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1. - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(img) + if callback: callback(i) + if img_callback: img_callback(img, i) + + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, + verbose=True, timesteps=None, quantize_denoised=False, + mask=None, x0=None, shape=None, **kwargs): + if shape is None: + shape = (batch_size, self.channels, self.image_size, self.image_size) + if cond is not None: + if isinstance(cond, dict): + cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else + list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + else: + cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] + return self.p_sample_loop(cond, + shape, + return_intermediates=return_intermediates, x_T=x_T, + verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, + mask=mask, x0=x0) + + @torch.no_grad() + def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): + if ddim: + ddim_sampler = DDIMSampler(self) + shape = (self.channels, self.image_size, self.image_size) + samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, + shape, cond, verbose=False, **kwargs) + + else: + samples, intermediates = self.sample(cond=cond, batch_size=batch_size, + return_intermediates=True, **kwargs) + + return samples, intermediates + + @torch.no_grad() + def get_unconditional_conditioning(self, batch_size, null_label=None): + if null_label is not None: + xc = null_label + if isinstance(xc, ListConfig): + xc = list(xc) + if isinstance(xc, dict) or isinstance(xc, list): + c = self.get_learned_conditioning(xc) + else: + if 
hasattr(xc, "to"): + xc = xc.to(self.device) + c = self.get_learned_conditioning(xc) + else: + if self.cond_stage_key in ["class_label", "cls"]: + xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) + return self.get_learned_conditioning(xc) + else: + raise NotImplementedError("todo") + if isinstance(c, list): # in case the encoder gives us a list + for i in range(len(c)): + c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) + else: + c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) + return c + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, + quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, + plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, + use_ema_scope=True, + **kwargs): + ema_scope = self.ema_scope if use_ema_scope else nullcontext + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, + return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=N) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption", "txt"]: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) + log["conditioning"] = xc + elif self.cond_stage_key in ['class_label', "cls"]: + try: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) + log['conditioning'] = xc + except KeyError: + # probably no "human_label" in batch + pass + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + 
if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') + diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + with ema_scope("Sampling"): + samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( + self.first_stage_model, IdentityFirstStage): + # also display when quantizing x0 while sampling + with ema_scope("Plotting Quantized Denoised"): + samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta, + quantize_denoised=True) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, + # quantize_denoised=True) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_x0_quantized"] = x_samples + + if unconditional_guidance_scale > 1.0: + uc = self.get_unconditional_conditioning(N, unconditional_guidance_label) + if 
self.model.conditioning_key == "crossattn-adm": + uc = {"c_crossattn": [uc], "c_adm": c["c_adm"]} + with ema_scope("Sampling with classifier-free guidance"): + samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=uc, + ) + x_samples_cfg = self.decode_first_stage(samples_cfg) + log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg + + if inpaint: + # make a simple center square + b, h, w = z.shape[0], z.shape[2], z.shape[3] + mask = torch.ones(N, h, w).to(self.device) + # zeros will be filled in + mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. + mask = mask[:, None, ...] + with ema_scope("Plotting Inpaint"): + samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta, + ddim_steps=ddim_steps, x0=z[:N], mask=mask) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_inpainting"] = x_samples + log["mask"] = mask + + # outpaint + mask = 1. 
- mask + with ema_scope("Plotting Outpaint"): + samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta, + ddim_steps=ddim_steps, x0=z[:N], mask=mask) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_outpainting"] = x_samples + + if plot_progressive_rows: + with ema_scope("Plotting Progressives"): + img, progressives = self.progressive_denoising(c, + shape=(self.channels, self.image_size, self.image_size), + batch_size=N) + prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") + log["progressive_row"] = prog_row + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.cond_stage_trainable: + print(f"{self.__class__.__name__}: Also optimizing conditioner params!") + params = params + list(self.cond_stage_model.parameters()) + if self.learn_logvar: + print('Diffusion model optimizing logvar') + params.append(self.logvar) + opt = torch.optim.AdamW(params, lr=lr) + if self.use_scheduler: + assert 'target' in self.scheduler_config + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }] + return [opt], scheduler + return opt + + @torch.no_grad() + def to_rgb(self, x): + x = x.float() + if not hasattr(self, "colorize"): + self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) + x = nn.functional.conv2d(x, weight=self.colorize) + x = 2. * (x - x.min()) / (x.max() - x.min()) - 1. 
+ return x + + +class DiffusionWrapper(pl.LightningModule): + def __init__(self, diff_model_config, conditioning_key): + super().__init__() + self.sequential_cross_attn = diff_model_config.pop("sequential_crossattn", False) + self.diffusion_model = instantiate_from_config(diff_model_config) + self.conditioning_key = conditioning_key + assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm', 'hybrid-adm', 'crossattn-adm'] + + def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None): + if self.conditioning_key is None: + out = self.diffusion_model(x, t) + elif self.conditioning_key == 'concat': + xc = torch.cat([x] + c_concat, dim=1) + out = self.diffusion_model(xc, t) + elif self.conditioning_key == 'crossattn': + if not self.sequential_cross_attn: + cc = torch.cat(c_crossattn, 1) + else: + cc = c_crossattn + out = self.diffusion_model(x, t, context=cc) + elif self.conditioning_key == 'hybrid': + xc = torch.cat([x] + c_concat, dim=1) + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(xc, t, context=cc) + elif self.conditioning_key == 'hybrid-adm': + assert c_adm is not None + xc = torch.cat([x] + c_concat, dim=1) + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(xc, t, context=cc, y=c_adm) + elif self.conditioning_key == 'crossattn-adm': + assert c_adm is not None + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(x, t, context=cc, y=c_adm) + elif self.conditioning_key == 'adm': + cc = c_crossattn[0] + out = self.diffusion_model(x, t, y=cc) + else: + raise NotImplementedError() + + return out + + +class LatentUpscaleDiffusion(LatentDiffusion): + def __init__(self, *args, low_scale_config, low_scale_key="LR", noise_level_key=None, **kwargs): + super().__init__(*args, **kwargs) + # assumes that neither the cond_stage nor the low_scale_model contain trainable params + assert not self.cond_stage_trainable + self.instantiate_low_stage(low_scale_config) + self.low_scale_key = low_scale_key 
+ self.noise_level_key = noise_level_key + + def instantiate_low_stage(self, config): + model = instantiate_from_config(config) + self.low_scale_model = model.eval() + self.low_scale_model.train = disabled_train + for param in self.low_scale_model.parameters(): + param.requires_grad = False + + @torch.no_grad() + def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False): + if not log_mode: + z, c = super().get_input(batch, k, force_c_encode=True, bs=bs) + else: + z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, + force_c_encode=True, return_original_cond=True, bs=bs) + x_low = batch[self.low_scale_key][:bs] + x_low = rearrange(x_low, 'b h w c -> b c h w') + x_low = x_low.to(memory_format=torch.contiguous_format).float() + zx, noise_level = self.low_scale_model(x_low) + if self.noise_level_key is not None: + # get noise level from batch instead, e.g. when extracting a custom noise level for bsr + raise NotImplementedError('TODO') + + all_conds = {"c_concat": [zx], "c_crossattn": [c], "c_adm": noise_level} + if log_mode: + # TODO: maybe disable if too expensive + x_low_rec = self.low_scale_model.decode(zx) + return z, all_conds, x, xrec, xc, x_low, x_low_rec, noise_level + return z, all_conds + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, + plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, + unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, + **kwargs): + ema_scope = self.ema_scope if use_ema_scope else nullcontext + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc, x_low, x_low_rec, noise_level = self.get_input(batch, self.first_stage_key, bs=N, + log_mode=True) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + log["x_lr"] = x_low + log[f"x_lr_rec_@noise_levels{'-'.join(map(lambda 
x: str(x), list(noise_level.cpu().numpy())))}"] = x_low_rec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption", "txt"]: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) + log["conditioning"] = xc + elif self.cond_stage_key in ['class_label', 'cls']: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) + log['conditioning'] = xc + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') + diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + with ema_scope("Sampling"): + samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if unconditional_guidance_scale > 1.0: + uc_tmp = 
self.get_unconditional_conditioning(N, unconditional_guidance_label)
+ # TODO explore better "unconditional" choices for the other keys
+ # maybe guide away from empty text label and highest noise level and maximally degraded zx?
+ uc = dict()
+ for k in c:
+ if k == "c_crossattn":
+ assert isinstance(c[k], list) and len(c[k]) == 1
+ uc[k] = [uc_tmp]
+ elif k == "c_adm": # todo: only run with text-based guidance?
+ assert isinstance(c[k], torch.Tensor)
+ #uc[k] = torch.ones_like(c[k]) * self.low_scale_model.max_noise_level
+ uc[k] = c[k]
+ elif isinstance(c[k], list):
+ uc[k] = [c[k][i] for i in range(len(c[k]))]
+ else:
+ uc[k] = c[k]
+
+ with ema_scope("Sampling with classifier-free guidance"):
+ samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
+ ddim_steps=ddim_steps, eta=ddim_eta,
+ unconditional_guidance_scale=unconditional_guidance_scale,
+ unconditional_conditioning=uc,
+ )
+ x_samples_cfg = self.decode_first_stage(samples_cfg)
+ log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
+
+ if plot_progressive_rows:
+ with ema_scope("Plotting Progressives"):
+ img, progressives = self.progressive_denoising(c,
+ shape=(self.channels, self.image_size, self.image_size),
+ batch_size=N)
+ prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
+ log["progressive_row"] = prog_row
+
+ return log
+
+
+class LatentFinetuneDiffusion(LatentDiffusion):
+ """
+ Basis for different finetunings, such as inpainting or depth2image
+ To disable finetuning mode, set finetune_keys to None
+ """
+
+ def __init__(self,
+ concat_keys: tuple,
+ finetune_keys=("model.diffusion_model.input_blocks.0.0.weight",
+ "model_ema.diffusion_modelinput_blocks00weight"
+ ),
+ keep_finetune_dims=4,
+ # if model was trained without concat mode before and we would like to keep these channels
+ c_concat_log_start=None, # to log reconstruction of c_concat codes
+ c_concat_log_end=None,
+ *args, **kwargs
+ ):
+ ckpt_path = 
kwargs.pop("ckpt_path", None) + ignore_keys = kwargs.pop("ignore_keys", list()) + super().__init__(*args, **kwargs) + self.finetune_keys = finetune_keys + self.concat_keys = concat_keys + self.keep_dims = keep_finetune_dims + self.c_concat_log_start = c_concat_log_start + self.c_concat_log_end = c_concat_log_end + if exists(self.finetune_keys): assert exists(ckpt_path), 'can only finetune from a given checkpoint' + if exists(ckpt_path): + self.init_from_ckpt(ckpt_path, ignore_keys) + + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + + # make it explicit, finetune by including extra input channels + if exists(self.finetune_keys) and k in self.finetune_keys: + new_entry = None + for name, param in self.named_parameters(): + if name in self.finetune_keys: + print( + f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only") + new_entry = torch.zeros_like(param) # zero init + assert exists(new_entry), 'did not find matching parameter to modify' + new_entry[:, :self.keep_dims, ...] 
= sd[k] + sd[k] = new_entry + + missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( + sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + print(f"Unexpected Keys: {unexpected}") + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, + quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, + plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, + use_ema_scope=True, + **kwargs): + ema_scope = self.ema_scope if use_ema_scope else nullcontext + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, bs=N, return_first_stage_outputs=True) + c_cat, c = c["c_concat"][0], c["c_crossattn"][0] + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption", "txt"]: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) + log["conditioning"] = xc + elif self.cond_stage_key in ['class_label', 'cls']: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) + log['conditioning'] = xc + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if not (self.c_concat_log_start is None and self.c_concat_log_end is None): + log["c_concat_decoded"] = self.decode_first_stage(c_cat[:, self.c_concat_log_start:self.c_concat_log_end]) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = 
list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') + diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + with ema_scope("Sampling"): + samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]}, + batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if unconditional_guidance_scale > 1.0: + uc_cross = self.get_unconditional_conditioning(N, unconditional_guidance_label) + uc_cat = c_cat + uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]} + with ema_scope("Sampling with classifier-free guidance"): + samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]}, + batch_size=N, ddim=use_ddim, + ddim_steps=ddim_steps, eta=ddim_eta, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=uc_full, + ) + x_samples_cfg = self.decode_first_stage(samples_cfg) + log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg + + return log + + +class LatentInpaintDiffusion(LatentFinetuneDiffusion): + """ + can either run as pure inpainting model (only 
concat mode) or with mixed conditionings, + e.g. mask as concat and text via cross-attn. + To disable finetuning mode, set finetune_keys to None + """ + + def __init__(self, + concat_keys=("mask", "masked_image"), + masked_image_key="masked_image", + *args, **kwargs + ): + super().__init__(concat_keys, *args, **kwargs) + self.masked_image_key = masked_image_key + assert self.masked_image_key in concat_keys + + @torch.no_grad() + def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False): + # note: restricted to non-trainable encoders currently + assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for inpainting' + z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, + force_c_encode=True, return_original_cond=True, bs=bs) + + assert exists(self.concat_keys) + c_cat = list() + for ck in self.concat_keys: + cc = rearrange(batch[ck], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float() + if bs is not None: + cc = cc[:bs] + cc = cc.to(self.device) + bchw = z.shape + if ck != self.masked_image_key: + cc = torch.nn.functional.interpolate(cc, size=bchw[-2:]) + else: + cc = self.get_first_stage_encoding(self.encode_first_stage(cc)) + c_cat.append(cc) + c_cat = torch.cat(c_cat, dim=1) + all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} + if return_first_stage_outputs: + return z, all_conds, x, xrec, xc + return z, all_conds + + @torch.no_grad() + def log_images(self, *args, **kwargs): + log = super(LatentInpaintDiffusion, self).log_images(*args, **kwargs) + log["masked_image"] = rearrange(args[0]["masked_image"], + 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float() + return log + + +class LatentDepth2ImageDiffusion(LatentFinetuneDiffusion): + """ + condition on monocular depth estimation + """ + + def __init__(self, depth_stage_config, concat_keys=("midas_in",), *args, **kwargs): + super().__init__(concat_keys=concat_keys, *args, 
**kwargs) + self.depth_model = instantiate_from_config(depth_stage_config) + self.depth_stage_key = concat_keys[0] + + @torch.no_grad() + def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False): + # note: restricted to non-trainable encoders currently + assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for depth2img' + z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, + force_c_encode=True, return_original_cond=True, bs=bs) + + assert exists(self.concat_keys) + assert len(self.concat_keys) == 1 + c_cat = list() + for ck in self.concat_keys: + cc = batch[ck] + if bs is not None: + cc = cc[:bs] + cc = cc.to(self.device) + cc = self.depth_model(cc) + cc = torch.nn.functional.interpolate( + cc, + size=z.shape[2:], + mode="bicubic", + align_corners=False, + ) + + depth_min, depth_max = torch.amin(cc, dim=[1, 2, 3], keepdim=True), torch.amax(cc, dim=[1, 2, 3], + keepdim=True) + cc = 2. * (cc - depth_min) / (depth_max - depth_min + 0.001) - 1. + c_cat.append(cc) + c_cat = torch.cat(c_cat, dim=1) + all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} + if return_first_stage_outputs: + return z, all_conds, x, xrec, xc + return z, all_conds + + @torch.no_grad() + def log_images(self, *args, **kwargs): + log = super().log_images(*args, **kwargs) + depth = self.depth_model(args[0][self.depth_stage_key]) + depth_min, depth_max = torch.amin(depth, dim=[1, 2, 3], keepdim=True), \ + torch.amax(depth, dim=[1, 2, 3], keepdim=True) + log["depth"] = 2. * (depth - depth_min) / (depth_max - depth_min) - 1. 
+ return log + + +class LatentUpscaleFinetuneDiffusion(LatentFinetuneDiffusion): + """ + condition on low-res image (and optionally on some spatial noise augmentation) + """ + def __init__(self, concat_keys=("lr",), reshuffle_patch_size=None, + low_scale_config=None, low_scale_key=None, *args, **kwargs): + super().__init__(concat_keys=concat_keys, *args, **kwargs) + self.reshuffle_patch_size = reshuffle_patch_size + self.low_scale_model = None + if low_scale_config is not None: + print("Initializing a low-scale model") + assert exists(low_scale_key) + self.instantiate_low_stage(low_scale_config) + self.low_scale_key = low_scale_key + + def instantiate_low_stage(self, config): + model = instantiate_from_config(config) + self.low_scale_model = model.eval() + self.low_scale_model.train = disabled_train + for param in self.low_scale_model.parameters(): + param.requires_grad = False + + @torch.no_grad() + def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False): + # note: restricted to non-trainable encoders currently + assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for upscaling-ft' + z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, + force_c_encode=True, return_original_cond=True, bs=bs) + + assert exists(self.concat_keys) + assert len(self.concat_keys) == 1 + # optionally make spatial noise_level here + c_cat = list() + noise_level = None + for ck in self.concat_keys: + cc = batch[ck] + cc = rearrange(cc, 'b h w c -> b c h w') + if exists(self.reshuffle_patch_size): + assert isinstance(self.reshuffle_patch_size, int) + cc = rearrange(cc, 'b c (p1 h) (p2 w) -> b (p1 p2 c) h w', + p1=self.reshuffle_patch_size, p2=self.reshuffle_patch_size) + if bs is not None: + cc = cc[:bs] + cc = cc.to(self.device) + if exists(self.low_scale_model) and ck == self.low_scale_key: + cc, noise_level = self.low_scale_model(cc) + c_cat.append(cc) + c_cat = torch.cat(c_cat, 
dim=1) + if exists(noise_level): + all_conds = {"c_concat": [c_cat], "c_crossattn": [c], "c_adm": noise_level} + else: + all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} + if return_first_stage_outputs: + return z, all_conds, x, xrec, xc + return z, all_conds + + @torch.no_grad() + def log_images(self, *args, **kwargs): + log = super().log_images(*args, **kwargs) + log["lr"] = rearrange(args[0]["lr"], 'b h w c -> b c h w') + return log diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/__init__.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7427f38c07530afbab79154ea8aaf88c4bf70a08 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/__init__.py @@ -0,0 +1 @@ +from .sampler import DPMSolverSampler \ No newline at end of file diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/dpm_solver.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/dpm_solver.py new file mode 100644 index 0000000000000000000000000000000000000000..095e5ba3ce0b1aa7f4b3f1e2e5d8fff7cfe6dc8c --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/dpm_solver/dpm_solver.py @@ -0,0 +1,1154 @@ +import torch +import torch.nn.functional as F +import math +from tqdm import tqdm + + +class NoiseScheduleVP: + def __init__( + self, + schedule='discrete', + betas=None, + alphas_cumprod=None, + continuous_beta_0=0.1, + continuous_beta_1=20., + ): + """Create a wrapper class for the forward SDE (VP type). 
+
+ ***
+ Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
+ We recommend using schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images.
+ ***
+ The forward SDE ensures that the conditional distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
+ We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
+ Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
+ log_alpha_t = self.marginal_log_mean_coeff(t)
+ sigma_t = self.marginal_std(t)
+ lambda_t = self.marginal_lambda(t)
+ Moreover, as lambda(t) is an invertible function, we also support its inverse function:
+ t = self.inverse_lambda(lambda_t)
+ ===============================================================
+ We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
+ 1. For discrete-time DPMs:
+ For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
+ t_i = (i + 1) / N
+ e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
+ We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
+ Args:
+ betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
+ alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
+ Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
+ **Important**: Please pay special attention to the args for `alphas_cumprod`:
+ The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that
+ q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ). 
+ Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have + alpha_{t_n} = \sqrt{\hat{alpha_n}}, + and + log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}). + 2. For continuous-time DPMs: + We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise + schedule are the default settings in DDPM and improved-DDPM: + Args: + beta_min: A `float` number. The smallest beta for the linear schedule. + beta_max: A `float` number. The largest beta for the linear schedule. + cosine_s: A `float` number. The hyperparameter in the cosine schedule. + cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule. + T: A `float` number. The ending time of the forward process. + =============================================================== + Args: + schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs, + 'linear' or 'cosine' for continuous-time DPMs. + Returns: + A wrapper object of the forward SDE (VP type). + + =============================================================== + Example: + # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1): + >>> ns = NoiseScheduleVP('discrete', betas=betas) + # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1): + >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod) + # For continuous-time DPMs (VPSDE), linear schedule: + >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.) + """ + + if schedule not in ['discrete', 'linear', 'cosine']: + raise ValueError( + "Unsupported noise schedule {}. 
The schedule needs to be 'discrete' or 'linear' or 'cosine'".format( + schedule)) + + self.schedule = schedule + if schedule == 'discrete': + if betas is not None: + log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0) + else: + assert alphas_cumprod is not None + log_alphas = 0.5 * torch.log(alphas_cumprod) + self.total_N = len(log_alphas) + self.T = 1. + self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)) + self.log_alpha_array = log_alphas.reshape((1, -1,)) + else: + self.total_N = 1000 + self.beta_0 = continuous_beta_0 + self.beta_1 = continuous_beta_1 + self.cosine_s = 0.008 + self.cosine_beta_max = 999. + self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * ( + 1. + self.cosine_s) / math.pi - self.cosine_s + self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.)) + self.schedule = schedule + if schedule == 'cosine': + # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T. + # Note that T = 0.9946 may be not the optimal setting. However, we find it works well. + self.T = 0.9946 + else: + self.T = 1. + + def marginal_log_mean_coeff(self, t): + """ + Compute log(alpha_t) of a given continuous-time label t in [0, T]. + """ + if self.schedule == 'discrete': + return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), + self.log_alpha_array.to(t.device)).reshape((-1)) + elif self.schedule == 'linear': + return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0 + elif self.schedule == 'cosine': + log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.)) + log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0 + return log_alpha_t + + def marginal_alpha(self, t): + """ + Compute alpha_t of a given continuous-time label t in [0, T]. 
+ """ + return torch.exp(self.marginal_log_mean_coeff(t)) + + def marginal_std(self, t): + """ + Compute sigma_t of a given continuous-time label t in [0, T]. + """ + return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t))) + + def marginal_lambda(self, t): + """ + Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T]. + """ + log_mean_coeff = self.marginal_log_mean_coeff(t) + log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff)) + return log_mean_coeff - log_std + + def inverse_lambda(self, lamb): + """ + Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t. + """ + if self.schedule == 'linear': + tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) + Delta = self.beta_0 ** 2 + tmp + return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0) + elif self.schedule == 'discrete': + log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb) + t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), + torch.flip(self.t_array.to(lamb.device), [1])) + return t.reshape((-1,)) + else: + log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) + t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * ( + 1. + self.cosine_s) / math.pi - self.cosine_s + t = t_fn(log_alpha) + return t + + +def model_wrapper( + model, + noise_schedule, + model_type="noise", + model_kwargs={}, + guidance_type="uncond", + condition=None, + unconditional_condition=None, + guidance_scale=1., + classifier_fn=None, + classifier_kwargs={}, +): + """Create a wrapper function for the noise prediction model. + DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to + firstly wrap the model function to a noise prediction model that accepts the continuous time as the input. 
+ We support four types of the diffusion model by setting `model_type`:
+ 1. "noise": noise prediction model. (Trained by predicting noise).
+ 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
+ 3. "v": velocity prediction model. (Trained by predicting the velocity).
+ The "v" prediction derivation is detailed in Appendix D of [1], and is used in Imagen-Video [2].
+ [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
+ arXiv preprint arXiv:2202.00512 (2022).
+ [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
+ arXiv preprint arXiv:2210.02303 (2022).
+
+ 4. "score": marginal score function. (Trained by denoising score matching).
+ Note that the score function and the noise prediction model follow a simple relationship:
+ ```
+ noise(x_t, t) = -sigma_t * score(x_t, t)
+ ```
+ We support three types of guided sampling by DPMs by setting `guidance_type`:
+ 1. "uncond": unconditional sampling by DPMs.
+ The input `model` has the following format:
+ ``
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
+ ``
+ 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
+ The input `model` has the following format:
+ ``
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
+ ``
+ The input `classifier_fn` has the following format:
+ ``
+ classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
+ ``
+ [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
+ in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
+ 3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
+ The input `model` has the following format:
+ ``
+ model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
+ ``
+ And if cond == `unconditional_condition`, the model output is the unconditional DPM output. 
+ [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." + arXiv preprint arXiv:2207.12598 (2022). + + The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) + or continuous-time labels (i.e. epsilon to T). + We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise: + `` + def model_fn(x, t_continuous) -> noise: + t_input = get_model_input_time(t_continuous) + return noise_pred(model, x, t_input, **model_kwargs) + `` + where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. + =============================================================== + Args: + model: A diffusion model with the corresponding format described above. + noise_schedule: A noise schedule object, such as NoiseScheduleVP. + model_type: A `str`. The parameterization type of the diffusion model. + "noise" or "x_start" or "v" or "score". + model_kwargs: A `dict`. A dict for the other inputs of the model function. + guidance_type: A `str`. The type of the guidance for sampling. + "uncond" or "classifier" or "classifier-free". + condition: A pytorch tensor. The condition for the guided sampling. + Only used for "classifier" or "classifier-free" guidance type. + unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. + Only used for "classifier-free" guidance type. + guidance_scale: A `float`. The scale for the guided sampling. + classifier_fn: A classifier function. Only used for the classifier guidance. + classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. + Returns: + A noise prediction model that accepts the noised data and the continuous time as the inputs. + """ + + def get_model_input_time(t_continuous): + """ + Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time. 
+ For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
+ For continuous-time DPMs, we just use `t_continuous`.
+ """
+ if noise_schedule.schedule == 'discrete':
+ return (t_continuous - 1. / noise_schedule.total_N) * 1000.
+ else:
+ return t_continuous
+
+ def noise_pred_fn(x, t_continuous, cond=None):
+ if t_continuous.reshape((-1,)).shape[0] == 1:
+ t_continuous = t_continuous.expand((x.shape[0]))
+ t_input = get_model_input_time(t_continuous)
+ if cond is None:
+ output = model(x, t_input, **model_kwargs)
+ else:
+ output = model(x, t_input, cond, **model_kwargs)
+ if model_type == "noise":
+ return output
+ elif model_type == "x_start":
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
+ dims = x.dim()
+ return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
+ elif model_type == "v":
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
+ dims = x.dim()
+ return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
+ elif model_type == "score":
+ sigma_t = noise_schedule.marginal_std(t_continuous)
+ dims = x.dim()
+ return -expand_dims(sigma_t, dims) * output
+
+ def cond_grad_fn(x, t_input):
+ """
+ Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
+ """
+ with torch.enable_grad():
+ x_in = x.detach().requires_grad_(True)
+ log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
+ return torch.autograd.grad(log_prob.sum(), x_in)[0]
+
+ def model_fn(x, t_continuous):
+ """
+ The noise prediction model function that is used for DPM-Solver. 
+ """ + if t_continuous.reshape((-1,)).shape[0] == 1: + t_continuous = t_continuous.expand((x.shape[0])) + if guidance_type == "uncond": + return noise_pred_fn(x, t_continuous) + elif guidance_type == "classifier": + assert classifier_fn is not None + t_input = get_model_input_time(t_continuous) + cond_grad = cond_grad_fn(x, t_input) + sigma_t = noise_schedule.marginal_std(t_continuous) + noise = noise_pred_fn(x, t_continuous) + return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad + elif guidance_type == "classifier-free": + if guidance_scale == 1. or unconditional_condition is None: + return noise_pred_fn(x, t_continuous, cond=condition) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t_continuous] * 2) + c_in = torch.cat([unconditional_condition, condition]) + noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2) + return noise_uncond + guidance_scale * (noise - noise_uncond) + + assert model_type in ["noise", "x_start", "v"] + assert guidance_type in ["uncond", "classifier", "classifier-free"] + return model_fn + + +class DPM_Solver: + def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.): + """Construct a DPM-Solver. + We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0"). + If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver). + If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++). + In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True. + The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales. + Args: + model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]): + `` + def model_fn(x, t_continuous): + return noise + `` + noise_schedule: A noise schedule object, such as NoiseScheduleVP. 
            predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model.
            thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1].
            max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding.

        [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b.
        """
        self.model = model_fn
        self.noise_schedule = noise_schedule
        self.predict_x0 = predict_x0
        self.thresholding = thresholding
        self.max_val = max_val

    def noise_prediction_fn(self, x, t):
        """
        Return the noise prediction model.
        """
        return self.model(x, t)

    def data_prediction_fn(self, x, t):
        """
        Return the data prediction model (with thresholding).

        Recovers x0 from the predicted noise via x0 = (x - sigma_t * noise) / alpha_t;
        when `self.thresholding` is set, x0 is additionally clamped and rescaled by a
        per-sample dynamic threshold `s` ("dynamic thresholding" from Imagen [1]).
        """
        noise = self.noise_prediction_fn(x, t)
        dims = x.dim()
        alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
        x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
        if self.thresholding:
            p = 0.995  # A hyperparameter in the paper of "Imagen" [1].
            # Per-sample p-quantile of |x0|, floored at self.max_val, used as the clamp range.
            s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
            s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
            x0 = torch.clamp(x0, -s, s) / s
        return x0

    def model_fn(self, x, t):
        """
        Convert the model to the noise prediction model or the data prediction model.
        """
        if self.predict_x0:
            return self.data_prediction_fn(x, t)
        else:
            return self.noise_prediction_fn(x, t)

    def get_time_steps(self, skip_type, t_T, t_0, N, device):
        """Compute the intermediate time steps for sampling.
        Args:
            skip_type: A `str`. The type for the spacing of the time steps. We support three types:
                - 'logSNR': uniform logSNR for the time steps.
                - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)
                - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)
            t_T: A `float`. The starting time of the sampling (default is T).
            t_0: A `float`. The ending time of the sampling (default is epsilon).
            N: A `int`. The total number of the spacing of the time steps.
            device: A torch device.
        Returns:
            A pytorch tensor of the time steps, with the shape (N + 1,).
        """
        if skip_type == 'logSNR':
            # Uniform spacing in the half-logSNR (lambda) domain, then mapped back to time.
            lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
            lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
            logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
            return self.noise_schedule.inverse_lambda(logSNR_steps)
        elif skip_type == 'time_uniform':
            return torch.linspace(t_T, t_0, N + 1).to(device)
        elif skip_type == 'time_quadratic':
            t_order = 2
            t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. / t_order), N + 1).pow(t_order).to(device)
            return t
        else:
            raise ValueError(
                "Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))

    def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
        """
        Get the order of each step for sampling by the singlestep DPM-Solver.
        We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast".
        Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:
            - If order == 1:
                We take `steps` of DPM-Solver-1 (i.e. DDIM).
            - If order == 2:
                - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.
                - If steps % 2 == 0, we use K steps of DPM-Solver-2.
+ - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1. + - If order == 3: + - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. + - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1. + - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1. + - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2. + ============================================ + Args: + order: A `int`. The max order for the solver (2 or 3). + steps: A `int`. The total number of function evaluations (NFE). + skip_type: A `str`. The type for the spacing of the time steps. We support three types: + - 'logSNR': uniform logSNR for the time steps. + - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) + - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + device: A torch device. + Returns: + orders: A list of the solver order of each step. 
+ """ + if order == 3: + K = steps // 3 + 1 + if steps % 3 == 0: + orders = [3, ] * (K - 2) + [2, 1] + elif steps % 3 == 1: + orders = [3, ] * (K - 1) + [1] + else: + orders = [3, ] * (K - 1) + [2] + elif order == 2: + if steps % 2 == 0: + K = steps // 2 + orders = [2, ] * K + else: + K = steps // 2 + 1 + orders = [2, ] * (K - 1) + [1] + elif order == 1: + K = 1 + orders = [1, ] * steps + else: + raise ValueError("'order' must be '1' or '2' or '3'.") + if skip_type == 'logSNR': + # To reproduce the results in DPM-Solver paper + timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device) + else: + timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[ + torch.cumsum(torch.tensor([0, ] + orders)).to(device)] + return timesteps_outer, orders + + def denoise_to_zero_fn(self, x, s): + """ + Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization. + """ + return self.data_prediction_fn(x, s) + + def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False): + """ + DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`. + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s`. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. 
        """
        ns = self.noise_schedule
        dims = x.dim()
        lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
        # h is the step size in the half-logSNR (lambda) domain.
        h = lambda_t - lambda_s
        log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
        sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
        alpha_t = torch.exp(log_alpha_t)

        if self.predict_x0:
            # Data-prediction (DPM-Solver++) form: phi_1 = e^{-h} - 1.
            phi_1 = torch.expm1(-h)
            if model_s is None:
                model_s = self.model_fn(x, s)
            x_t = (
                expand_dims(sigma_t / sigma_s, dims) * x
                - expand_dims(alpha_t * phi_1, dims) * model_s
            )
            if return_intermediate:
                return x_t, {'model_s': model_s}
            else:
                return x_t
        else:
            # Noise-prediction (DPM-Solver) form: phi_1 = e^{h} - 1.
            phi_1 = torch.expm1(h)
            if model_s is None:
                model_s = self.model_fn(x, s)
            x_t = (
                expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                - expand_dims(sigma_t * phi_1, dims) * model_s
            )
            if return_intermediate:
                return x_t, {'model_s': model_s}
            else:
                return x_t

    def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False,
                                            solver_type='dpm_solver'):
        """
        Singlestep solver DPM-Solver-2 from time `s` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            r1: A `float`. The hyperparameter of the second-order solver.
            model_s: A pytorch tensor. The model function evaluated at time `s`.
                If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
            return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if solver_type not in ['dpm_solver', 'taylor']:
            raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
        if r1 is None:
            r1 = 0.5
        ns = self.noise_schedule
        dims = x.dim()
        lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
        # h is the step size in lambda (half-logSNR) space; s1 sits at fraction r1 of the step.
        h = lambda_t - lambda_s
        lambda_s1 = lambda_s + r1 * h
        s1 = ns.inverse_lambda(lambda_s1)
        log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(
            s1), ns.marginal_log_mean_coeff(t)
        sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)
        alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)

        if self.predict_x0:
            # Data-prediction (DPM-Solver++) phi coefficients.
            phi_11 = torch.expm1(-r1 * h)
            phi_1 = torch.expm1(-h)

            if model_s is None:
                model_s = self.model_fn(x, s)
            # Intermediate first-order step to s1, then a correction using model_s1 - model_s.
            x_s1 = (
                expand_dims(sigma_s1 / sigma_s, dims) * x
                - expand_dims(alpha_s1 * phi_11, dims) * model_s
            )
            model_s1 = self.model_fn(x_s1, s1)
            if solver_type == 'dpm_solver':
                x_t = (
                    expand_dims(sigma_t / sigma_s, dims) * x
                    - expand_dims(alpha_t * phi_1, dims) * model_s
                    - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s)
                )
            elif solver_type == 'taylor':
                x_t = (
                    expand_dims(sigma_t / sigma_s, dims) * x
                    - expand_dims(alpha_t * phi_1, dims) * model_s
                    + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (
                            model_s1 - model_s)
                )
        else:
            # Noise-prediction (DPM-Solver) phi coefficients.
            phi_11 = torch.expm1(r1 * h)
            phi_1 = torch.expm1(h)

            if model_s is None:
                model_s = self.model_fn(x, s)
            x_s1 = (
                expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
                - expand_dims(sigma_s1 * phi_11, dims) * model_s
            )
            model_s1 = self.model_fn(x_s1, s1)
            if solver_type == 'dpm_solver':
                x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                    - expand_dims(sigma_t * phi_1, dims) * model_s
                    - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s)
                )
            elif solver_type == 'taylor':
                x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                    - expand_dims(sigma_t * phi_1, dims) * model_s
                    - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s)
                )
        if return_intermediate:
            return x_t, {'model_s': model_s, 'model_s1': model_s1}
        else:
            return x_t

    def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None,
                                           return_intermediate=False, solver_type='dpm_solver'):
        """
        Singlestep solver DPM-Solver-3 from time `s` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            r1: A `float`. The hyperparameter of the third-order solver.
            r2: A `float`. The hyperparameter of the third-order solver.
            model_s: A pytorch tensor. The model function evaluated at time `s`.
                If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
            model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).
                If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.
            return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if solver_type not in ['dpm_solver', 'taylor']:
            raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
        if r1 is None:
            r1 = 1. / 3.
        if r2 is None:
            r2 = 2. / 3.
        ns = self.noise_schedule
        dims = x.dim()
        lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
        # h is the step size in lambda space; s1 and s2 sit at fractions r1 and r2 of the step.
        h = lambda_t - lambda_s
        lambda_s1 = lambda_s + r1 * h
        lambda_s2 = lambda_s + r2 * h
        s1 = ns.inverse_lambda(lambda_s1)
        s2 = ns.inverse_lambda(lambda_s2)
        log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(
            s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)
        sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(
            s2), ns.marginal_std(t)
        alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)

        if self.predict_x0:
            # Data-prediction (DPM-Solver++) phi coefficients.
            phi_11 = torch.expm1(-r1 * h)
            phi_12 = torch.expm1(-r2 * h)
            phi_1 = torch.expm1(-h)
            phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.
            phi_2 = phi_1 / h + 1.
            phi_3 = phi_2 / h - 0.5

            if model_s is None:
                model_s = self.model_fn(x, s)
            if model_s1 is None:
                x_s1 = (
                    expand_dims(sigma_s1 / sigma_s, dims) * x
                    - expand_dims(alpha_s1 * phi_11, dims) * model_s
                )
                model_s1 = self.model_fn(x_s1, s1)
            x_s2 = (
                expand_dims(sigma_s2 / sigma_s, dims) * x
                - expand_dims(alpha_s2 * phi_12, dims) * model_s
                + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s)
            )
            model_s2 = self.model_fn(x_s2, s2)
            if solver_type == 'dpm_solver':
                x_t = (
                    expand_dims(sigma_t / sigma_s, dims) * x
                    - expand_dims(alpha_t * phi_1, dims) * model_s
                    + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s)
                )
            elif solver_type == 'taylor':
                # First- and second-order finite differences of the model outputs.
                D1_0 = (1. / r1) * (model_s1 - model_s)
                D1_1 = (1. / r2) * (model_s2 - model_s)
                D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
                D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
                x_t = (
                    expand_dims(sigma_t / sigma_s, dims) * x
                    - expand_dims(alpha_t * phi_1, dims) * model_s
                    + expand_dims(alpha_t * phi_2, dims) * D1
                    - expand_dims(alpha_t * phi_3, dims) * D2
                )
        else:
            # Noise-prediction (DPM-Solver) phi coefficients.
            phi_11 = torch.expm1(r1 * h)
            phi_12 = torch.expm1(r2 * h)
            phi_1 = torch.expm1(h)
            phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.
            phi_2 = phi_1 / h - 1.
            phi_3 = phi_2 / h - 0.5

            if model_s is None:
                model_s = self.model_fn(x, s)
            if model_s1 is None:
                x_s1 = (
                    expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
                    - expand_dims(sigma_s1 * phi_11, dims) * model_s
                )
                model_s1 = self.model_fn(x_s1, s1)
            x_s2 = (
                expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x
                - expand_dims(sigma_s2 * phi_12, dims) * model_s
                - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s)
            )
            model_s2 = self.model_fn(x_s2, s2)
            if solver_type == 'dpm_solver':
                x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                    - expand_dims(sigma_t * phi_1, dims) * model_s
                    - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s)
                )
            elif solver_type == 'taylor':
                D1_0 = (1. / r1) * (model_s1 - model_s)
                D1_1 = (1. / r2) * (model_s2 - model_s)
                D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
                D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
                x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                    - expand_dims(sigma_t * phi_1, dims) * model_s
                    - expand_dims(sigma_t * phi_2, dims) * D1
                    - expand_dims(sigma_t * phi_3, dims) * D2
                )

        if return_intermediate:
            return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}
        else:
            return x_t

    def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"):
        """
        Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            model_prev_list: A list of pytorch tensor. The previous computed model values.
            t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if solver_type not in ['dpm_solver', 'taylor']:
            raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
        ns = self.noise_schedule
        dims = x.dim()
        model_prev_1, model_prev_0 = model_prev_list
        t_prev_1, t_prev_0 = t_prev_list
        lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(
            t_prev_0), ns.marginal_lambda(t)
        log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
        sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
        alpha_t = torch.exp(log_alpha_t)

        h_0 = lambda_prev_0 - lambda_prev_1
        h = lambda_t - lambda_prev_0
        r0 = h_0 / h
        # First finite difference of the two previous model outputs in lambda space.
        D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
        if self.predict_x0:
            if solver_type == 'dpm_solver':
                x_t = (
                    expand_dims(sigma_t / sigma_prev_0, dims) * x
                    - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
                    - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0
                )
            elif solver_type == 'taylor':
                x_t = (
                    expand_dims(sigma_t / sigma_prev_0, dims) * x
                    - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
                    + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0
                )
        else:
            if solver_type == 'dpm_solver':
                x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
                    - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
                    - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0
                )
            elif solver_type == 'taylor':
                x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
                    - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
                    - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0
                )
        return x_t

    def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'):
        """
        Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            model_prev_list: A list of pytorch tensor. The previous computed model values.
            t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        ns = self.noise_schedule
        dims = x.dim()
        model_prev_2, model_prev_1, model_prev_0 = model_prev_list
        t_prev_2, t_prev_1, t_prev_0 = t_prev_list
        lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(
            t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
        log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
        sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
        alpha_t = torch.exp(log_alpha_t)

        h_1 = lambda_prev_1 - lambda_prev_2
        h_0 = lambda_prev_0 - lambda_prev_1
        h = lambda_t - lambda_prev_0
        r0, r1 = h_0 / h, h_1 / h
        # First and second divided differences of the three previous model outputs in lambda space.
        D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
        D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2)
        D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1)
        D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1)
        # NOTE(review): unlike the second-order multistep update, `solver_type` is not
        # validated here and both branches use the same (taylor-style) formulas.
        if self.predict_x0:
            x_t = (
                expand_dims(sigma_t / sigma_prev_0, dims) * x
                - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
                + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1
                - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h ** 2 - 0.5), dims) * D2
            )
        else:
            x_t = (
                expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
                - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
                - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1
                - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h ** 2 - 0.5), dims) * D2
            )
        return x_t

    def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None,
                                     r2=None):
        """
        Singlestep DPM-Solver with the order `order` from time `s` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
            return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
            r1: A `float`. The hyperparameter of the second-order or third-order solver.
            r2: A `float`. The hyperparameter of the third-order solver.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if order == 1:
            return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
        elif order == 2:
            return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate,
                                                            solver_type=solver_type, r1=r1)
        elif order == 3:
            return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate,
                                                           solver_type=solver_type, r1=r1, r2=r2)
        else:
            raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))

    def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'):
        """
        Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            model_prev_list: A list of pytorch tensor. The previous computed model values.
            t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if order == 1:
            # Reuse the last stored model evaluation rather than re-evaluating at t_prev_list[-1].
            return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])
        elif order == 2:
            return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
        elif order == 3:
            return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
        else:
            raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))

    def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5,
                            solver_type='dpm_solver'):
        """
        The adaptive step size solver based on singlestep DPM-Solver.
        Args:
            x: A pytorch tensor. The initial value at time `t_T`.
            order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.
            t_T: A `float`. The starting time of the sampling (default is T).
            t_0: A `float`. The ending time of the sampling (default is epsilon).
            h_init: A `float`. The initial step size (for logSNR).
            atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1].
            rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.
            theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1].
            t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the
                current time and `t_0` is less than `t_err`. The default setting is 1e-5.
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend to use 'dpm_solver' type.
        Returns:
            x_0: A pytorch tensor. The approximated solution at time `t_0`.
        [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021.
        """
        ns = self.noise_schedule
        s = t_T * torch.ones((x.shape[0],)).to(x)
        lambda_s = ns.marginal_lambda(s)
        lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))
        h = h_init * torch.ones_like(s).to(x)
        x_prev = x
        nfe = 0
        # Pair a lower-order estimate (error reference) with a higher-order update.
        if order == 2:
            r1 = 0.5
            lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)
            higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
                                                                                              solver_type=solver_type,
                                                                                              **kwargs)
        elif order == 3:
            r1, r2 = 1. / 3., 2. / 3.
            lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
                                                                                   return_intermediate=True,
                                                                                   solver_type=solver_type)
            higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2,
                                                                                             solver_type=solver_type,
                                                                                             **kwargs)
        else:
            raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order))
        while torch.abs((s - t_0)).mean() > t_err:
            t = ns.inverse_lambda(lambda_s + h)
            x_lower, lower_noise_kwargs = lower_update(x, s, t)
            x_higher = higher_update(x, s, t, **lower_noise_kwargs)
            delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))
            norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))
            E = norm_fn((x_higher - x_lower) / delta).max()
            # Accept the step only if the scaled local error E is within tolerance.
            if torch.all(E <= 1.):
                x = x_higher
                s = t
                x_prev = x_lower
                lambda_s = ns.marginal_lambda(s)
            # Step-size update rule from [1], clipped so we never step past lambda_0.
            h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)
            nfe += order
        # NOTE(review): debug output on every call; consider routing through `logging`.
        print('adaptive solver nfe', nfe)
        return x

    def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
               method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
               atol=0.0078, rtol=0.05,
               ):
        """
        Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.
+ ===================================================== + We support the following algorithms for both noise prediction model and data prediction model: + - 'singlestep': + Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver. + We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps). + The total number of function evaluations (NFE) == `steps`. + Given a fixed NFE == `steps`, the sampling procedure is: + - If `order` == 1: + - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM). + - If `order` == 2: + - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling. + - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2. + - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. + - If `order` == 3: + - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. + - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. + - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1. + - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2. + - 'multistep': + Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`. + We initialize the first `order` values by lower order multistep solvers. + Given a fixed NFE == `steps`, the sampling procedure is: + Denote K = steps. + - If `order` == 1: + - We use K steps of DPM-Solver-1 (i.e. DDIM). + - If `order` == 2: + - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2. + - If `order` == 3: + - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3. 
            - 'singlestep_fixed':
                Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).
                We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
            - 'adaptive':
                Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
                We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.
                You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs
                (NFE) and the sample quality.
                - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
                - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.
        =====================================================
        Some advice for choosing the algorithm:
            - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
                Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`.
                e.g.
                >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False)
                >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
                        skip_type='time_uniform', method='singlestep')
            - For **guided sampling with large guidance scale** by DPMs:
                Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`.
                e.g.
                >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)
                >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
                        skip_type='time_uniform', method='multistep')
        We support three types of `skip_type`:
            - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images**
            - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**.
            - 'time_quadratic': quadratic time for the time steps.
        =====================================================
        Args:
            x: A pytorch tensor. The initial value at time `t_start`
                e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
            steps: A `int`. The total number of function evaluations (NFE).
            t_start: A `float`. The starting time of the sampling.
                If `t_start` is None, we use self.noise_schedule.T (default is 1.0).
            t_end: A `float`. The ending time of the sampling.
                If `t_end` is None, we use 1. / self.noise_schedule.total_N.
                e.g. if total_N == 1000, we have `t_end` == 1e-3.
                For discrete-time DPMs:
                    - We recommend `t_end` == 1. / self.noise_schedule.total_N.
                For continuous-time DPMs:
                    - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
            order: A `int`. The order of DPM-Solver.
            skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
            method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
            denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.
                Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).
                This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and
                score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID
                for diffusion models sampling by diffusion SDEs for low-resolutional images
                (such as CIFAR-10). However, we observed that such trick does not matter for
                high-resolutional images. As it needs an additional NFE, we do not recommend
                it for high-resolutional images.
            lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
                Only valid for `method=multistep` and `steps < 15`. We empirically find that
                this trick is a key to stabilizing the sampling by DPM-Solver with very few steps
                (especially for steps <= 10). So we recommend to set it to be `True`.
            solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`.
            atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
            rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
        Returns:
            x_end: A pytorch tensor. The approximated solution at time `t_end`.
        """
        t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
        t_T = self.noise_schedule.T if t_start is None else t_start
        device = x.device
        if method == 'adaptive':
            with torch.no_grad():
                x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol,
                                             solver_type=solver_type)
        elif method == 'multistep':
            assert steps >= order
            timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
            assert timesteps.shape[0] - 1 == steps
            with torch.no_grad():
                vec_t = timesteps[0].expand((x.shape[0]))
                model_prev_list = [self.model_fn(x, vec_t)]
                t_prev_list = [vec_t]
                # Init the first `order` values by lower order multistep DPM-Solver.
                for init_order in tqdm(range(1, order), desc="DPM init order"):
                    vec_t = timesteps[init_order].expand(x.shape[0])
                    x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order,
                                                         solver_type=solver_type)
                    model_prev_list.append(self.model_fn(x, vec_t))
                    t_prev_list.append(vec_t)
                # Compute the remaining values by `order`-th order multistep DPM-Solver.
+ for step in tqdm(range(order, steps + 1), desc="DPM multistep"): + vec_t = timesteps[step].expand(x.shape[0]) + if lower_order_final and steps < 15: + step_order = min(order, steps + 1 - step) + else: + step_order = order + x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order, + solver_type=solver_type) + for i in range(order - 1): + t_prev_list[i] = t_prev_list[i + 1] + model_prev_list[i] = model_prev_list[i + 1] + t_prev_list[-1] = vec_t + # We do not need to evaluate the final model value. + if step < steps: + model_prev_list[-1] = self.model_fn(x, vec_t) + elif method in ['singlestep', 'singlestep_fixed']: + if method == 'singlestep': + timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, + skip_type=skip_type, + t_T=t_T, t_0=t_0, + device=device) + elif method == 'singlestep_fixed': + K = steps // order + orders = [order, ] * K + timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device) + for i, order in enumerate(orders): + t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1] + timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(), + N=order, device=device) + lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner) + vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0]) + h = lambda_inner[-1] - lambda_inner[0] + r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h + r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h + x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2) + if denoise_to_zero: + x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0) + return x + + +############################################################# +# other utility functions +############################################################# + +def interpolate_fn(x, xp, yp): + 
def interpolate_fn(x, xp, yp):
    """
    A piecewise linear function y = f(x), using xp and yp as keypoints.
    We implement f(x) in a differentiable way (i.e. applicable for autograd).
    The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.)

    Args:
        x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
        xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
        yp: PyTorch tensor with shape [C, K].
    Returns:
        The function values f(x), with shape [N, C].
    """
    N, K = x.shape[0], xp.shape[1]
    # Concatenate the query x with the keypoints and sort each row; the rank
    # of x inside the sorted row identifies the keypoint interval containing x.
    all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
    sorted_all_x, x_indices = torch.sort(all_x, dim=2)
    # x was concatenated at position 0, so argmin over the sort permutation
    # recovers the sorted position of x itself.
    x_idx = torch.argmin(x_indices, dim=2)
    cand_start_idx = x_idx - 1
    # Clamp the segment choice to the outermost intervals so that queries
    # outside [xp[:, 0], xp[:, -1]] extrapolate linearly from the edge segment.
    start_idx = torch.where(
        torch.eq(x_idx, 0),
        torch.tensor(1, device=x.device),
        torch.where(
            torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
        ),
    )
    end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
    start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
    end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
    # start_idx2 indexes into the *original* (unsorted) keypoints to pick the
    # y values matching the chosen x segment.
    start_idx2 = torch.where(
        torch.eq(x_idx, 0),
        torch.tensor(0, device=x.device),
        torch.where(
            torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
        ),
    )
    y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
    start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
    end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
    # Standard linear interpolation on the selected segment.
    cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
    return cand
class DPMSolverSampler(object):
    """Wrapper that runs DPM-Solver (multistep, order 2) sampling for a latent
    diffusion model, mirroring the DDIM/PLMS sampler interface."""

    def __init__(self, model, **kwargs):
        super().__init__()
        self.model = model
        # Keep a detached float32 copy of the alpha-bar schedule on the
        # model's device before registering it.
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device)
        self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))

    def register_buffer(self, name, attr):
        """Attach `attr` to self; tensors are moved to CUDA only when CUDA exists.

        Fix: the original unconditionally called `.to("cuda")`, which crashes
        on CPU-only or HPU (Habana) PyTorch builds — the target of this repo.
        """
        if type(attr) == torch.Tensor:
            if torch.cuda.is_available() and attr.device != torch.device("cuda"):
                attr = attr.to(torch.device("cuda"))
        setattr(self, name, attr)

    @torch.no_grad()
    def sample(self,
               S,
               batch_size,
               shape,
               conditioning=None,
               callback=None,
               normals_sequence=None,
               img_callback=None,
               quantize_x0=False,
               eta=0.,
               mask=None,
               x0=None,
               temperature=1.,
               noise_dropout=0.,
               score_corrector=None,
               corrector_kwargs=None,
               verbose=True,
               x_T=None,
               log_every_t=100,
               unconditional_guidance_scale=1.,
               unconditional_conditioning=None,
               # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
               **kwargs
               ):
        """Draw `batch_size` latents of shape (C, H, W) in `S` solver steps.

        Returns (samples, None); the second slot matches the
        (samples, intermediates) tuple returned by the other samplers.
        """
        # Warn (do not fail) when the conditioning batch mismatches batch_size.
        if conditioning is not None:
            if isinstance(conditioning, dict):
                cbs = conditioning[list(conditioning.keys())[0]].shape[0]
                if cbs != batch_size:
                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
            else:
                if conditioning.shape[0] != batch_size:
                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")

        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)

        print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}')

        device = self.model.betas.device
        img = torch.randn(size, device=device) if x_T is None else x_T

        ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)

        # Wrap the LDM apply_model into the continuous-time interface expected
        # by DPM_Solver, with classifier-free guidance baked in.
        model_fn = model_wrapper(
            lambda x, t, c: self.model.apply_model(x, t, c),
            ns,
            model_type=MODEL_TYPES[self.model.parameterization],
            guidance_type="classifier-free",
            condition=conditioning,
            unconditional_condition=unconditional_conditioning,
            guidance_scale=unconditional_guidance_scale,
        )

        dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False)
        x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2,
                              lower_order_final=True)

        return x.to(device), None
class DPMPP2M_Sampler(Sampler):
    """DPM-Solver++(2M) sampler built on k-diffusion's CompVis denoiser wrappers."""

    def __init__(self, model, v_mode, **kwargs):
        super().__init__()
        self.model = model
        # v_mode selects the v-prediction wrapper; otherwise eps-prediction.
        if v_mode:
            self.model_wrap = K.external.CompVisVDenoiser(model)
        else:
            self.model_wrap = K.external.CompVisDenoiser(model)

    def generate_params(self, sigmas):
        """DPM-Solver++(2M)."""
        # Based on https://github.com/crowsonkb/k-diffusion/blob/v0.0.14/k_diffusion/sampling.py#L585
        # Precomputes, per step i, the update coefficients (a, b, c) and sigma
        # so the loop body reduces to x = a*x + b*denoised + c*old_denoised.
        # Returned tensor has shape [4, steps] (rows: a, b, c, sigma).
        device = sigmas.device
        sigmas = sigmas.cpu()
        def sigma_fn(t): return t.neg().exp()
        def t_fn(sigma): return sigma.log().neg()
        params = []
        for i in range(len(sigmas) - 1):
            sigma = sigmas[i]
            t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
            h = t_next - t
            a = sigma_fn(t_next) / sigma_fn(t)
            # First step (no history) and final step (sigma -> 0) fall back to
            # the single-step update: no old_denoised contribution.
            if i == 0 or sigmas[i + 1] == 0:
                b = 1.0
                c = 0.0
            else:
                h_last = t - t_fn(sigmas[i - 1])
                r = h_last / h
                b = 1 + 1 / (2 * r)
                c = 1 / (2 * r)
            b *= - (-h).expm1()
            c *= (-h).expm1()
            p = np.array([a.numpy(), b.numpy(), c.numpy(), sigma.numpy()])
            params.append(p)
        params = torch.Tensor(np.stack(params, axis=0)
                              ).transpose(0, 1).to(device)
        return params

    @torch.no_grad()
    def compile(self,
                S,
                shape,
                unconditional_guidance_scale=1.,
                batch_size=1,
                **kwargs
                ):
        """Precompute sigmas, per-step coefficients and the zero history buffer."""
        self.sigmas = self.model_wrap.get_sigmas(S)
        self.params = self.generate_params(self.sigmas)
        self.cond_scale = unconditional_guidance_scale
        self.old_denoised_zeros = self.sigmas.new_zeros([batch_size] + shape)
        # First sigma, used to scale the initial noise.
        self.rand_scale = self.params[3, 0].to(torch.float32).cpu()
        self.batch_size = batch_size

    def one_step(self, x, c_in, old_denoised, param_t):
        """One DPM++(2M) update: x <- a*x + b*denoised + c*old_denoised."""
        a, b, c, sigma = param_t.chunk(4)
        sigma = sigma.broadcast_to((self.batch_size)).contiguous()
        denoised = self.run_model(x, c_in, sigma)
        x = a * x + b * denoised + c * old_denoised
        return x, denoised

    def sampler_step(self, arg):
        """Loop body: consume the front parameter column, then rotate it away."""
        x, c_in, params, old_denoised = arg
        x, denoised = self.one_step(x, c_in, old_denoised, params[:, 0])
        params = torch.roll(params, shifts=-1, dims=1)
        return [x, c_in, params, denoised]

    def init_loop(self, x, c_in):
        # Fresh copies so repeated sampling runs do not share state.
        return [x, c_in, self.params.clone(), self.old_denoised_zeros.clone()]
denoised + + def sampler_step(self, arg): + x, c_in, params, old_denoised = arg + x, denoised = self.one_step(x, c_in, old_denoised, params[:, 0]) + params = torch.roll(params, shifts=-1, dims=1) + return [x, c_in, params, denoised] + + def init_loop(self, x, c_in): + return [x, c_in, self.params.clone(), self.old_denoised_zeros.clone()] diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/plms.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/plms.py new file mode 100644 index 0000000000000000000000000000000000000000..7002a365d27168ced0a04e9a4d83e088f8284eae --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/models/diffusion/plms.py @@ -0,0 +1,244 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm +from functools import partial + +from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like +from ldm.models.diffusion.sampling_util import norm_thresholding + + +class PLMSSampler(object): + def __init__(self, model, schedule="linear", **kwargs): + super().__init__() + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): + if ddim_eta != 0: + raise ValueError('ddim_eta must be 0 for PLMS') + self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) + alphas_cumprod = self.model.alphas_cumprod + assert alphas_cumprod.shape[0] == 
    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
        """Precompute the DDIM-style schedule buffers used by PLMS sampling.

        PLMS is deterministic, so a non-zero `ddim_eta` is rejected.
        """
        if ddim_eta != 0:
            raise ValueError('ddim_eta must be 0 for PLMS')
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
        # Detached float32 copy on the model's device.
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)

        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))

        # ddim sampling parameters
        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
                                                                                   ddim_timesteps=self.ddim_timesteps,
                                                                                   eta=ddim_eta,verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        # NOTE(review): np.sqrt(...) yields a NumPy array here, and
        # register_buffer only converts torch.Tensors, so this buffer stays a
        # NumPy array — confirm downstream indexing expects that.
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
        sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
            (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
                1 - self.alphas_cumprod / self.alphas_cumprod_prev))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
    @torch.no_grad()
    def sample(self,
               S,
               batch_size,
               shape,
               conditioning=None,
               callback=None,
               normals_sequence=None,
               img_callback=None,
               quantize_x0=False,
               eta=0.,
               mask=None,
               x0=None,
               temperature=1.,
               noise_dropout=0.,
               score_corrector=None,
               corrector_kwargs=None,
               verbose=True,
               x_T=None,
               log_every_t=100,
               unconditional_guidance_scale=1.,
               unconditional_conditioning=None,
               # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
               dynamic_threshold=None,
               **kwargs
               ):
        """Generate `batch_size` samples of latent `shape` (C, H, W) with PLMS.

        Builds the DDIM-style schedule for `S` steps, then delegates the
        actual loop to `plms_sampling`. Returns (samples, intermediates).
        """
        # Warn (do not fail) when the conditioning batch mismatches batch_size.
        if conditioning is not None:
            if isinstance(conditioning, dict):
                cbs = conditioning[list(conditioning.keys())[0]].shape[0]
                if cbs != batch_size:
                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
            else:
                if conditioning.shape[0] != batch_size:
                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")

        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)
        print(f'Data shape for PLMS sampling is {size}')

        samples, intermediates = self.plms_sampling(conditioning, size,
                                                    callback=callback,
                                                    img_callback=img_callback,
                                                    quantize_denoised=quantize_x0,
                                                    mask=mask, x0=x0,
                                                    ddim_use_original_steps=False,
                                                    noise_dropout=noise_dropout,
                                                    temperature=temperature,
                                                    score_corrector=score_corrector,
                                                    corrector_kwargs=corrector_kwargs,
                                                    x_T=x_T,
                                                    log_every_t=log_every_t,
                                                    unconditional_guidance_scale=unconditional_guidance_scale,
                                                    unconditional_conditioning=unconditional_conditioning,
                                                    dynamic_threshold=dynamic_threshold,
                                                    )
        return samples, intermediates
    @torch.no_grad()
    def plms_sampling(self, cond, shape,
                      x_T=None, ddim_use_original_steps=False,
                      callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, log_every_t=100,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None,
                      dynamic_threshold=None):
        """Core PLMS loop: iterate the timesteps in reverse, keeping a short
        history of eps predictions for the multistep update.

        Returns (final_img, intermediates) where intermediates logs `x_inter`
        and `pred_x0` every `log_every_t` indices.
        """
        device = self.model.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        if timesteps is None:
            timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
        elif timesteps is not None and not ddim_use_original_steps:
            # Caller asked for a subset: truncate the precomputed ddim schedule.
            subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
            timesteps = self.ddim_timesteps[:subset_end]

        intermediates = {'x_inter': [img], 'pred_x0': [img]}
        time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)
        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
        print(f"Running PLMS Sampling with {total_steps} timesteps")

        iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
        old_eps = []

        for i, step in enumerate(iterator):
            index = total_steps - i - 1
            ts = torch.full((b,), step, device=device, dtype=torch.long)
            # PLMS also needs the *next* timestep for its first (Euler) step.
            ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)

            if mask is not None:
                # Inpainting: keep the known region pinned to the (noised) x0.
                assert x0 is not None
                img_orig = self.model.q_sample(x0, ts)  # TODO: deterministic forward pass?
                img = img_orig * mask + (1. - mask) * img

            outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
                                      quantize_denoised=quantize_denoised, temperature=temperature,
                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
                                      corrector_kwargs=corrector_kwargs,
                                      unconditional_guidance_scale=unconditional_guidance_scale,
                                      unconditional_conditioning=unconditional_conditioning,
                                      old_eps=old_eps, t_next=ts_next,
                                      dynamic_threshold=dynamic_threshold)
            img, pred_x0, e_t = outs
            # Keep at most the last 3 eps predictions for the multistep formula.
            old_eps.append(e_t)
            if len(old_eps) >= 4:
                old_eps.pop(0)
            if callback: callback(i)
            if img_callback: img_callback(pred_x0, i)

            if index % log_every_t == 0 or index == total_steps - 1:
                intermediates['x_inter'].append(img)
                intermediates['pred_x0'].append(pred_x0)

        return img, intermediates
    @torch.no_grad()
    def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None,
                      dynamic_threshold=None):
        """One PLMS update. The multistep order grows with len(old_eps)
        (0 -> improved Euler, up to 3 -> 4th-order Adams-Bashforth).

        Returns (x_prev, pred_x0, e_t) so the caller can extend its history.
        """
        b, *_, device = *x.shape, x.device

        def get_model_output(x, t):
            # Classifier-free guidance: run cond/uncond in one batched forward.
            if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
                e_t = self.model.apply_model(x, t, c)
            else:
                x_in = torch.cat([x] * 2)
                t_in = torch.cat([t] * 2)
                c_in = torch.cat([unconditional_conditioning, c])
                e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
                e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)

            if score_corrector is not None:
                assert self.model.parameterization == "eps"
                e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)

            return e_t

        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas

        def get_x_prev_and_pred_x0(e_t, index):
            # select parameters corresponding to the currently considered timestep
            a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
            a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
            sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
            sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)

            # current prediction for x_0
            pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
            if quantize_denoised:
                pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
            if dynamic_threshold is not None:
                pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)
            # direction pointing to x_t
            dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
            noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
            if noise_dropout > 0.:
                noise = torch.nn.functional.dropout(noise, p=noise_dropout)
            x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
            return x_prev, pred_x0

        e_t = get_model_output(x, t)
        if len(old_eps) == 0:
            # Pseudo Improved Euler (2nd order)
            x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
            e_t_next = get_model_output(x_prev, t_next)
            e_t_prime = (e_t + e_t_next) / 2
        elif len(old_eps) == 1:
            # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
            e_t_prime = (3 * e_t - old_eps[-1]) / 2
        elif len(old_eps) == 2:
            # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
            e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
        elif len(old_eps) >= 3:
            # 4th order Pseudo Linear Multistep (Adams-Bashforth)
            e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24

        x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)

        return x_prev, pred_x0, e_t
class Sampler(object):
    """Base class for k-diffusion style samplers using classifier-free guidance."""

    def __init__(self, **kwargs):
        super().__init__()

    @torch.no_grad()
    def compile(self, S, shape, **kwargs):
        """Hook for subclasses to precompute schedules; no-op by default."""
        pass

    def run_model(self, x, c_in, sigma):
        """Evaluate the wrapped model on a stacked uncond/cond batch and blend
        the two halves with `self.cond_scale` (classifier-free guidance)."""
        doubled_x = torch.cat((x, x))
        doubled_sigma = torch.cat((sigma, sigma))
        out = self.model_wrap(doubled_x, doubled_sigma, cond=c_in)
        uncond_out, cond_out = out.chunk(2)
        return uncond_out + (cond_out - uncond_out) * self.cond_scale
def append_dims(x, target_dims):
    """Append trailing singleton dimensions to `x` until it has `target_dims` dims.

    From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py"""
    dims_to_append = target_dims - x.ndim
    if dims_to_append < 0:
        raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less')
    out = x
    for _ in range(dims_to_append):
        out = out[..., None]
    return out


def norm_thresholding(x0, value):
    """Rescale each sample of `x0` so its per-sample RMS norm is capped by `value`."""
    rms = x0.pow(2).flatten(1).mean(dim=1).sqrt()
    floor = rms.clamp(min=value)
    scale = value / append_dims(floor, x0.ndim)
    return x0 * scale


def spatial_norm_thresholding(x0, value):
    """Per-pixel variant: cap the RMS over the channel axis of a (b, c, h, w) tensor."""
    per_pixel = x0.pow(2).mean(dim=1, keepdim=True).sqrt().clamp(min=value)
    return x0 * (value / per_pixel)
class TimestepBlock(nn.Module):
    """
    Any module where forward() takes timestep embeddings as a second argument.
    """

    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """


class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    A sequential module that passes timestep embeddings to the children that
    support it as an extra input.
    """

    def forward(self, x, emb, context=None):
        # Dispatch per child: timestep-aware blocks get `emb`, cross-attention
        # transformers get `context`, all other layers just receive `x`.
        for layer in self:
            if isinstance(layer, TimestepBlock):
                x = layer(x, emb)
            elif isinstance(layer, SpatialTransformer):
                x = layer(x, context)
            else:
                x = layer(x)
        return x
class Upsample(nn.Module):
    """
    A 2x nearest-neighbour upsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 upsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            # 3D signals: keep depth, double only the inner two spatial dims.
            target_size = (x.shape[2], x.shape[3] * 2, x.shape[4] * 2)
            out = F.interpolate(x, target_size, mode="nearest")
        else:
            out = F.interpolate(x, scale_factor=2, mode="nearest")
        return self.conv(out) if self.use_conv else out


class TransposedUpsample(nn.Module):
    'Learned 2x upsampling without padding'

    def __init__(self, channels, out_channels=None, ks=5):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        # Stride-2 transposed conv performs the learned 2x upsample.
        self.up = nn.ConvTranspose2d(self.channels, self.out_channels, kernel_size=ks, stride=2)

    def forward(self, x):
        return self.up(x)
class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        # 3D: keep depth, stride only over the inner two spatial dims.
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
            )
        else:
            # Average pooling cannot change the channel count.
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
class ResBlock(TimestepBlock):
    """
    A residual block that can optionally change the number of channels.
    :param channels: the number of input channels.
    :param emb_channels: the number of timestep embedding channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: if True, use gradient checkpointing on this module.
    :param up: if True, use this block for upsampling.
    :param down: if True, use this block for downsampling.
    """

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
        up=False,
        down=False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm

        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )

        self.updown = up or down

        # When the block also resamples, both the hidden path (h_upd) and the
        # skip path (x_upd) must be resampled identically.
        if up:
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            linear(
                emb_channels,
                # scale-shift norm needs two vectors (scale and shift).
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            # Zero-initialized final conv: the block starts as (near) identity.
            zero_module(
                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
            ),
        )

        # Skip connection: identity when channels match, otherwise a 3x3 or
        # 1x1 conv depending on use_conv.
        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)

    def forward(self, x, emb):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.
        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :return: an [N x C x ...] Tensor of outputs.
        """
        return checkpoint(
            self._forward, (x, emb), self.parameters(), self.use_checkpoint
        )


    def _forward(self, x, emb):
        if self.updown:
            # Resample between the norm/activation and the conv so both the
            # hidden and the skip paths see the same resolution.
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)
        emb_out = self.emb_layers(emb).type(h.dtype)
        # Broadcast the embedding over all spatial dims.
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]
        if self.use_scale_shift_norm:
            # FiLM-style conditioning: h = norm(h) * (1 + scale) + shift.
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h
class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.
    Originally ported from here, but adapted to the N-d case.
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        # num_head_channels, when given, overrides num_heads.
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)
        # One 1x1 conv produces q, k and v in a single pass.
        self.qkv = conv_nd(1, channels, channels * 3, 1)
        if use_new_attention_order:
            # split qkv before split heads
            self.attention = QKVAttention(self.num_heads)
        else:
            # split heads before split qkv
            self.attention = QKVAttentionLegacy(self.num_heads)

        # Zero-initialized projection: the block starts as an identity mapping.
        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))

    def forward(self, x):
        return checkpoint(self._forward, (x,), self.parameters(), True)  # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
        #return pt_checkpoint(self._forward, x)  # pytorch

    def _forward(self, x):
        # Flatten all spatial dims to one axis, attend, then restore the shape.
        b, c, *spatial = x.shape
        x = x.reshape(b, c, -1)
        qkv = self.qkv(self.norm(x))
        h = self.attention(qkv)
        h = self.proj_out(h)
        return (x + h).reshape(b, c, *spatial)


def count_flops_attn(model, _x, y):
    """
    A counter for the `thop` package to count the operations in an
    attention operation.
    Meant to be used like:
        macs, params = thop.profile(
            model,
            inputs=(inputs, timestamps),
            custom_ops={QKVAttention: QKVAttention.count_flops},
        )
    """
    b, c, *spatial = y[0].shape
    num_spatial = int(np.prod(spatial))
    # We perform two matmuls with the same number of ops.
    # The first computes the weight matrix, the second computes
    # the combination of the value vectors.
    matmul_ops = 2 * b * (num_spatial ** 2) * c
    model.total_ops += th.DoubleTensor([matmul_ops])
Matches legacy QKVAttention + input/output heads shaping + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. + """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + # legacy layout: split heads first, then cut each head's width into q, k, v + q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) + # ch ** -0.25 applied to BOTH q and k multiplies out to the usual 1/sqrt(ch) attention scale + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", q * scale, k * scale + ) # More stable with f16 than dividing afterwards + # softmax in fp32 for accuracy, then cast back to the working dtype + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class QKVAttention(nn.Module): + """ + A module which performs QKV attention and splits in a different order. + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. 
+ """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.chunk(3, dim=1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", + (q * scale).view(bs * self.n_heads, ch, length), + (k * scale).view(bs * self.n_heads, ch, length), + ) # More stable with f16 than dividing afterwards + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class UNetModel(nn.Module): + """ + The full UNet model with attention and timestep embedding. + :param in_channels: channels in the input Tensor. + :param model_channels: base channel count for the model. + :param out_channels: channels in the output Tensor. + :param num_res_blocks: number of residual blocks per downsample. + :param attention_resolutions: a collection of downsample rates at which + attention will take place. May be a set, list, or tuple. + For example, if this contains 4, then at 4x downsampling, attention + will be used. + :param dropout: the dropout probability. + :param channel_mult: channel multiplier for each level of the UNet. + :param conv_resample: if True, use learned convolutions for upsampling and + downsampling. + :param dims: determines if the signal is 1D, 2D, or 3D. + :param num_classes: if specified (as an int), then this model will be + class-conditional with `num_classes` classes. + :param use_checkpoint: use gradient checkpointing to reduce memory usage. + :param num_heads: the number of attention heads in each attention layer. + :param num_heads_channels: if specified, ignore num_heads and instead use + a fixed channel width per attention head. + :param num_heads_upsample: works with num_heads to set a different number + of heads for upsampling. Deprecated. 
+ :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. + :param resblock_updown: use residual blocks for up/downsampling. + :param use_new_attention_order: use a different attention pattern for potentially + increased efficiency. + """ + + def __init__( + self, + image_size, + in_channels, + model_channels, + out_channels, + num_res_blocks, + attention_resolutions, + dropout=0, + channel_mult=(1, 2, 4, 8), + conv_resample=True, + dims=2, + num_classes=None, + use_checkpoint=False, + use_fp16=False, + num_heads=-1, + num_head_channels=-1, + num_heads_upsample=-1, + use_scale_shift_norm=False, + resblock_updown=False, + use_new_attention_order=False, + use_spatial_transformer=False, # custom transformer support + transformer_depth=1, # custom transformer support + context_dim=None, # custom transformer support + n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model + legacy=True, + disable_self_attentions=None, + num_attention_blocks=None, + disable_middle_self_attn=False, + use_linear_in_transformer=False, + ): + super().__init__() + if use_spatial_transformer: + assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' + + if context_dim is not None: + assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
+ from omegaconf.listconfig import ListConfig + if type(context_dim) == ListConfig: + context_dim = list(context_dim) + + if num_heads_upsample == -1: + num_heads_upsample = num_heads + + if num_heads == -1: + assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' + + if num_head_channels == -1: + assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' + + self.image_size = image_size + self.in_channels = in_channels + self.model_channels = model_channels + self.out_channels = out_channels + if isinstance(num_res_blocks, int): + self.num_res_blocks = len(channel_mult) * [num_res_blocks] + else: + if len(num_res_blocks) != len(channel_mult): + raise ValueError("provide num_res_blocks either as an int (globally constant) or " + "as a list/tuple (per-level) with the same length as channel_mult") + self.num_res_blocks = num_res_blocks + if disable_self_attentions is not None: + # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not + assert len(disable_self_attentions) == len(channel_mult) + if num_attention_blocks is not None: + assert len(num_attention_blocks) == len(self.num_res_blocks) + assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) + print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
" + f"This option has LESS priority than attention_resolutions {attention_resolutions}, " + f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " + f"attention will still not be set.") + + self.attention_resolutions = attention_resolutions + self.dropout = dropout + self.channel_mult = channel_mult + self.conv_resample = conv_resample + self.num_classes = num_classes + self.use_checkpoint = use_checkpoint + self.dtype = th.float16 if use_fp16 else th.float32 + self.num_heads = num_heads + self.num_head_channels = num_head_channels + self.num_heads_upsample = num_heads_upsample + self.predict_codebook_ids = n_embed is not None + + time_embed_dim = model_channels * 4 + self.time_embed = nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + + if self.num_classes is not None: + if isinstance(self.num_classes, int): + self.label_emb = nn.Embedding(num_classes, time_embed_dim) + elif self.num_classes == "continuous": + print("setting up linear c_adm embedding layer") + self.label_emb = nn.Linear(1, time_embed_dim) + else: + raise ValueError() + + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(dims, in_channels, model_channels, 3, padding=1) + ) + ] + ) + self._feature_size = model_channels + input_block_chans = [model_channels] + ch = model_channels + ds = 1 + for level, mult in enumerate(channel_mult): + for nr in range(self.num_res_blocks[level]): + layers = [ + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=mult * model_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = mult * model_channels + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else 
num_head_channels + if exists(disable_self_attentions): + disabled_sa = disable_self_attentions[level] + else: + disabled_sa = False + + if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, + disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, + use_checkpoint=use_checkpoint + ) + ) + self.input_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + out_ch = ch + self.input_blocks.append( + TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=True, + ) + if resblock_updown + else Downsample( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ) + ch = out_ch + input_block_chans.append(ch) + ds *= 2 + self._feature_size += ch + + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + self.middle_block = TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, + 
disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, + use_checkpoint=use_checkpoint + ), + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + ) + self._feature_size += ch + + self.output_blocks = nn.ModuleList([]) + for level, mult in list(enumerate(channel_mult))[::-1]: + for i in range(self.num_res_blocks[level] + 1): + ich = input_block_chans.pop() + layers = [ + ResBlock( + ch + ich, + time_embed_dim, + dropout, + out_channels=model_channels * mult, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = model_channels * mult + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + if exists(disable_self_attentions): + disabled_sa = disable_self_attentions[level] + else: + disabled_sa = False + + if not exists(num_attention_blocks) or i < num_attention_blocks[level]: + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads_upsample, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, + disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, + use_checkpoint=use_checkpoint + ) + ) + if level and i == self.num_res_blocks[level]: + out_ch = ch + layers.append( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + up=True, + ) + if resblock_updown + else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) + ) + ds //= 2 + 
self.output_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), + ) + if self.predict_codebook_ids: + self.id_predictor = nn.Sequential( + normalization(ch), + conv_nd(dims, model_channels, n_embed, 1), + #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits + ) + + def convert_to_fp16(self): + """ + Convert the torso of the model to float16. + """ + self.input_blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + self.output_blocks.apply(convert_module_to_f16) + + def convert_to_fp32(self): + """ + Convert the torso of the model to float32. + """ + self.input_blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + self.output_blocks.apply(convert_module_to_f32) + + def forward(self, x, timesteps=None, context=None, y=None,**kwargs): + """ + Apply the model to an input batch. + :param x: an [N x C x ...] Tensor of inputs. + :param timesteps: a 1-D batch of timesteps. + :param context: conditioning plugged in via crossattn + :param y: an [N] Tensor of labels, if class-conditional. + :return: an [N x C x ...] Tensor of outputs. 
+ """ + assert (y is not None) == ( + self.num_classes is not None + ), "must specify y if and only if the model is class-conditional" + hs = [] + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + emb = self.time_embed(t_emb) + + if self.num_classes is not None: + assert y.shape[0] == x.shape[0] + emb = emb + self.label_emb(y) + + h = x.type(self.dtype) + for module in self.input_blocks: + h = module(h, emb, context) + hs.append(h) + h = self.middle_block(h, emb, context) + for module in self.output_blocks: + h = th.cat([h, hs.pop()], dim=1) + h = module(h, emb, context) + h = h.type(x.dtype) + if self.predict_codebook_ids: + return self.id_predictor(h) + else: + return self.out(h) diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/distributions/__init__.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/distributions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/distributions/distributions.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/distributions/distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..f2b8ef901130efc171aa69742ca0244d94d3f2e9 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/distributions/distributions.py @@ -0,0 +1,92 @@ +import torch +import numpy as np + + +class AbstractDistribution: + def sample(self): + raise NotImplementedError() + + def mode(self): + raise NotImplementedError() + + +class DiracDistribution(AbstractDistribution): + def __init__(self, value): + self.value = value + + def sample(self): + return self.value + + def 
mode(self): + return self.value + + +class DiagonalGaussianDistribution(object): + # Diagonal Gaussian whose [mean, logvar] are concatenated along dim 1 of `parameters`. + def __init__(self, parameters, deterministic=False): + self.parameters = parameters + self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) + # clamp keeps the exp() calls below finite and well-conditioned + self.logvar = torch.clamp(self.logvar, -30.0, 20.0) + self.deterministic = deterministic + self.std = torch.exp(0.5 * self.logvar) + self.var = torch.exp(self.logvar) + if self.deterministic: + # zero variance: sample() collapses onto the mean + self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) + + def sample(self): + # reparameterized draw: mean + std * eps with eps ~ N(0, I) + x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device) + return x + + def kl(self, other=None): + # KL(self || N(0, I)) when other is None, else KL(self || other); summed over dims [1, 2, 3] + if self.deterministic: + return torch.Tensor([0.]) + else: + if other is None: + return 0.5 * torch.sum(torch.pow(self.mean, 2) + + self.var - 1.0 - self.logvar, + dim=[1, 2, 3]) + else: + return 0.5 * torch.sum( + torch.pow(self.mean - other.mean, 2) / other.var + + self.var / other.var - 1.0 - self.logvar + other.logvar, + dim=[1, 2, 3]) + + def nll(self, sample, dims=[1,2,3]): + # negative log-likelihood of `sample`, summed over `dims` + # NOTE(review): mutable default list is shared across calls; read-only here, but a tuple would be safer + if self.deterministic: + return torch.Tensor([0.]) + logtwopi = np.log(2.0 * np.pi) + return 0.5 * torch.sum( + logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, + dim=dims) + + def mode(self): + return self.mean + + +def normal_kl(mean1, logvar1, mean2, logvar2): + """ + source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12 + Compute the KL divergence between two gaussians. + Shapes are automatically broadcasted, so batches can be compared to + scalars, among other use cases. + """ + # find any Tensor argument so scalar args can be converted onto its device/dtype below + tensor = None + for obj in (mean1, logvar1, mean2, logvar2): + if isinstance(obj, torch.Tensor): + tensor = obj + break + assert tensor is not None, "at least one argument must be a Tensor" + + # Force variances to be Tensors. Broadcasting helps convert scalars to + # Tensors, but it does not work for torch.exp(). 
+ logvar1, logvar2 = [ + x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor) + for x in (logvar1, logvar2) + ] + + return 0.5 * ( + -1.0 + + logvar2 + - logvar1 + + torch.exp(logvar1 - logvar2) + + ((mean1 - mean2) ** 2) * torch.exp(-logvar2) + ) diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/base_model.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/base_model.py new file mode 100644 index 0000000000000000000000000000000000000000..5cf430239b47ec5ec07531263f26f5c24a2311cd --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/base_model.py @@ -0,0 +1,16 @@ +import torch + + +class BaseModel(torch.nn.Module): + def load(self, path): + """Load model from file. + + Args: + path (str): file path + """ + parameters = torch.load(path, map_location=torch.device('cpu')) + + if "optimizer" in parameters: + parameters = parameters["model"] + + self.load_state_dict(parameters) diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/blocks.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..2145d18fa98060a618536d9a64fe6589e9be4f78 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/blocks.py @@ -0,0 +1,342 @@ +import torch +import torch.nn as nn + +from .vit import ( + _make_pretrained_vitb_rn50_384, + _make_pretrained_vitl16_384, + _make_pretrained_vitb16_384, + forward_vit, +) + +def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, 
use_readout="ignore",): + if backbone == "vitl16_384": + pretrained = _make_pretrained_vitl16_384( + use_pretrained, hooks=hooks, use_readout=use_readout + ) + scratch = _make_scratch( + [256, 512, 1024, 1024], features, groups=groups, expand=expand + ) # ViT-L/16 - 85.0% Top1 (backbone) + elif backbone == "vitb_rn50_384": + pretrained = _make_pretrained_vitb_rn50_384( + use_pretrained, + hooks=hooks, + use_vit_only=use_vit_only, + use_readout=use_readout, + ) + scratch = _make_scratch( + [256, 512, 768, 768], features, groups=groups, expand=expand + ) # ViT-H/16 - 85.0% Top1 (backbone) + elif backbone == "vitb16_384": + pretrained = _make_pretrained_vitb16_384( + use_pretrained, hooks=hooks, use_readout=use_readout + ) + scratch = _make_scratch( + [96, 192, 384, 768], features, groups=groups, expand=expand + ) # ViT-B/16 - 84.6% Top1 (backbone) + elif backbone == "resnext101_wsl": + pretrained = _make_pretrained_resnext101_wsl(use_pretrained) + scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand) # efficientnet_lite3 + elif backbone == "efficientnet_lite3": + pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable) + scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) # efficientnet_lite3 + else: + print(f"Backbone '{backbone}' not implemented") + assert False + + return pretrained, scratch + + +def _make_scratch(in_shape, out_shape, groups=1, expand=False): + scratch = nn.Module() + + out_shape1 = out_shape + out_shape2 = out_shape + out_shape3 = out_shape + out_shape4 = out_shape + if expand==True: + out_shape1 = out_shape + out_shape2 = out_shape*2 + out_shape3 = out_shape*4 + out_shape4 = out_shape*8 + + scratch.layer1_rn = nn.Conv2d( + in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + scratch.layer2_rn = nn.Conv2d( + in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + 
scratch.layer3_rn = nn.Conv2d( + in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + scratch.layer4_rn = nn.Conv2d( + in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups + ) + + return scratch + + +def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False): + efficientnet = torch.hub.load( + "rwightman/gen-efficientnet-pytorch", + "tf_efficientnet_lite3", + pretrained=use_pretrained, + exportable=exportable + ) + return _make_efficientnet_backbone(efficientnet) + + +def _make_efficientnet_backbone(effnet): + pretrained = nn.Module() + + pretrained.layer1 = nn.Sequential( + effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2] + ) + pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3]) + pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5]) + pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9]) + + return pretrained + + +def _make_resnet_backbone(resnet): + pretrained = nn.Module() + pretrained.layer1 = nn.Sequential( + resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1 + ) + + pretrained.layer2 = resnet.layer2 + pretrained.layer3 = resnet.layer3 + pretrained.layer4 = resnet.layer4 + + return pretrained + + +def _make_pretrained_resnext101_wsl(use_pretrained): + resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl") + return _make_resnet_backbone(resnet) + + + +class Interpolate(nn.Module): + """Interpolation module. + """ + + def __init__(self, scale_factor, mode, align_corners=False): + """Init. + + Args: + scale_factor (float): scaling + mode (str): interpolation mode + """ + super(Interpolate, self).__init__() + + self.interp = nn.functional.interpolate + self.scale_factor = scale_factor + self.mode = mode + self.align_corners = align_corners + + def forward(self, x): + """Forward pass. 
+ + Args: + x (tensor): input + + Returns: + tensor: interpolated data + """ + + x = self.interp( + x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners + ) + + return x + + +class ResidualConvUnit(nn.Module): + """Residual convolution module. + """ + + def __init__(self, features): + """Init. + + Args: + features (int): number of features + """ + super().__init__() + + # two 3x3 convs that preserve the channel count, so the skip add below is shape-safe + self.conv1 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True + ) + + self.conv2 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True + ) + + # single in-place ReLU instance reused for both activations + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + """Forward pass. + + Args: + x (tensor): input + + Returns: + tensor: output + """ + # pre-activation residual: relu -> conv1 -> relu -> conv2, then skip connection + out = self.relu(x) + out = self.conv1(out) + out = self.relu(out) + out = self.conv2(out) + + return out + x + + +class FeatureFusionBlock(nn.Module): + """Feature fusion block. + """ + + def __init__(self, features): + """Init. + + Args: + features (int): number of features + """ + super(FeatureFusionBlock, self).__init__() + + self.resConfUnit1 = ResidualConvUnit(features) + self.resConfUnit2 = ResidualConvUnit(features) + + def forward(self, *xs): + """Forward pass. + + Returns: + tensor: output + """ + # accepts 1 or 2 feature maps; the optional second input is refined and fused in + output = xs[0] + + if len(xs) == 2: + # NOTE(review): in-place += mutates xs[0]; appears intentional, but confirm callers/autograd tolerate it + output += self.resConfUnit1(xs[1]) + + output = self.resConfUnit2(output) + + # fixed 2x bilinear upsample of the fused features + output = nn.functional.interpolate( + output, scale_factor=2, mode="bilinear", align_corners=True + ) + + return output + + + + +class ResidualConvUnit_custom(nn.Module): + """Residual convolution module. + """ + + def __init__(self, features, activation, bn): + """Init. 
+ + Args: + features (int): number of features + """ + super().__init__() + + self.bn = bn + + self.groups=1 + + self.conv1 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups + ) + + self.conv2 = nn.Conv2d( + features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups + ) + + if self.bn==True: + self.bn1 = nn.BatchNorm2d(features) + self.bn2 = nn.BatchNorm2d(features) + + self.activation = activation + + self.skip_add = nn.quantized.FloatFunctional() + + def forward(self, x): + """Forward pass. + + Args: + x (tensor): input + + Returns: + tensor: output + """ + + out = self.activation(x) + out = self.conv1(out) + if self.bn==True: + out = self.bn1(out) + + out = self.activation(out) + out = self.conv2(out) + if self.bn==True: + out = self.bn2(out) + + if self.groups > 1: + out = self.conv_merge(out) + + return self.skip_add.add(out, x) + + # return out + x + + +class FeatureFusionBlock_custom(nn.Module): + """Feature fusion block. + """ + + def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True): + """Init. + + Args: + features (int): number of features + """ + super(FeatureFusionBlock_custom, self).__init__() + + self.deconv = deconv + self.align_corners = align_corners + + self.groups=1 + + self.expand = expand + out_features = features + if self.expand==True: + out_features = features//2 + + self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1) + + self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn) + self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn) + + self.skip_add = nn.quantized.FloatFunctional() + + def forward(self, *xs): + """Forward pass. 
+ + Returns: + tensor: output + """ + output = xs[0] + + if len(xs) == 2: + res = self.resConfUnit1(xs[1]) + output = self.skip_add.add(output, res) + # output += res + + output = self.resConfUnit2(output) + + output = nn.functional.interpolate( + output, scale_factor=2, mode="bilinear", align_corners=self.align_corners + ) + + output = self.out_conv(output) + + return output + diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/dpt_depth.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/dpt_depth.py new file mode 100644 index 0000000000000000000000000000000000000000..4e9aab5d2767dffea39da5b3f30e2798688216f1 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/dpt_depth.py @@ -0,0 +1,109 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .base_model import BaseModel +from .blocks import ( + FeatureFusionBlock, + FeatureFusionBlock_custom, + Interpolate, + _make_encoder, + forward_vit, +) + + +def _make_fusion_block(features, use_bn): + return FeatureFusionBlock_custom( + features, + nn.ReLU(False), + deconv=False, + bn=use_bn, + expand=False, + align_corners=True, + ) + + +class DPT(BaseModel): + def __init__( + self, + head, + features=256, + backbone="vitb_rn50_384", + readout="project", + channels_last=False, + use_bn=False, + ): + + super(DPT, self).__init__() + + self.channels_last = channels_last + + hooks = { + "vitb_rn50_384": [0, 1, 8, 11], + "vitb16_384": [2, 5, 8, 11], + "vitl16_384": [5, 11, 17, 23], + } + + # Instantiate backbone and reassemble blocks + self.pretrained, self.scratch = _make_encoder( + backbone, + features, + False, # Set to true of you want to train from scratch, uses ImageNet weights + groups=1, + expand=False, + exportable=False, + hooks=hooks[backbone], + 
use_readout=readout, + ) + + self.scratch.refinenet1 = _make_fusion_block(features, use_bn) + self.scratch.refinenet2 = _make_fusion_block(features, use_bn) + self.scratch.refinenet3 = _make_fusion_block(features, use_bn) + self.scratch.refinenet4 = _make_fusion_block(features, use_bn) + + self.scratch.output_conv = head + + + def forward(self, x): + if self.channels_last == True: + x.contiguous(memory_format=torch.channels_last) + + layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x) + + layer_1_rn = self.scratch.layer1_rn(layer_1) + layer_2_rn = self.scratch.layer2_rn(layer_2) + layer_3_rn = self.scratch.layer3_rn(layer_3) + layer_4_rn = self.scratch.layer4_rn(layer_4) + + path_4 = self.scratch.refinenet4(layer_4_rn) + path_3 = self.scratch.refinenet3(path_4, layer_3_rn) + path_2 = self.scratch.refinenet2(path_3, layer_2_rn) + path_1 = self.scratch.refinenet1(path_2, layer_1_rn) + + out = self.scratch.output_conv(path_1) + + return out + + +class DPTDepthModel(DPT): + def __init__(self, path=None, non_negative=True, **kwargs): + features = kwargs["features"] if "features" in kwargs else 256 + + head = nn.Sequential( + nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1), + Interpolate(scale_factor=2, mode="bilinear", align_corners=True), + nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1), + nn.ReLU(True), + nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), + nn.ReLU(True) if non_negative else nn.Identity(), + nn.Identity(), + ) + + super().__init__(head, **kwargs) + + if path is not None: + self.load(path) + + def forward(self, x): + return super().forward(x).squeeze(dim=1) + diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/midas_net.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/midas_net.py new file mode 100644 index 
0000000000000000000000000000000000000000..8a954977800b0a0f48807e80fa63041910e33c1f --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/midas_net.py @@ -0,0 +1,76 @@ +"""MidashNet: Network for monocular depth estimation trained by mixing several datasets. +This file contains code that is adapted from +https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py +""" +import torch +import torch.nn as nn + +from .base_model import BaseModel +from .blocks import FeatureFusionBlock, Interpolate, _make_encoder + + +class MidasNet(BaseModel): + """Network for monocular depth estimation. + """ + + def __init__(self, path=None, features=256, non_negative=True): + """Init. + + Args: + path (str, optional): Path to saved model. Defaults to None. + features (int, optional): Number of features. Defaults to 256. + backbone (str, optional): Backbone network for encoder. Defaults to resnet50 + """ + print("Loading weights: ", path) + + super(MidasNet, self).__init__() + + use_pretrained = False if path is None else True + + self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained) + + self.scratch.refinenet4 = FeatureFusionBlock(features) + self.scratch.refinenet3 = FeatureFusionBlock(features) + self.scratch.refinenet2 = FeatureFusionBlock(features) + self.scratch.refinenet1 = FeatureFusionBlock(features) + + self.scratch.output_conv = nn.Sequential( + nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1), + Interpolate(scale_factor=2, mode="bilinear"), + nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1), + nn.ReLU(True), + nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), + nn.ReLU(True) if non_negative else nn.Identity(), + ) + + if path: + self.load(path) + + def forward(self, x): + """Forward pass. 
+ + Args: + x (tensor): input data (image) + + Returns: + tensor: depth + """ + + layer_1 = self.pretrained.layer1(x) + layer_2 = self.pretrained.layer2(layer_1) + layer_3 = self.pretrained.layer3(layer_2) + layer_4 = self.pretrained.layer4(layer_3) + + layer_1_rn = self.scratch.layer1_rn(layer_1) + layer_2_rn = self.scratch.layer2_rn(layer_2) + layer_3_rn = self.scratch.layer3_rn(layer_3) + layer_4_rn = self.scratch.layer4_rn(layer_4) + + path_4 = self.scratch.refinenet4(layer_4_rn) + path_3 = self.scratch.refinenet3(path_4, layer_3_rn) + path_2 = self.scratch.refinenet2(path_3, layer_2_rn) + path_1 = self.scratch.refinenet1(path_2, layer_1_rn) + + out = self.scratch.output_conv(path_1) + + return torch.squeeze(out, dim=1) diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/midas_net_custom.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/midas_net_custom.py new file mode 100644 index 0000000000000000000000000000000000000000..50e4acb5e53d5fabefe3dde16ab49c33c2b7797c --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/midas_net_custom.py @@ -0,0 +1,128 @@ +"""MidashNet: Network for monocular depth estimation trained by mixing several datasets. +This file contains code that is adapted from +https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py +""" +import torch +import torch.nn as nn + +from .base_model import BaseModel +from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder + + +class MidasNet_small(BaseModel): + """Network for monocular depth estimation. 
+ """ + + def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True, + blocks={'expand': True}): + """Init. + + Args: + path (str, optional): Path to saved model. Defaults to None. + features (int, optional): Number of features. Defaults to 256. + backbone (str, optional): Backbone network for encoder. Defaults to resnet50 + """ + print("Loading weights: ", path) + + super(MidasNet_small, self).__init__() + + use_pretrained = False if path else True + + self.channels_last = channels_last + self.blocks = blocks + self.backbone = backbone + + self.groups = 1 + + features1=features + features2=features + features3=features + features4=features + self.expand = False + if "expand" in self.blocks and self.blocks['expand'] == True: + self.expand = True + features1=features + features2=features*2 + features3=features*4 + features4=features*8 + + self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable) + + self.scratch.activation = nn.ReLU(False) + + self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) + self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) + self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) + self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners) + + + self.scratch.output_conv = nn.Sequential( + nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups), + Interpolate(scale_factor=2, mode="bilinear"), + nn.Conv2d(features//2, 32, 
kernel_size=3, stride=1, padding=1), + self.scratch.activation, + nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), + nn.ReLU(True) if non_negative else nn.Identity(), + nn.Identity(), + ) + + if path: + self.load(path) + + + def forward(self, x): + """Forward pass. + + Args: + x (tensor): input data (image) + + Returns: + tensor: depth + """ + if self.channels_last==True: + print("self.channels_last = ", self.channels_last) + x.contiguous(memory_format=torch.channels_last) + + + layer_1 = self.pretrained.layer1(x) + layer_2 = self.pretrained.layer2(layer_1) + layer_3 = self.pretrained.layer3(layer_2) + layer_4 = self.pretrained.layer4(layer_3) + + layer_1_rn = self.scratch.layer1_rn(layer_1) + layer_2_rn = self.scratch.layer2_rn(layer_2) + layer_3_rn = self.scratch.layer3_rn(layer_3) + layer_4_rn = self.scratch.layer4_rn(layer_4) + + + path_4 = self.scratch.refinenet4(layer_4_rn) + path_3 = self.scratch.refinenet3(path_4, layer_3_rn) + path_2 = self.scratch.refinenet2(path_3, layer_2_rn) + path_1 = self.scratch.refinenet1(path_2, layer_1_rn) + + out = self.scratch.output_conv(path_1) + + return torch.squeeze(out, dim=1) + + + +def fuse_model(m): + prev_previous_type = nn.Identity() + prev_previous_name = '' + previous_type = nn.Identity() + previous_name = '' + for name, module in m.named_modules(): + if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU: + # print("FUSED ", prev_previous_name, previous_name, name) + torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True) + elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d: + # print("FUSED ", prev_previous_name, previous_name) + torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True) + # elif previous_type == nn.Conv2d and type(module) == nn.ReLU: + # print("FUSED ", previous_name, name) + # torch.quantization.fuse_modules(m, [previous_name, name], inplace=True) + + 
prev_previous_type = previous_type + prev_previous_name = previous_name + previous_type = type(module) + previous_name = name \ No newline at end of file diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/transforms.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..350cbc11662633ad7f8968eb10be2e7de6e384e9 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/transforms.py @@ -0,0 +1,234 @@ +import numpy as np +import cv2 +import math + + +def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA): + """Rezise the sample to ensure the given size. Keeps aspect ratio. + + Args: + sample (dict): sample + size (tuple): image size + + Returns: + tuple: new size + """ + shape = list(sample["disparity"].shape) + + if shape[0] >= size[0] and shape[1] >= size[1]: + return sample + + scale = [0, 0] + scale[0] = size[0] / shape[0] + scale[1] = size[1] / shape[1] + + scale = max(scale) + + shape[0] = math.ceil(scale * shape[0]) + shape[1] = math.ceil(scale * shape[1]) + + # resize + sample["image"] = cv2.resize( + sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method + ) + + sample["disparity"] = cv2.resize( + sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST + ) + sample["mask"] = cv2.resize( + sample["mask"].astype(np.float32), + tuple(shape[::-1]), + interpolation=cv2.INTER_NEAREST, + ) + sample["mask"] = sample["mask"].astype(bool) + + return tuple(shape) + + +class Resize(object): + """Resize sample to given size (width, height). 
+ """ + + def __init__( + self, + width, + height, + resize_target=True, + keep_aspect_ratio=False, + ensure_multiple_of=1, + resize_method="lower_bound", + image_interpolation_method=cv2.INTER_AREA, + ): + """Init. + + Args: + width (int): desired output width + height (int): desired output height + resize_target (bool, optional): + True: Resize the full sample (image, mask, target). + False: Resize image only. + Defaults to True. + keep_aspect_ratio (bool, optional): + True: Keep the aspect ratio of the input sample. + Output sample might not have the given width and height, and + resize behaviour depends on the parameter 'resize_method'. + Defaults to False. + ensure_multiple_of (int, optional): + Output width and height is constrained to be multiple of this parameter. + Defaults to 1. + resize_method (str, optional): + "lower_bound": Output will be at least as large as the given size. + "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.) + "minimal": Scale as least as possible. (Output size might be smaller than given size.) + Defaults to "lower_bound". 
+ """ + self.__width = width + self.__height = height + + self.__resize_target = resize_target + self.__keep_aspect_ratio = keep_aspect_ratio + self.__multiple_of = ensure_multiple_of + self.__resize_method = resize_method + self.__image_interpolation_method = image_interpolation_method + + def constrain_to_multiple_of(self, x, min_val=0, max_val=None): + y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int) + + if max_val is not None and y > max_val: + y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int) + + if y < min_val: + y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int) + + return y + + def get_size(self, width, height): + # determine new height and width + scale_height = self.__height / height + scale_width = self.__width / width + + if self.__keep_aspect_ratio: + if self.__resize_method == "lower_bound": + # scale such that output size is lower bound + if scale_width > scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "upper_bound": + # scale such that output size is upper bound + if scale_width < scale_height: + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + elif self.__resize_method == "minimal": + # scale as least as possbile + if abs(1 - scale_width) < abs(1 - scale_height): + # fit width + scale_height = scale_width + else: + # fit height + scale_width = scale_height + else: + raise ValueError( + f"resize_method {self.__resize_method} not implemented" + ) + + if self.__resize_method == "lower_bound": + new_height = self.constrain_to_multiple_of( + scale_height * height, min_val=self.__height + ) + new_width = self.constrain_to_multiple_of( + scale_width * width, min_val=self.__width + ) + elif self.__resize_method == "upper_bound": + new_height = self.constrain_to_multiple_of( + scale_height * height, max_val=self.__height + ) + new_width = 
self.constrain_to_multiple_of( + scale_width * width, max_val=self.__width + ) + elif self.__resize_method == "minimal": + new_height = self.constrain_to_multiple_of(scale_height * height) + new_width = self.constrain_to_multiple_of(scale_width * width) + else: + raise ValueError(f"resize_method {self.__resize_method} not implemented") + + return (new_width, new_height) + + def __call__(self, sample): + width, height = self.get_size( + sample["image"].shape[1], sample["image"].shape[0] + ) + + # resize sample + sample["image"] = cv2.resize( + sample["image"], + (width, height), + interpolation=self.__image_interpolation_method, + ) + + if self.__resize_target: + if "disparity" in sample: + sample["disparity"] = cv2.resize( + sample["disparity"], + (width, height), + interpolation=cv2.INTER_NEAREST, + ) + + if "depth" in sample: + sample["depth"] = cv2.resize( + sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST + ) + + sample["mask"] = cv2.resize( + sample["mask"].astype(np.float32), + (width, height), + interpolation=cv2.INTER_NEAREST, + ) + sample["mask"] = sample["mask"].astype(bool) + + return sample + + +class NormalizeImage(object): + """Normlize image by given mean and std. + """ + + def __init__(self, mean, std): + self.__mean = mean + self.__std = std + + def __call__(self, sample): + sample["image"] = (sample["image"] - self.__mean) / self.__std + + return sample + + +class PrepareForNet(object): + """Prepare sample for usage as network input. 
+ """ + + def __init__(self): + pass + + def __call__(self, sample): + image = np.transpose(sample["image"], (2, 0, 1)) + sample["image"] = np.ascontiguousarray(image).astype(np.float32) + + if "mask" in sample: + sample["mask"] = sample["mask"].astype(np.float32) + sample["mask"] = np.ascontiguousarray(sample["mask"]) + + if "disparity" in sample: + disparity = sample["disparity"].astype(np.float32) + sample["disparity"] = np.ascontiguousarray(disparity) + + if "depth" in sample: + depth = sample["depth"].astype(np.float32) + sample["depth"] = np.ascontiguousarray(depth) + + return sample diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/vit.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/vit.py new file mode 100644 index 0000000000000000000000000000000000000000..ea46b1be88b261b0dec04f3da0256f5f66f88a74 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/modules/midas/midas/vit.py @@ -0,0 +1,491 @@ +import torch +import torch.nn as nn +import timm +import types +import math +import torch.nn.functional as F + + +class Slice(nn.Module): + def __init__(self, start_index=1): + super(Slice, self).__init__() + self.start_index = start_index + + def forward(self, x): + return x[:, self.start_index :] + + +class AddReadout(nn.Module): + def __init__(self, start_index=1): + super(AddReadout, self).__init__() + self.start_index = start_index + + def forward(self, x): + if self.start_index == 2: + readout = (x[:, 0] + x[:, 1]) / 2 + else: + readout = x[:, 0] + return x[:, self.start_index :] + readout.unsqueeze(1) + + +class ProjectReadout(nn.Module): + def __init__(self, in_features, start_index=1): + super(ProjectReadout, self).__init__() + self.start_index = start_index + + self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU()) + + 
def forward(self, x): + readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index :]) + features = torch.cat((x[:, self.start_index :], readout), -1) + + return self.project(features) + + +class Transpose(nn.Module): + def __init__(self, dim0, dim1): + super(Transpose, self).__init__() + self.dim0 = dim0 + self.dim1 = dim1 + + def forward(self, x): + x = x.transpose(self.dim0, self.dim1) + return x + + +def forward_vit(pretrained, x): + b, c, h, w = x.shape + + glob = pretrained.model.forward_flex(x) + + layer_1 = pretrained.activations["1"] + layer_2 = pretrained.activations["2"] + layer_3 = pretrained.activations["3"] + layer_4 = pretrained.activations["4"] + + layer_1 = pretrained.act_postprocess1[0:2](layer_1) + layer_2 = pretrained.act_postprocess2[0:2](layer_2) + layer_3 = pretrained.act_postprocess3[0:2](layer_3) + layer_4 = pretrained.act_postprocess4[0:2](layer_4) + + unflatten = nn.Sequential( + nn.Unflatten( + 2, + torch.Size( + [ + h // pretrained.model.patch_size[1], + w // pretrained.model.patch_size[0], + ] + ), + ) + ) + + if layer_1.ndim == 3: + layer_1 = unflatten(layer_1) + if layer_2.ndim == 3: + layer_2 = unflatten(layer_2) + if layer_3.ndim == 3: + layer_3 = unflatten(layer_3) + if layer_4.ndim == 3: + layer_4 = unflatten(layer_4) + + layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1) + layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2) + layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3) + layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4) + + return layer_1, layer_2, layer_3, layer_4 + + +def _resize_pos_embed(self, posemb, gs_h, gs_w): + posemb_tok, posemb_grid = ( + posemb[:, : self.start_index], + posemb[0, self.start_index :], + ) + + gs_old = int(math.sqrt(len(posemb_grid))) + + posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) + posemb_grid = F.interpolate(posemb_grid, 
size=(gs_h, gs_w), mode="bilinear") + posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1) + + posemb = torch.cat([posemb_tok, posemb_grid], dim=1) + + return posemb + + +def forward_flex(self, x): + b, c, h, w = x.shape + + pos_embed = self._resize_pos_embed( + self.pos_embed, h // self.patch_size[1], w // self.patch_size[0] + ) + + B = x.shape[0] + + if hasattr(self.patch_embed, "backbone"): + x = self.patch_embed.backbone(x) + if isinstance(x, (list, tuple)): + x = x[-1] # last feature if backbone outputs list/tuple of features + + x = self.patch_embed.proj(x).flatten(2).transpose(1, 2) + + if getattr(self, "dist_token", None) is not None: + cls_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole cls_tokens impl from Phil Wang, thanks + dist_token = self.dist_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, dist_token, x), dim=1) + else: + cls_tokens = self.cls_token.expand( + B, -1, -1 + ) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + + x = x + pos_embed + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x) + + x = self.norm(x) + + return x + + +activations = {} + + +def get_activation(name): + def hook(model, input, output): + activations[name] = output + + return hook + + +def get_readout_oper(vit_features, features, use_readout, start_index=1): + if use_readout == "ignore": + readout_oper = [Slice(start_index)] * len(features) + elif use_readout == "add": + readout_oper = [AddReadout(start_index)] * len(features) + elif use_readout == "project": + readout_oper = [ + ProjectReadout(vit_features, start_index) for out_feat in features + ] + else: + assert ( + False + ), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'" + + return readout_oper + + +def _make_vit_b16_backbone( + model, + features=[96, 192, 384, 768], + size=[384, 384], + hooks=[2, 5, 8, 11], + vit_features=768, + use_readout="ignore", + start_index=1, +): + pretrained = 
nn.Module() + + pretrained.model = model + pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1")) + pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2")) + pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3")) + pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4")) + + pretrained.activations = activations + + readout_oper = get_readout_oper(vit_features, features, use_readout, start_index) + + # 32, 48, 136, 384 + pretrained.act_postprocess1 = nn.Sequential( + readout_oper[0], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[0], + kernel_size=1, + stride=1, + padding=0, + ), + nn.ConvTranspose2d( + in_channels=features[0], + out_channels=features[0], + kernel_size=4, + stride=4, + padding=0, + bias=True, + dilation=1, + groups=1, + ), + ) + + pretrained.act_postprocess2 = nn.Sequential( + readout_oper[1], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[1], + kernel_size=1, + stride=1, + padding=0, + ), + nn.ConvTranspose2d( + in_channels=features[1], + out_channels=features[1], + kernel_size=2, + stride=2, + padding=0, + bias=True, + dilation=1, + groups=1, + ), + ) + + pretrained.act_postprocess3 = nn.Sequential( + readout_oper[2], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[2], + kernel_size=1, + stride=1, + padding=0, + ), + ) + + pretrained.act_postprocess4 = nn.Sequential( + readout_oper[3], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[3], + kernel_size=1, + stride=1, + padding=0, + ), + nn.Conv2d( + in_channels=features[3], + out_channels=features[3], + kernel_size=3, + 
stride=2, + padding=1, + ), + ) + + pretrained.model.start_index = start_index + pretrained.model.patch_size = [16, 16] + + # We inject this function into the VisionTransformer instances so that + # we can use it with interpolated position embeddings without modifying the library source. + pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model) + pretrained.model._resize_pos_embed = types.MethodType( + _resize_pos_embed, pretrained.model + ) + + return pretrained + + +def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("vit_large_patch16_384", pretrained=pretrained) + + hooks = [5, 11, 17, 23] if hooks == None else hooks + return _make_vit_b16_backbone( + model, + features=[256, 512, 1024, 1024], + hooks=hooks, + vit_features=1024, + use_readout=use_readout, + ) + + +def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("vit_base_patch16_384", pretrained=pretrained) + + hooks = [2, 5, 8, 11] if hooks == None else hooks + return _make_vit_b16_backbone( + model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout + ) + + +def _make_pretrained_deitb16_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained) + + hooks = [2, 5, 8, 11] if hooks == None else hooks + return _make_vit_b16_backbone( + model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout + ) + + +def _make_pretrained_deitb16_distil_384(pretrained, use_readout="ignore", hooks=None): + model = timm.create_model( + "vit_deit_base_distilled_patch16_384", pretrained=pretrained + ) + + hooks = [2, 5, 8, 11] if hooks == None else hooks + return _make_vit_b16_backbone( + model, + features=[96, 192, 384, 768], + hooks=hooks, + use_readout=use_readout, + start_index=2, + ) + + +def _make_vit_b_rn50_backbone( + model, + features=[256, 512, 768, 768], + size=[384, 384], + hooks=[0, 
1, 8, 11], + vit_features=768, + use_vit_only=False, + use_readout="ignore", + start_index=1, +): + pretrained = nn.Module() + + pretrained.model = model + + if use_vit_only == True: + pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1")) + pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2")) + else: + pretrained.model.patch_embed.backbone.stages[0].register_forward_hook( + get_activation("1") + ) + pretrained.model.patch_embed.backbone.stages[1].register_forward_hook( + get_activation("2") + ) + + pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3")) + pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4")) + + pretrained.activations = activations + + readout_oper = get_readout_oper(vit_features, features, use_readout, start_index) + + if use_vit_only == True: + pretrained.act_postprocess1 = nn.Sequential( + readout_oper[0], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[0], + kernel_size=1, + stride=1, + padding=0, + ), + nn.ConvTranspose2d( + in_channels=features[0], + out_channels=features[0], + kernel_size=4, + stride=4, + padding=0, + bias=True, + dilation=1, + groups=1, + ), + ) + + pretrained.act_postprocess2 = nn.Sequential( + readout_oper[1], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[1], + kernel_size=1, + stride=1, + padding=0, + ), + nn.ConvTranspose2d( + in_channels=features[1], + out_channels=features[1], + kernel_size=2, + stride=2, + padding=0, + bias=True, + dilation=1, + groups=1, + ), + ) + else: + pretrained.act_postprocess1 = nn.Sequential( + nn.Identity(), nn.Identity(), nn.Identity() + ) + pretrained.act_postprocess2 = nn.Sequential( + nn.Identity(), nn.Identity(), nn.Identity() + ) + + pretrained.act_postprocess3 = nn.Sequential( + readout_oper[2], + 
Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[2], + kernel_size=1, + stride=1, + padding=0, + ), + ) + + pretrained.act_postprocess4 = nn.Sequential( + readout_oper[3], + Transpose(1, 2), + nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), + nn.Conv2d( + in_channels=vit_features, + out_channels=features[3], + kernel_size=1, + stride=1, + padding=0, + ), + nn.Conv2d( + in_channels=features[3], + out_channels=features[3], + kernel_size=3, + stride=2, + padding=1, + ), + ) + + pretrained.model.start_index = start_index + pretrained.model.patch_size = [16, 16] + + # We inject this function into the VisionTransformer instances so that + # we can use it with interpolated position embeddings without modifying the library source. + pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model) + + # We inject this function into the VisionTransformer instances so that + # we can use it with interpolated position embeddings without modifying the library source. 
+ pretrained.model._resize_pos_embed = types.MethodType( + _resize_pos_embed, pretrained.model + ) + + return pretrained + + +def _make_pretrained_vitb_rn50_384( + pretrained, use_readout="ignore", hooks=None, use_vit_only=False +): + model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained) + + hooks = [0, 1, 8, 11] if hooks == None else hooks + return _make_vit_b_rn50_backbone( + model, + features=[256, 512, 768, 768], + size=[384, 384], + hooks=hooks, + use_vit_only=use_vit_only, + use_readout=use_readout, + ) diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/util.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/util.py new file mode 100644 index 0000000000000000000000000000000000000000..8c09ca1c72f7ceb3f9d7f9546aae5561baf62b13 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ldm/util.py @@ -0,0 +1,197 @@ +import importlib + +import torch +from torch import optim +import numpy as np + +from inspect import isfunction +from PIL import Image, ImageDraw, ImageFont + + +def log_txt_as_img(wh, xc, size=10): + # wh a tuple of (width, height) + # xc a list of captions to plot + b = len(xc) + txts = list() + for bi in range(b): + txt = Image.new("RGB", wh, color="white") + draw = ImageDraw.Draw(txt) + font = ImageFont.truetype('data/DejaVuSans.ttf', size=size) + nc = int(40 * (wh[0] / 256)) + lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc)) + + try: + draw.text((0, 0), lines, fill="black", font=font) + except UnicodeEncodeError: + print("Cant encode string for logging. 
Skipping.") + + txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0 + txts.append(txt) + txts = np.stack(txts) + txts = torch.tensor(txts) + return txts + + +def ismap(x): + if not isinstance(x, torch.Tensor): + return False + return (len(x.shape) == 4) and (x.shape[1] > 3) + + +def isimage(x): + if not isinstance(x,torch.Tensor): + return False + return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1) + + +def exists(x): + return x is not None + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def mean_flat(tensor): + """ + https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86 + Take the mean over all non-batch dimensions. + """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +def count_params(model, verbose=False): + total_params = sum(p.numel() for p in model.parameters()) + if verbose: + print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.") + return total_params + + +def instantiate_from_config(config): + if not "target" in config: + if config == '__is_first_stage__': + return None + elif config == "__is_unconditional__": + return None + raise KeyError("Expected key `target` to instantiate.") + return get_obj_from_str(config["target"])(**config.get("params", dict())) + + +def get_obj_from_str(string, reload=False): + module, cls = string.rsplit(".", 1) + if reload: + module_imp = importlib.import_module(module) + importlib.reload(module_imp) + return getattr(importlib.import_module(module, package=None), cls) + + +class AdamWwithEMAandWings(optim.Optimizer): + # credit to https://gist.github.com/crowsonkb/65f7265353f403714fce3b2595e0b298 + def __init__(self, params, lr=1.e-3, betas=(0.9, 0.999), eps=1.e-8, # TODO: check hyperparameters before using + weight_decay=1.e-2, amsgrad=False, ema_decay=0.9999, # ema decay to match previous code + ema_power=1., param_names=()): + """AdamW that saves EMA versions 
of the parameters.""" + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + if not 0.0 <= weight_decay: + raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) + if not 0.0 <= ema_decay <= 1.0: + raise ValueError("Invalid ema_decay value: {}".format(ema_decay)) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, amsgrad=amsgrad, ema_decay=ema_decay, + ema_power=ema_power, param_names=param_names) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Args: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + ema_params_with_grad = [] + state_sums = [] + max_exp_avg_sqs = [] + state_steps = [] + amsgrad = group['amsgrad'] + beta1, beta2 = group['betas'] + ema_decay = group['ema_decay'] + ema_power = group['ema_power'] + + for p in group['params']: + if p.grad is None: + continue + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError('AdamW does not support sparse gradients') + grads.append(p.grad) + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. 
values + state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) + # Exponential moving average of parameter values + state['param_exp_avg'] = p.detach().float().clone() + + exp_avgs.append(state['exp_avg']) + exp_avg_sqs.append(state['exp_avg_sq']) + ema_params_with_grad.append(state['param_exp_avg']) + + if amsgrad: + max_exp_avg_sqs.append(state['max_exp_avg_sq']) + + # update the steps for each param group update + state['step'] += 1 + # record the step after step update + state_steps.append(state['step']) + + optim._functional.adamw(params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=amsgrad, + beta1=beta1, + beta2=beta2, + lr=group['lr'], + weight_decay=group['weight_decay'], + eps=group['eps'], + maximize=False) + + cur_ema_decay = min(ema_decay, 1 - state['step'] ** -ema_power) + for param, ema_param in zip(params_with_grad, ema_params_with_grad): + ema_param.mul_(cur_ema_decay).add_(param.float(), alpha=1 - cur_ema_decay) + + return loss \ No newline at end of file diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ops_bf16.txt b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ops_bf16.txt new file mode 100644 index 0000000000000000000000000000000000000000..6ebc5516a360c16f3cb81f20c94bbe92273d375f --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ops_bf16.txt @@ -0,0 +1,27 @@ +_convolution.deprecated +_convolution +conv1d +conv2d +conv3d +conv_tbc +conv_transpose1d +conv_transpose2d.input +conv_transpose3d.input +convolution +prelu +addmm +addmv +addr +matmul +einsum +mm +mv +silu +linear +addbmm +baddbmm +bmm +chain_matmul +linalg_multi_dot +layer_norm +group_norm diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ops_fp32.txt 
b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ops_fp32.txt new file mode 100644 index 0000000000000000000000000000000000000000..ff88b9b8a542a885c76e7727822fc1b0c0f32f4b --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/ops_fp32.txt @@ -0,0 +1,44 @@ +acos +asin +cosh +erfinv +exp +expm1 +log +log10 +log2 +log1p +reciprocal +rsqrt +sinh +tan +pow.Tensor_Scalar +pow.Tensor_Tensor +pow.Scalar +softplus +frobenius_norm +frobenius_norm.dim +nuclear_norm +nuclear_norm.dim +cosine_similarity +poisson_nll_loss +cosine_embedding_loss +nll_loss +nll_loss2d +hinge_embedding_loss +kl_div +l1_loss +smooth_l1_loss +huber_loss +mse_loss +margin_ranking_loss +multilabel_margin_loss +soft_margin_loss +triplet_margin_loss +multi_margin_loss +binary_cross_entropy_with_logits +dist +pdist +cdist +renorm +logsumexp \ No newline at end of file diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/requirements.txt b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d10dee4b0b86f07005e923a9eed65c1ec5b8fa43 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/requirements.txt @@ -0,0 +1,9 @@ +lightning==2.1.2 +lightning-habana==1.3.0 +omegaconf==2.1.1 +einops==0.3.0 +transformers==4.37.1 +open-clip-torch==2.7.0 +gradio==4.19.2 +k_diffusion==0.0.14 +-e . 
diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/setup.py b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..00f5b4d874f0f19ece54fac2dd50b39774b86c5b --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/generative_models/stable-diffusion-v-2-1/setup.py @@ -0,0 +1,13 @@ +from setuptools import setup, find_packages + +setup( + name='stable-diffusion', + version='0.0.1', + description='', + packages=find_packages(), + install_requires=[ + 'torch', + 'numpy', + 'tqdm', + ], +) \ No newline at end of file diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/LICENSE b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f8a664f4989f2df1c3b634429bbb961ff6ce3175 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/LICENSE @@ -0,0 +1,202 @@ +Copyright (c) 2022 Habana Labs, Ltd. an Intel Company + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/README.md b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/README.md new file mode 100644 index 0000000000000000000000000000000000000000..58e141dc97c542799c07229135761c1220ecc7c6 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/README.md @@ -0,0 +1,145 @@ +# BART for PyTorch + +This folder contains scripts to fine-tune BART model on Intel® Gaudi® AI Accelerator. To obtain model performance data, refer to the [Habana Model Performance Data page](https://developer.habana.ai/resources/habana-training-models/#performance). + +For more information about training deep learning models using Gaudi, visit [developer.habana.ai](https://developer.habana.ai/resources/). + +## Table of Contents + * [Model-References](../../../../README.md) + * [Model Overview](#model-overview) + * [Setup](#setup) + * [Training Examples ](#training-examples) + * [Supported Configurations](#supported-configurations) + * [Changelog](#changelog) + * [Known Issues](#known-issues) + +## Model Overview + +BART, Bidirectional and Auto-Regressive Transformers, is proposed in this paper: [Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://aclanthology.org/2020.acl-main.703/), ACL 2020. 
It is a denoising autoencoder that maps a corrupted document to the original document it was derived from. BART is implemented as a sequence-to-sequence model with a bidirectional encoder over corrupted text and a left-to-right autoregressive decoder. According to the paper, BART's architecture is related to that used in BERT, with these differences: (1) each layer of the decoder additionally performs cross-attention over the final hidden layer of the encoder; and (2) BERT uses an additional feed-forward network before word prediction, which BART does not. BART contains roughly 10% more parameters than the equivalently sized BERT model. + +### BART Fine-Tuning +- Suited for tasks: + - Text paraphrasing: The model aims to generate paraphrases of the given input sentence. + - Text summarization: The model aims to generate a summary of the given input sentence. +- Uses optimizer: FusedAdamW (AdamW: “ADAM with Weight Decay Regularization”). +- Based on model weights trained with pre-training. +- Light-weight: The training takes a few minutes. + +The BART demo uses training scripts from simple transformers https://github.com/ThilinaRajapakse/simpletransformers. + +## Setup +Please follow the instructions provided in the [Gaudi Installation Guide](https://docs.habana.ai/en/latest/Installation_Guide/index.html) +to set up the environment including the `$PYTHON` environment variable. To achieve the best performance, please follow the methods outlined in the [Optimizing Training Platform guide](https://docs.habana.ai/en/latest/PyTorch/Model_Optimization_PyTorch/Optimization_in_Training_Platform.html). +The guides will walk you through the process of setting up your system to run the model on Gaudi. + +### Clone Habana Model-References +In the docker container, clone this repository and switch to the branch that +matches your SynapseAI version. 
You can run the +[`hl-smi`](https://docs.habana.ai/en/latest/Management_and_Monitoring/System_Management_Tools_Guide/System_Management_Tools.html#hl-smi-utility-options) +utility to determine the SynapseAI version. +```bash +git clone -b [SynapseAI version] https://github.com/HabanaAI/Model-References +``` + +Then, navigate to the BART model directory: +```bash +cd Model-References/PyTorch/nlp/BART/simpletransformers +``` + +### Install Model Requirements +Install the python packages required for fine-tuning: +```bash +cd Model-References/PyTorch/nlp/BART/simpletransformers +pip install -e . +pip install bert_score +``` + +### Fine-tuning Dataset Preparation + +Public datasets can be downloaded with this script: +```bash +bash ./examples/seq2seq/paraphrasing/data_download.sh +``` + +**Note:** Going forward it is assumed that the dataset is located in `./data` directory. + +## Training Examples + +### Single Card and Multi-Card Training Examples + +**Run training on 1 HPU - Lazy mode:** + +- 1 HPU, BART fine-tuning on the dataset using BF16 mixed precision: + ```python + PT_HPU_AUTOCAST_LOWER_PRECISION_OPS_LIST=ops_bf16_bart.txt PT_HPU_AUTOCAST_FP32_OPS_LIST=ops_fp32_bart.txt $PYTHON examples/seq2seq/paraphrasing/train.py --use_habana --no_cuda --use_fused_adam --use_fused_clip_norm --max_seq_length 128 --train_batch_size 32 --num_train_epochs 5 --logging_steps 50 --save_best_model --output_dir output --bf16 autocast + ``` +- 1 HPU, BART fine-tuning on the dataset using FP32 data type: + ```python + $PYTHON examples/seq2seq/paraphrasing/train.py --use_habana --no_cuda --use_fused_adam --use_fused_clip_norm --max_seq_length 128 --train_batch_size 32 --num_train_epochs 5 --logging_steps 50 --save_best_model --output_dir output + ``` + +**Run training on 8 HPUs:** + +To run multi-card demo, make sure the host machine has 512 GB of RAM installed. Modify the docker run command to pass 8 Gaudi cards to the docker container. 
This ensures the docker has access to all the 8 cards required for multi-card training. + +**NOTE:** mpirun map-by PE attribute value may vary on your setup. For the recommended calculation, refer to the instructions detailed in [mpirun Configuration](https://docs.habana.ai/en/latest/PyTorch/PyTorch_Scaling_Guide/DDP_Based_Scaling.html#mpirun-configuration). + +- 8 HPUs on a single server, BF16, batch size 32, Lazy mode: + ```bash + PT_HPU_AUTOCAST_LOWER_PRECISION_OPS_LIST=ops_bf16_bart.txt PT_HPU_AUTOCAST_FP32_OPS_LIST=ops_fp32_bart.txt mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root $PYTHON examples/seq2seq/paraphrasing/train.py --use_habana --no_cuda --use_fused_adam --use_fused_clip_norm --max_seq_length 128 --train_batch_size 32 --num_train_epochs 5 --logging_steps 50 --save_best_model --output_dir /tmp/multicards --bf16 autocast --distributed + ``` + +- 8 HPUs on a single server, FP32, batch size 32, Lazy mode: + ```bash + mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root $PYTHON examples/seq2seq/paraphrasing/train.py --use_habana --no_cuda --use_fused_adam --use_fused_clip_norm --max_seq_length 128 --train_batch_size 32 --num_train_epochs 5 --logging_steps 50 --save_best_model --output_dir /tmp/multicards --distributed + ``` + + +## Supported Configurations + +| Device | SynapseAI Version | PyTorch Version | +|-----|-----|-----| +| Gaudi | 1.14.0 | 2.1.1 | + +## Changelog +### 1.12.0 + - Eager mode support is deprecated. + - Removed PT_HPU_LAZY_MODE environment variable. + - Removed flag lazy_mode. + - Removed HMP; switched to Autocast. + - Updated run commands. +### 1.9.0 + - Enabled PyTorch autocast on Gaudi +### 1.6.0 + - Changed BART distributed API to initialize_distributed_hpu. +### 1.5.0 + - Removed unnecessary mark_step. +### 1.4.0 + - Removed wrapper script run_bart.py. 
+ - Added support for reducing the print frequency of Running Loss to the frequency of logging_steps. + +### Training Script Modifications + +The following changes have been made to the scripts and source, +specifically modifications to the [simple transformer](https://github.com/ThilinaRajapakse/simpletransformers) source: + +1. Added Habana Device support (seq2seq_model.py). +2. Modifications for saving checkpoint: Bring tensors to CPU and save (seq2seq_model.py). +3. Introduced Habana BF16 Mixed precision, adding ops lists for BF16 and FP32 (seq2seq_model.py, ops_bf16_bart.txt, ops_fp32_bart.txt). +4. Change for supporting HMP disable for optimizer.step (seq2seq_model.py). +5. Use fused AdamW optimizer on Habana device (seq2seq_model.py, train.py). +6. Use fused clip norm for grad clipping on Habana device (seq2seq_model.py, train.py). +7. Modified training script to use mpirun for distributed training (train.py). +8. Gradients are used as views using gradient_as_bucket_view (seq2seq_model.py). +9. Default allreduce bucket size set to 200MB for better performance in distributed training (seq2seq_model.py). +10. Added changes to support Lazy mode with required mark_step (seq2seq_model.py). +11. Only print and save in the master process (seq2seq_model.py). +12. Added prediction (sentence generation) metrics (seq2seq_model.py). +13. Modified training script to use Habana data loader (seq2seq_model.py). +14. Add data_dir as an input argument for data directory. +15. Added this README. + +## Known Issues + +1. Placing mark_step() arbitrarily may lead to undefined behavior. We recommend keeping mark_step() as shown in the provided scripts. +2. Sentence generation (prediction) is not enabled in this release. We plan to enable it in the next release. 
diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/bin/simple-viewer b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/bin/simple-viewer new file mode 100644 index 0000000000000000000000000000000000000000..70173f0bfd6d52a66c4e1a1a9e73240d92693405 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/bin/simple-viewer @@ -0,0 +1,14 @@ +#!/bin/bash +cat >run_simple_transformers_streamlit_app.py <<'END_SCRIPT' +#!/usr/bin/env python +from simpletransformers.streamlit.simple_view import streamlit_runner + + +streamlit_runner() + +END_SCRIPT + +# Run +streamlit run run_simple_transformers_streamlit_app.py + +rm run_simple_transformers_streamlit_app.py diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/examples/seq2seq/paraphrasing/data_download.sh b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/examples/seq2seq/paraphrasing/data_download.sh new file mode 100644 index 0000000000000000000000000000000000000000..45604e8d005d8401def010efa13c4c1b5653d4eb --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/examples/seq2seq/paraphrasing/data_download.sh @@ -0,0 +1,7 @@ +mkdir data +wget https://storage.googleapis.com/paws/english/paws_wiki_labeled_final.tar.gz -P data +tar -xvf data/paws_wiki_labeled_final.tar.gz -C data +mv data/final/* data +rm -r data/final + +wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv -P data diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/examples/seq2seq/paraphrasing/train.py b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/examples/seq2seq/paraphrasing/train.py new file mode 100644 index 0000000000000000000000000000000000000000..58cbe064a22438cc0f9e2a3cd397d0ba9483ab7c --- /dev/null +++ 
b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/examples/seq2seq/paraphrasing/train.py @@ -0,0 +1,520 @@ +import os +import sys +import datetime +import logging +import time +import pandas as pd +from sklearn.model_selection import train_test_split + +import torch +sys.path.append(os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../"))) +from simpletransformers.config.model_args import Seq2SeqArgs +from simpletransformers.seq2seq.seq2seq_model import Seq2SeqModel + + +from utils import load_data, clean_unnecessary_spaces +import argparse +import random +import hb_utils + +try: + from apex import amp +except ImportError: + amp = None + +logging.basicConfig(level=logging.INFO) +transformers_logger = logging.getLogger("transformers") +transformers_logger.setLevel(logging.ERROR) + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--use_habana", + action="store_true", + help="Whether not to use Habana device when available" + ) + parser.add_argument( + "--output_dir", + default='/tmp/bart', + type=str, + help="Output dir", + ) + parser.add_argument( + "--no_cache", + action="store_true", + help="Whether not to cache data" + ) + parser.add_argument( + "--reprocess_input_data", + action="store_true", + help="Whether or not to reprocess input data" + ) + parser.add_argument( + "--no_cuda", + action="store_true", + help="Whether not to use CUDA when available" + ) + parser.add_argument( + "--use_fused_adam", + action="store_true", + help="Whether to use fused adamw on habana device" + ) + parser.add_argument( + "--use_fused_clip_norm", + action="store_true", + help="Whether to use fused clip norm on habana device" + ) + parser.add_argument( + "--local_rank", + type=int, + default=-1, + help="local_rank for distributed training on gpus" + ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="random seed for initialization" + ) + parser.add_argument( + 
"--max_seq_length", + type=int, + default=128, + help="maximum input sequence length" + ) + parser.add_argument( + "--train_batch_size", + type=int, + default=8, + help="batch size for training" + ) + parser.add_argument( + "--fp16", + action="store_true", + help="Whether to use fp16" + ) + parser.add_argument( + "--bf16", + type=str, + help="Type of bf16 mixed precision implementation", + choices=["none", "autocast"] + ) + parser.add_argument( + "--debug", + action="store_true", + help="Whether in debug mode" + ) + parser.add_argument( + "--save_steps", + type=int, + default=-1, + help="number of steps to save the model" + ) + parser.add_argument( + "--max_steps", + type=int, + default=-1, + help="number of maximum training steps" + ) + parser.add_argument( + "--save_optimizer_and_scheduler", + action="store_true", + help="Whether save optimizer and scheduler" + ) + parser.add_argument( + "--eval_batch_size", + type=int, + default=64, + help="batch size for evaluation" + ) + parser.add_argument( + "--evaluate_during_training", + action="store_true", + help="Whether evaluate during training" + ) + parser.add_argument( + "--evaluate_during_training_steps", + type=int, + default=-1, + help="evaluate every training steps" + ) + parser.add_argument( + "--evaluate_each_epoch", + action="store_true", + help="Whether evaluate after each epoch" + ) + parser.add_argument( + "--evaluate_generated_text", + action="store_true", + help="Whether evaluate the generated text" + ) + parser.add_argument( + "--save_model_every_epoch", + action="store_true", + help="Whether save the model after each epoch" + ) + parser.add_argument( + "--save_eval_checkpoints", + action="store_true", + help="Whether save the checkpoint after evaluation" + ) + parser.add_argument( + "--save_best_model", + action="store_true", + help="Whether save the best model" + ) + parser.add_argument( + "--logging_steps", + type=int, + default=50, + help="number of logging steps" + ) + parser.add_argument( + 
"--num_train_epochs", + type=int, + default=3, + help="number of epochs for training" + ) + parser.add_argument( + "--num_return_sequences", + type=int, + default=1, + help="number of return sequences during beam sampling" + ) + parser.add_argument( + "--predict", + action="store_true", + help="Whether generate text given input" + ) + #################### distributed training ###################### + parser.add_argument( + '--dl_worker_type', + default='HABANA', + type=lambda x: x.upper(), + choices = ["MT", "MP", "HABANA"], + help='select multithreading or multiprocessing' + ) + parser.add_argument( + '--world_size', + default=1, + type=int, + metavar='N', + help='number of total workers (default: 1)' + ) + parser.add_argument( + '--process_per_node', + default=8, + type=int, + metavar='N', + help='Number of process per node' + ) + parser.add_argument( + '--distributed', + action='store_true', + help='whether to enable distributed mode and run on multiple devices' + ) + parser.add_argument( + '--dist_url', + default='env://', + help='url used to set up distributed training' + ) + parser.add_argument( + "--data_dir", + default="", + type=str, + help="The input data dir. 
If no data dir, will run with ./data under local directory.", + ) + args = parser.parse_args() + + model_args = Seq2SeqArgs() + model_args.debug = True if args.debug else False + model_args.eval_batch_size = args.eval_batch_size + model_args.evaluate_during_training = True if args.evaluate_during_training else False + model_args.evaluate_during_training_steps = args.evaluate_during_training_steps + model_args.evaluate_each_epoch = True if args.evaluate_each_epoch else False + model_args.evaluate_during_training_verbose = True + model_args.evaluate_generated_text = True if args.evaluate_generated_text else False + model_args.fp16 = True if args.fp16 else False + model_args.bf16 = args.bf16 + model_args.learning_rate = 5e-5 + model_args.gradient_accumulation_steps = 1 + model_args.max_seq_length = args.max_seq_length + model_args.num_train_epochs = args.num_train_epochs + model_args.overwrite_output_dir = True + model_args.reprocess_input_data = True if args.reprocess_input_data else False + model_args.logging_steps = args.logging_steps + model_args.save_eval_checkpoints = True if args.save_eval_checkpoints else False + model_args.save_steps = args.save_steps + model_args.save_model_every_epoch = True if args.save_model_every_epoch else False + model_args.save_best_model = True if args.save_best_model else False + model_args.save_optimizer_and_scheduler = True if args.save_optimizer_and_scheduler else False + model_args.train_batch_size = args.train_batch_size + model_args.use_multiprocessing = False + model_args.use_multiprocessing_for_evaluation = False + model_args.predict = True if args.predict else False + model_args.do_sample = True + model_args.num_beams = None + model_args.num_return_sequences = args.num_return_sequences + model_args.max_length = args.max_seq_length + model_args.top_k = 50 + model_args.top_p = 0.95 + + model_args.max_steps = args.max_steps + model_args.seed = args.seed + model_args.use_habana = args.use_habana + model_args.use_fused_adam = 
# --- flattened-diff residue: tail of parse_args() (kept as reference) ---
#     model_args.use_fused_adam = args.use_fused_adam
#     model_args.use_fused_clip_norm = args.use_fused_clip_norm
#     model_args.output_dir = args.output_dir
#     model_args.best_model_dir = args.output_dir
#     model_args.tensorboard_dir = args.output_dir
#     model_args.no_cache = True if args.no_cache else False
#     model_args.cache_dir = args.output_dir
#     if args.use_habana and args.use_fused_adam:
#         model_args.optimizer = 'FusedAdamW'
#         model_args.max_grad_norm = 1.0
#     else:
#         model_args.optimizer = 'AdamW'
#         model_args.adafactor_relative_step = False
#         model_args.adafactor_scale_parameter = False
#         model_args.adafactor_warmup_init = False
#     model_args.scheduler = "linear_schedule_with_warmup"
#     return args, model_args


def load_train_val_data():
    """Build train/eval DataFrames of paraphrase pairs from the Quora dataset.

    Reads the module-level ``args`` namespace. Only the master rank
    (local_rank -1 or 0) touches the filesystem; the other ranks wait on a
    barrier until the master has written the cached train/test split.

    Returns:
        (train_df, eval_df): pandas DataFrames with columns
        ["prefix", "input_text", "target_text"].

    NOTE(review): on non-master ranks train_df/eval_df are never assigned
    before the final ``return`` — presumably multi-card callers reload the
    data themselves; confirm before relying on the return value off-master.
    """
    # Non-master ranks wait here so only one process writes the split files.
    if args.local_rank not in [-1, 0]:
        if args.use_habana:
            hb_utils.barrier()
        else:
            torch.distributed.barrier()

    if args.local_rank in [-1, 0]:
        # Google (PAWS) data: keep only rows labelled "1" (true paraphrases).
        train_df = pd.read_csv(os.path.join(args.data_dir, "data/train.tsv"), sep="\t").astype(str)
        eval_df = pd.read_csv(os.path.join(args.data_dir, "data/dev.tsv"), sep="\t").astype(str)

        train_df = train_df.loc[train_df["label"] == "1"]
        eval_df = eval_df.loc[eval_df["label"] == "1"]

        train_df = train_df.rename(
            columns={"sentence1": "input_text", "sentence2": "target_text"}
        )
        eval_df = eval_df.rename(
            columns={"sentence1": "input_text", "sentence2": "target_text"}
        )

        train_df = train_df[["input_text", "target_text"]]
        eval_df = eval_df[["input_text", "target_text"]]

        train_df["prefix"] = "paraphrase"
        eval_df["prefix"] = "paraphrase"

        # MSRP data intentionally disabled (was a pd.concat of
        # msr_paraphrase_{train,test}.txt loaded via load_data).

        # NOTE: the Google data prepared above is discarded here — only the
        # Quora split below is used. Original behavior preserved.
        train_df = []
        eval_df = []

        # Quora Data
        # The Quora dataset is not separated into train/test, so we do it
        # manually the first time and cache the split next to the source file.
        quora_train_path = os.path.join(args.data_dir, "data/quora_train.tsv")
        quora_test_path = os.path.join(args.data_dir, "data/quora_test.tsv")
        if not os.path.exists(quora_train_path) or not os.path.exists(quora_test_path):
            df = load_data(
                os.path.join(args.data_dir, "data/quora_duplicate_questions.tsv"), "question1", "question2", "is_duplicate"
            )
            q_train, q_test = train_test_split(df)
            print('Splitting train and test...')
            q_train.to_csv(quora_train_path, sep="\t")
            q_test.to_csv(quora_test_path, sep="\t")
        else:
            # Fix: read the cached split from args.data_dir. The original
            # checked and re-read bare "data/..." paths while *writing* under
            # args.data_dir, so the cache was never found (or the wrong file
            # was read) whenever the working directory differed.
            print('Reading train and test...')
            q_train = pd.read_csv(quora_train_path, sep="\t")
            q_test = pd.read_csv(quora_test_path, sep="\t")

        train_df = q_train  # pd.concat([train_df, q_train])
        eval_df = q_test  # pd.concat([eval_df, q_test])

        train_df = train_df[["prefix", "input_text", "target_text"]]
        eval_df = eval_df[["prefix", "input_text", "target_text"]]

        train_df = train_df.dropna()
        eval_df = eval_df.dropna()

        # Normalize tokenization artifacts (" ,", " n't", ...) in both splits.
        train_df["input_text"] = train_df["input_text"].apply(clean_unnecessary_spaces)
        train_df["target_text"] = train_df["target_text"].apply(clean_unnecessary_spaces)

        eval_df["input_text"] = eval_df["input_text"].apply(clean_unnecessary_spaces)
        eval_df["target_text"] = eval_df["target_text"].apply(clean_unnecessary_spaces)

    # Master releases the waiting ranks once the files exist.
    if args.local_rank == 0:
        if args.use_habana:
            hb_utils.barrier()
        else:
            torch.distributed.barrier()
    return train_df, eval_df


# --- flattened-diff residue: head of main() (kept as reference) ---
# def main(args, model_args):
#     if args.dl_worker_type == "MP":
#         try:
#             # Default 'fork' doesn't work with synapse.
Use 'forkserver' or 'spawn' + torch.multiprocessing.set_start_method('spawn') + except RuntimeError: + pass + elif args.dl_worker_type == "HABANA": + try: + import habana_dataloader + except ImportError: + assert False, "Could Not import habana dataloader package" + + #if args.apex: + # if sys.version_info < (3, 0): + # raise RuntimeError("Apex currently only supports Python 3. Aborting.") + # if amp is None: + # raise RuntimeError("Failed to import apex. Please install apex from https://www.github.com/nvidia/apex " + # "to enable mixed-precision training.") + hb_utils.init_distributed_mode(args) + if hasattr(args, "rank"): + args.local_rank = args.rank + print('####################### These are args: ######################') + print(args) + + model_args.dl_worker_type = args.dl_worker_type + model_args.world_size = args.world_size + model_args.process_per_node = args.process_per_node + model_args.distributed = args.distributed + model_args.dist_url = args.dist_url + + args.is_master = False + if args.local_rank in [-1, 0]: + args.is_master = True + model_args.is_master = args.is_master + model_args.local_rank = args.local_rank + print("############### local_rank is_master #############", model_args.local_rank, model_args.is_master) + + + if model_args.use_habana is True: + device = torch.device("hpu") + args.n_gpu = 0 + print("########## HPU ##########") + + if args.no_cuda is False: + if torch.cuda.is_available(): + n_gpu = torch.cuda.device_count() + if n_gpu > 1: + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + else: + device = torch.device("cuda") + args.n_gpu = n_gpu + print("########## GPU n_gpu ##########", args.n_gpu) + else: + device = torch.device("cpu") + args.n_gpu = 0 + print("########## CPU ##########") + + model_args.device = device + model_args.n_gpu = args.n_gpu + + #if args.deterministic: + # seed = args.seed + # random.seed(seed) + # if args.device == 'cuda': + # torch.cuda.manual_seed(seed) + #else: + 
# seed = None + + + train_df, eval_df = load_train_val_data() + + if model_args.device == 'hpu' and model_args.workers > 0: + # patch torch cuda functions that are being unconditionally invoked + # in the multiprocessing data loader + torch.cuda.current_device = lambda: None + torch.cuda.set_device = lambda x: None + + model = Seq2SeqModel( + encoder_decoder_type="bart", + encoder_decoder_name="facebook/bart-base", + args=model_args, + use_cuda=True if args.n_gpu > 0 else False, + cuda_device=args.local_rank if args.n_gpu > 0 else -1, + ) + + start_time = time.time() + + model.train_model(train_df, eval_data=eval_df, output_dir=args.output_dir) + + ####################### prediction ####################### + if args.predict and args.local_rank in [-1, 0]: + to_predict = [ + prefix + ": " + str(input_text) + for prefix, input_text in zip( + eval_df["prefix"].tolist(), eval_df["input_text"].tolist() + ) + ] + truth = eval_df["target_text"].tolist() + + print("Start testing") + start_time = time.time() + # + preds = model.predict(to_predict) + # + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Testing time {}'.format(total_time_str)) + + os.makedirs(os.path.join(args.output_dir, "predictions"), exist_ok=True) + pred_time = f"_{datetime.datetime.now()}" + pred_text = os.path.join(args.output_dir, "predictions", "pred_text"+pred_time+".txt") + + with open(pred_text, "w") as f: + for i, text in enumerate(eval_df["input_text"].tolist()): + f.write(str(text) + "\n\n") + + f.write("Truth:\n") + f.write(truth[i] + "\n\n") + + f.write("Prediction:\n") + for pred in preds[i]: + f.write(str(pred) + "\n") + f.write( + "________________________________________________________________________________\n" + ) + + results = model.compute_metrics( + truth, preds + ) + print('Prediction results:') + print(results) + + pred_results = os.path.join(args.output_dir, "predictions", "pred_results"+pred_time+".csv") + report 
= pd.DataFrame(results, index=[0]) + report.to_csv( + pred_results, + index=False, + ) + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Total time {}'.format(total_time_str)) + +if __name__ == "__main__": + args, model_args = parse_args() + main(args, model_args) diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/examples/seq2seq/paraphrasing/utils.py b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/examples/seq2seq/paraphrasing/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6d21f6af3d43b4825ebab505b0188874f4c09909 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/examples/seq2seq/paraphrasing/utils.py @@ -0,0 +1,36 @@ +import warnings + +import pandas as pd + + +def load_data( + file_path, input_text_column, target_text_column, label_column, keep_label=1 +): + df = pd.read_csv(file_path, sep="\t")#, error_bad_lines=False) + df = df.loc[df[label_column] == keep_label] + df = df.rename( + columns={input_text_column: "input_text", target_text_column: "target_text"} + ) + df = df[["input_text", "target_text"]] + df["prefix"] = "paraphrase" + + return df + + +def clean_unnecessary_spaces(out_string): + if not isinstance(out_string, str): + warnings.warn(f">>> {out_string} <<< is not a string.") + out_string = str(out_string) + out_string = ( + out_string.replace(" .", ".") + .replace(" ?", "?") + .replace(" !", "!") + .replace(" ,", ",") + .replace(" ' ", "'") + .replace(" n't", "n't") + .replace(" 'm", "'m") + .replace(" 's", "'s") + .replace(" 've", "'ve") + .replace(" 're", "'re") + ) + return out_string diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/hb_utils.py b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/hb_utils.py new file mode 100644 index 
# --- flattened-diff residue: new-file header for hb_utils.py ---
# 0000000000000000000000000000000000000000..226b0f7b359bd7175b8d86a2050948d32d1aff0b
# --- /dev/null
# +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/hb_utils.py
# Copyright (c) 2021, Habana Labs Ltd. All rights reserved.
from __future__ import print_function
from collections import defaultdict, deque
import datetime
import time
import torch
import torch.distributed as dist
import errno
import os

# Optional MPI communicator used by barrier(); stays None unless assigned
# externally by the launcher.
mpi_comm = None


class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        # fmt is a str.format template; it may reference median, avg,
        # global_avg, max and value (see __str__).
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)  # bounded window of recent values
        self.total = 0.0  # running sum over every update (not just the window)
        self.count = 0  # number of observations over every update
        self.fmt = fmt

    def update(self, value, n=1):
        # n lets one call stand for n identical observations (e.g. batch size).
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self, device):
        """
        Warning: does not synchronize the deque!
        """
        # No-op outside an initialized torch.distributed process group.
        if not is_dist_avail_and_initialized():
            return
        # HPU path uses float32; the CUDA path keeps the float64 accumulator.
        if device.type == 'hpu':
            t = torch.tensor([self.count, self.total], dtype=torch.float32).to('hpu')
        else:
            t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)  # sum count/total across all ranks
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]

    @property
    def median(self):
        # Median of the windowed values only.
        d = torch.tensor(list(self.deque))
        return d.median().item()

    @property
    def avg(self):
        # Mean of the windowed values only.
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    @property
    def global_avg(self):
        # Mean over every update since construction (raises if count == 0).
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        # Most recent observation.
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)


class MetricLogger(object):
    """Collect named SmoothedValue meters and pretty-print them during training."""

    def __init__(self, delimiter="\t", device=torch.device('cuda')):
        # Meters are created lazily on the first update() of a given name.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
        self.device = device  # forwarded to SmoothedValue.synchronize_between_processes

    def update(self, **kwargs):
        # Accepts scalars or 0-dim tensors; tensors are unwrapped via .item().
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Expose meters as attributes (e.g. logger.loss) for convenience.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes(self.device)

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        # Generator wrapper: yields every item of iterable while timing data
        # loading vs. iteration, printing meters every print_freq steps.
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')  # seconds per iteration
        data_time = SmoothedValue(fmt='{avg:.4f}')  # seconds waiting for data
        # Pad the step counter to the digit count of len(iterable).
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        if torch.cuda.is_available():
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}',
                'max mem: {memory:.0f}'
            ])
        else:
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}'
            ])
        MB = 1024.0 * 1024.0
        for obj in iterable:
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0:
                # ETA extrapolated from the global average iteration time.
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {}'.format(header, total_time_str))


# Modified version of accuracy. target and pred tensors are pytorch Long
# which is not supported by habana kernels yet.
# (continuation of the note above:) So fall back to CPU for ops involving
# these (and remain on CPU, since this is the last portion of the iteration
# and we need the accuracy values to be printed out on host).
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Habana-friendly variant: the Long-typed top-k indices and targets are
    compared on CPU because Habana kernels do not support these ops yet.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) tensor of class indices.
        topk: iterable of k values to report.

    Returns:
        list of 0-dim float tensors, one accuracy percentage per k.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        # Fix: torch.tensor(existing_tensor) is deprecated copy-construction
        # (emits a UserWarning); detach().cpu() is the recommended equivalent.
        pred_cpu = pred.detach().cpu()
        target_cpu = target.detach().cpu()
        correct = pred_cpu.eq(target_cpu[None])
        res = []
        for k in topk:
            correct_k = correct[:k].flatten().sum(dtype=torch.float32)
            res.append(correct_k * (100.0 / batch_size))
        return res


# Original accuracy code (device-resident reference implementation).
def accuracy_orig(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target[None])
        res = []
        for k in topk:
            correct_k = correct[:k].flatten().sum(dtype=torch.float32)
            res.append(correct_k * (100.0 / batch_size))
        return res


def mkdir(path):
    """Create path (with parents); 'already exists' is not an error."""
    try:
        os.makedirs(path)
    except OSError as e:
        # Re-raise anything except EEXIST so real failures still surface.
        if e.errno != errno.EEXIST:
            raise


def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        # force=True lets any rank print (e.g. for errors).
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print


def is_dist_avail_and_initialized():
    # True only when torch.distributed is both compiled in and initialized.
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True


def get_world_size():
    # Defaults to 1 for single-process runs.
    if not is_dist_avail_and_initialized():
        return 1
    return dist.get_world_size()


def get_rank():
    # Defaults to 0 (master) for single-process runs.
    if not is_dist_avail_and_initialized():
        return 0
    return dist.get_rank()


def is_main_process():
    return get_rank() == 0


def save_on_master(*args, **kwargs):
    # Only rank 0 writes checkpoints, avoiding concurrent-file clobbering.
    if is_main_process():
        torch.save(*args, **kwargs)


def barrier():
    # Uses the module-level MPI communicator (if one was installed); this is
    # independent of the torch.distributed barrier.
    if mpi_comm is not None:
        mpi_comm.Barrier()


def init_distributed_mode(args):
    """Initialize distributed training for HPU (hccl) or GPU (nccl).

    Fills args.world_size / args.rank / args.local_rank from the Habana
    helper, sets args.distributed, picks the backend, and silences printing
    on non-master ranks.
    """
    from habana_frameworks.torch.utils.distributed_utils import initialize_distributed_hpu
    args.world_size, args.rank, args.local_rank = initialize_distributed_hpu()
    if args.world_size == 1:
        # Single process: nothing to initialize.
        args.distributed = False
        return

    args.distributed = True
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    if args.use_habana:
        args.dist_backend = 'hccl'
        dist.init_process_group(args.dist_backend, rank=args.rank, world_size=args.world_size)
    else:
        torch.cuda.set_device(args.gpu)
        args.dist_backend = 'nccl'
        torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                             world_size=args.world_size, rank=args.rank)
    setup_for_distributed(args.rank == 0)


# --- flattened-diff residue: ops_bf16_bart.txt (bf16-allowed op list) ---
# add addmm bmm dot dropout gelu iadd layer_norm linear matmul mm mv relu softmax sum
# --- flattened-diff residue: new-file header for ops_fp32_bart.txt ---
b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/ops_fp32_bart.txt @@ -0,0 +1,7 @@ +div +embedding +log_softmax +nll_loss +truediv +sort +topk diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/requirements.txt b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..eda818854048fee84f143280c8159028cff4745e --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/requirements.txt @@ -0,0 +1,7 @@ +scikit-learn +datasets==1.9.0 +sentencepiece +seqeval +streamlit +tensorboardx +wandb diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/setup.cfg b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..5f3cfa585da676a26421f7f9fcfc88db1c540d67 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/setup.cfg @@ -0,0 +1,18 @@ +[tool:pytest] +python_functions=test_ + +codestyle_max_line_length = 119 + +log_cli = true +log_cli_level = WARNING + +[metadata] +description-file = README.md +license_file = LICENSE + +[pycodestyle] +max-line-length = 119 + +[flake8] +max-line-length = 119 +ignore = E203 , W503, F401 diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/setup.py b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..4d3d09679bacb2566512b0ca125108da603d8282 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/setup.py @@ -0,0 +1,41 @@ +from setuptools import find_packages, setup + +with open("README.md", "r") as fh: + long_description = fh.read() + +setup( + 
name="simpletransformers", + version="0.61.9", + author="Thilina Rajapakse", + author_email="chaturangarajapakshe@gmail.com", + description="An easy-to-use wrapper library for the Transformers library.", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/ThilinaRajapakse/simpletransformers/", + packages=find_packages(), + scripts=["bin/simple-viewer"], + classifiers=[ + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + ], + python_requires=">=3.6", + install_requires=[ + "numpy", + "requests", + "tqdm>=4.47.0", + "regex", + "transformers>=4.2.0", + "datasets", + "scipy", + "scikit-learn", + "seqeval", + "tensorboardx", + "pandas", + "tokenizers", + "wandb>=0.10.32", + "streamlit", + "sentencepiece", + ], +) diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/simpletransformers/seq2seq/seq2seq_utils.py b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/simpletransformers/seq2seq/seq2seq_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..55c960c6fac1691096f98b8f6b5ad2f27184bded --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/BART/simpletransformers/simpletransformers/seq2seq/seq2seq_utils.py @@ -0,0 +1,554 @@ +import logging +import os +import pickle +from multiprocessing import Pool +from functools import partial +from typing import Tuple + +import pandas as pd +import torch +import transformers +from tokenizers.implementations import ByteLevelBPETokenizer +from tokenizers.processors import BertProcessing +from torch.utils.data import Dataset +from tqdm.auto import tqdm +from transformers import PreTrainedTokenizer +from transformers.models.bart.modeling_bart import ( + shift_tokens_right as _shift_tokens_right, +) +from datasets 
# --- flattened-diff residue: tail of a split import line; the head
# ("from datasets ") sits on the previous flattened line. Kept as a comment
# because it is not executable on its own:
# import Features, Sequence, Value, load_dataset
from datasets import Dataset as HFDataset
from transformers import (
    DPRContextEncoder,
    DPRContextEncoderTokenizerFast,
)


logger = logging.getLogger(__name__)

# transformers < 4.2.0 had a two-argument shift_tokens_right; wrap it so the
# rest of the module can always call the newer three-argument form.
if transformers.__version__ < "4.2.0":
    shift_tokens_right = lambda input_ids, pad_token_id, decoder_start_token_id: _shift_tokens_right(
        input_ids, pad_token_id
    )
else:
    shift_tokens_right = _shift_tokens_right


def preprocess_batch_for_hf_dataset(
    dataset, encoder_tokenizer, decoder_tokenizer, args
):
    """Tokenize one batch of a HF dataset into model-ready arrays.

    The returned dict's keys depend on args.model_type:
      - "bart": source_ids / source_mask / target_ids
      - "mbart": input_ids / attention_mask / decoder_input_ids / labels
      - "rag-token"/"rag-sequence" and everything else:
        input_ids / attention_mask / decoder_input_ids

    All arrays come back as numpy (return_tensors="np"), padded/truncated to
    args.max_seq_length.
    """
    if args.model_type == "bart":
        # BART uses a single tokenizer for both sides of the pair.
        input_ids = encoder_tokenizer.batch_encode_plus(
            dataset["input_text"],
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="np",
            truncation=True,
        )

        target_ids = encoder_tokenizer.batch_encode_plus(
            dataset["target_text"],
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="np",
            truncation=True,
        )

        return {
            "source_ids": input_ids["input_ids"].squeeze(),
            "source_mask": input_ids["attention_mask"].squeeze(),
            "target_ids": target_ids["input_ids"].squeeze(),
        }
    elif args.model_type == "mbart":
        tokenized_example = encoder_tokenizer.prepare_seq2seq_batch(
            src_texts=dataset["input_text"],
            tgt_texts=dataset["target_text"],
            src_lang=args.src_lang,
            tgt_lang=args.tgt_lang,
            max_length=args.max_seq_length,
            padding="max_length",  # pad_to_max_length=True won't work in this case
            return_tensors="np",
            truncation=True,
        )

        # Decoder input is the labels shifted right, starting from the target
        # language code token.
        decoder_input_ids = tokenized_example["labels"].clone()
        decoder_input_ids = shift_tokens_right(
            decoder_input_ids,
            encoder_tokenizer.pad_token_id,
            encoder_tokenizer.lang_code_to_id[args.tgt_lang],
        )

        # Padding positions are masked out of the loss with -100.
        labels = tokenized_example["labels"]
        labels[labels == encoder_tokenizer.pad_token_id] = -100

        return {
            "input_ids": tokenized_example["input_ids"].squeeze(),
            "attention_mask": tokenized_example["attention_mask"].squeeze(),
            "decoder_input_ids": decoder_input_ids.squeeze(),
            "labels": labels.squeeze(),
        }
    elif args.model_type in ["rag-token", "rag-sequence"]:
        # RAG: the question encoder tokenizes inputs, the generator tokenizer
        # handles targets.
        source_inputs = encoder_tokenizer(
            dataset["input_text"],
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="np",
            truncation=True,
        )
        try:
            target_inputs = encoder_tokenizer.generator(
                dataset["target_text"],
                max_length=args.max_seq_length,
                padding="max_length",
                return_tensors="np",
                truncation=True,
            )
        except TypeError:
            # Non-string values (e.g. NaN) break the tokenizer; cast and retry.
            logger.warn(
                """Error encountered while converting target_text.
            All target_text values have been manually cast to String as a workaround.
            This may have been caused by NaN values present in the data."""
            )
            dataset["target_text"] = [str(d) for d in dataset["target_text"]]
            target_inputs = encoder_tokenizer.generator(
                dataset["target_text"],
                max_length=args.max_seq_length,
                padding="max_length",
                return_tensors="np",
                truncation=True,
            )
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    else:
        # Generic encoder-decoder pair: separate tokenizers for each side.
        source_inputs = encoder_tokenizer(
            dataset["input_text"],
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="np",
            truncation=True,
        )

        target_inputs = decoder_tokenizer(
            dataset["target_text"],
            max_length=args.max_seq_length,
            padding="max_length",
            return_tensors="np",
            truncation=True,
        )
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }


# --- flattened-diff residue: head of load_hf_dataset (continues on the next
# flattened line) ---
# def load_hf_dataset(data, encoder_tokenizer, decoder_tokenizer, args):
#     if isinstance(data, str):
#         dataset = load_dataset(
#             "csv", data_files=data, delimiter="\t",
#             download_mode="force_redownload" if ...
args.reprocess_input_data + else "reuse_dataset_if_exists", + ) + else: + dataset = HFDataset.from_pandas(data) + + dataset = dataset.map( + lambda x: preprocess_batch_for_hf_dataset( + x, + encoder_tokenizer=encoder_tokenizer, + decoder_tokenizer=decoder_tokenizer, + args=args, + ), + batched=True, + ) + + if args.model_type == "bart": + column_names = [ + "source_ids", + "source_mask", + "target_ids", + ] + elif args.model_type == "mbart": + column_names = [ + "input_ids", + "attention_mask", + "decoder_input_ids", + "labels", + ] + else: + column_names = [ + "input_ids", + "attention_mask", + "decoder_input_ids", + ] + + dataset.set_format(type="pt", columns=column_names) + + if isinstance(data, str): + # This is not necessarily a train dataset. The datasets library insists on calling it train. + return dataset["train"] + else: + return dataset + + +def preprocess_data(data): + input_text, target_text, encoder_tokenizer, decoder_tokenizer, args = data + + if args.model_type in ["rag-token", "rag-sequence"]: + source_inputs = encoder_tokenizer( + input_text, + max_length=args.max_seq_length, + padding="max_length", + return_tensors="pt", + truncation=True, + ) + target_inputs = encoder_tokenizer.generator( + target_text, + max_length=args.max_seq_length, + padding="max_length", + return_tensors="pt", + truncation=True, + ) + source_ids = source_inputs["input_ids"].squeeze() + target_ids = target_inputs["input_ids"].squeeze() + src_mask = source_inputs["attention_mask"].squeeze() + return { + "input_ids": source_ids, + "attention_mask": src_mask, + "decoder_input_ids": target_ids, + } + else: + input_text = encoder_tokenizer.encode( + input_text, + max_length=args.max_seq_length, + padding="max_length", + return_tensors="pt", + truncation=True, + ) + + target_text = decoder_tokenizer.encode( + target_text, + max_length=args.max_seq_length, + padding="max_length", + return_tensors="pt", + truncation=True, + ) + return (torch.flatten(input_text), 
torch.flatten(target_text)) + + +class Seq2SeqDataset(Dataset): + def __init__(self, encoder_tokenizer, decoder_tokenizer, args, data, mode): + cached_features_file = os.path.join( + args.cache_dir, + args.model_name.replace("/", "_") + + "_cached_" + + str(args.max_seq_length) + + str(len(data)), + ) + + if os.path.exists(cached_features_file) and ( + (not args.reprocess_input_data and not args.no_cache) + or (mode == "dev" and args.use_cached_eval_features and not args.no_cache) + ): + if args.is_master: + logger.info(" Loading features from cached file %s", cached_features_file) + with open(cached_features_file, "rb") as handle: + self.examples = pickle.load(handle) + else: + #logger.info + print(f" Creating features from dataset file at {args.cache_dir}") + + data = [ + (input_text, target_text, encoder_tokenizer, decoder_tokenizer, args) + for input_text, target_text in zip( + data["input_text"], data["target_text"] + ) + ] + + if (mode == "train" and args.use_multiprocessing) or ( + mode == "dev" and args.use_multiprocessing_for_evaluation + ): + if args.multiprocessing_chunksize == -1: + chunksize = max(len(data) // (args.process_count * 2), 500) + else: + chunksize = args.multiprocessing_chunksize + + with Pool(args.process_count) as p: + self.examples = list( + tqdm( + p.imap(preprocess_data, data, chunksize=chunksize), + total=len(data), + disable=not args.is_master,#args.silent, + ) + ) + else: + self.examples = [ + preprocess_data(d) for d in tqdm(data, disable=not args.is_master)#args.silent) + ] + + if not args.no_cache and args.is_master: + logger.info( + " Saving features into cached file %s", cached_features_file + ) + with open(cached_features_file, "wb") as handle: + pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL) + + def __len__(self): + return len(self.examples) + + def __getitem__(self, index): + return self.examples[index] + + +def preprocess_data_bart(data): + input_text, target_text, tokenizer, args = data + + input_ids 
= tokenizer.batch_encode_plus( + [input_text], + max_length=args.max_seq_length, + padding="max_length", + return_tensors="pt", + truncation=True, + ) + + target_ids = tokenizer.batch_encode_plus( + [target_text], + max_length=args.max_seq_length, + padding="max_length", + return_tensors="pt", + truncation=True, + ) + + return { + "source_ids": input_ids["input_ids"].squeeze(), + "source_mask": input_ids["attention_mask"].squeeze(), + "target_ids": target_ids["input_ids"].squeeze(), + } + + +def preprocess_data_mbart(data): + input_text, target_text, tokenizer, args = data + + tokenized_example = tokenizer.prepare_seq2seq_batch( + src_texts=[input_text], + tgt_texts=[target_text], + src_lang=args.src_lang, + tgt_lang=args.tgt_lang, + max_length=args.max_seq_length, + padding="max_length", # pad_to_max_length=True won't work in this case + return_tensors="pt", + truncation=True, + ) + + decoder_input_ids = tokenized_example["labels"].clone() + decoder_input_ids = shift_tokens_right( + decoder_input_ids, + tokenizer.pad_token_id, + tokenizer.lang_code_to_id[args.tgt_lang], + ) + + labels = tokenized_example["labels"] + labels[labels == tokenizer.pad_token_id] = -100 + + return { + "input_ids": tokenized_example["input_ids"].squeeze(), + "attention_mask": tokenized_example["attention_mask"].squeeze(), + "decoder_input_ids": decoder_input_ids.squeeze(), + "labels": labels.squeeze(), + } + + +class SimpleSummarizationDataset(Dataset): + def __init__(self, tokenizer, args, data, mode): + self.tokenizer = tokenizer + + cached_features_file = os.path.join( + args.cache_dir, + args.model_name.replace('/', '_') + "_cached_" + str(args.max_seq_length) + str(len(data)), + ) + + if os.path.exists(cached_features_file) and ( + (not args.reprocess_input_data and not args.no_cache) + or (mode == "dev" and args.use_cached_eval_features and not args.no_cache) + ): + if args.is_master: + logger.info(" Loading features from cached file %s", cached_features_file) + with 
# --- flattened-diff residue: interior of SimpleSummarizationDataset.__init__
# (cache load / feature building with Pool and preprocess_data_bart|mbart)
# plus its __len__/__getitem__. The class header sits on an earlier flattened
# line, so the fragment is preserved only as reference:
#     with open(cached_features_file, "rb") as handle:
#         self.examples = pickle.load(handle)
#     ...
#     def __len__(self): return len(self.examples)
#     def __getitem__(self, index): return self.examples[index]


def split_text(text, n=100, character=" "):
    """Split the text every ``n``-th occurrence of ``character``."""
    pieces = text.split(character)
    return [
        character.join(pieces[start:start + n]).strip()
        for start in range(0, len(pieces), n)
    ]


def split_documents(
    documents, split_text_n=100, split_text_character=" ", include_title=True
):
    """Split documents into passages.

    Documents with a None text are skipped. When include_title is True each
    passage keeps its document's title ("" for a None title); otherwise every
    title is "".
    """
    titles, texts = [], []
    if include_title:
        for title, text in zip(documents["title"], documents["text"]):
            if text is None:
                continue
            for passage in split_text(
                text, n=split_text_n, character=split_text_character
            ):
                titles.append("" if title is None else title)
                texts.append(passage)
    else:
        for text in documents["text"]:
            if text is None:
                continue
            for passage in split_text(
                text, n=split_text_n, character=split_text_character
            ):
                titles.append("")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents, ctx_encoder, ctx_tokenizer, device):
    """Compute the DPR embeddings of document passages."""
    tokenized = ctx_tokenizer(
        documents["title"],
        documents["text"],
        truncation=True,
        padding="longest",
        return_tensors="pt",
    )
    pooled = ctx_encoder(
        tokenized["input_ids"].to(device=device), return_dict=True
    ).pooler_output
    return {"embeddings": pooled.detach().cpu().numpy()}


# --- flattened-diff residue: head of generate_faiss_index_dataset (continues
# on the next flattened line) ---
# def generate_faiss_index_dataset(data, ctx_encoder_name, args, device):
#     """Adapted from the Huggingface RAG use_own_knowledge_dataset example."""
#     import faiss
#     ... load/split dataset, DPRContextEncoder + tokenizer, Features schema,
#     dataset = dataset.map(partial(embed, ...
ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer, device=device + ), + batched=True, + batch_size=args.rag_embed_batch_size, + features=new_features, + ) + if isinstance(data, str): + dataset = dataset["train"] + + if args.save_knowledge_dataset: + output_dataset_directory = os.path.join(args.output_dir, "knowledge_dataset") + os.makedirs(output_dataset_directory, exist_ok=True) + dataset.save_to_disk(output_dataset_directory) + + index = faiss.IndexHNSWFlat(args.faiss_d, args.faiss_m, faiss.METRIC_INNER_PRODUCT) + dataset.add_faiss_index("embeddings", custom_index=index) + + return dataset + + +def add_faiss_index_to_dataset(dataset): + import faiss + + index = faiss.IndexHNSWFlat(768, 128, faiss.METRIC_INNER_PRODUCT) + dataset.add_faiss_index("embeddings", custom_index=index) + + return dataset diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/LICENSE b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..c2a26b35dd47c20d661a93cb5deb84de32df74e4 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/LICENSE @@ -0,0 +1,204 @@ +Copyright (c) 2021 Habana Labs, Ltd. an Intel Company + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2019 NVIDIA CORPORATION. All rights reserved. + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/README.md b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2a3966798a55cc4e1635efa21c5f5f9fef4d2e26 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/README.md @@ -0,0 +1,898 @@ +# BERT for PyTorch + +This folder contains scripts to pre-train , finetune BERT model and run inference on finetuned BERT model on Intel® Gaudi® AI Accelerator to achieve state-of-the-art accuracy. To obtain model performance data, refer to the [Habana Model Performance Data page](https://developer.habana.ai/resources/habana-training-models/#performance) + +For more information about training deep learning models using Gaudi, visit [developer.habana.ai](https://developer.habana.ai/resources/). + +**Note**: BERT is enabled on both Gaudi and Gaudi2. 
+## Table of Contents +- [Model References](../../../README.md) +- [Model Overview](#model-overview) +- [Setup](#setup) +- [Training and Examples](#training-and-examples) +- [Inference and Examples](#inference-and-examples) +- [Pre-trained Model](#pre-trained-model) +- [Supported Configurations](#supported-configurations) +- [Changelog](#changelog) +- [Known Issues](#known-issues) + +## Model Overview +Bidirectional Encoder Representations from Transformers (BERT) is a technique for natural language processing (NLP) pre-training developed by Google. +The original English-language BERT model comes with two pre-trained general types: (1) the BERTBASE model, a 12-layer, 768-hidden, 12-heads, 110M parameter neural network architecture, and (2) the BERTLARGE model, a 24-layer, 1024-hidden, 16-heads, 340M parameter neural network architecture; both of which were trained on the BooksCorpus with 800M words, and a version of the English Wikipedia with 2,500M words. +The base training and modeling scripts for pre-training are based on a clone of https://github.com/NVIDIA/DeepLearningExamples.git and fine-tuning is based on https://github.com/huggingface/transformers.git. + +The scripts included in this release are as follows: +- BERT Large pre-training for BF16 mixed precision for Wikipedia BookCorpus and Wiki dataset in Lazy mode. +- BERT Large finetuning for BF16 mixed precision for Wikipedia BookCorpus and SQUAD dataset in Lazy mode. +- Multi-card (1 server = 8 cards) support for BERT Large pre-training and finetuning with BF16 mixed precision in Lazy mode. +- Multi-server (4 servers = 32 cards) support for BERT Large pre-training with BF16 mixed precision in Lazy mode. +- BERT pre-training 1.2B parameters using ZeroRedundancyOptimizer with BF16 mixed precision in Lazy mode. + + +Additional environment variables are used in training scripts in order to achieve optimal results for each workload. 
+ +### Pre-Training +- Located in: `Model-References/PyTorch/nlp/bert/` +- Suited for datasets: + - `wiki`, `bookswiki`(combination of BooksCorpus and Wiki datasets) +- Uses optimizer: **LAMB** ("Layer-wise Adaptive Moments optimizer for Batch training"). +- Consists of two tasks: + - Task 1 - **Masked Language Model** - where given a sentence, a randomly chosen word is guessed. + - Task 2 - **Next Sentence Prediction** - where the model guesses whether sentence B comes after sentence A. +- The resulting (trained) model weights are language-specific (here: english) and has to be further "fitted" to do a specific task (with fine-tuning). +- Heavy-weight: the training takes several hours or days. + +BERT training script supports pre-training of dataset on BERT large for both FP32 and BF16 mixed precision data type using **Lazy mode**. + +### Finetuning +- Located in: `Model-References/PyTorch/nlp/bert/` +- Suited for dataset: + - `SQUAD`(Stanford Question Answering Dataset) +- Uses optimizer: **Fused ADAM**. +- Light-weight: the finetuning takes several minutes. + +BERT finetuning script supports fine-tuning of SQUAD dataset on BERT large for both FP32 and BF16 mixed precision data type using **Lazy mode**. + +## Setup +Please follow the instructions provided in the [Gaudi Installation +Guide](https://docs.habana.ai/en/latest/Installation_Guide/index.html) to set up the +environment including the `$PYTHON` environment variable. +The guide will walk you through the process of setting up your system to run the model on Gaudi. + +### Clone Habana Model-References +In the docker container, clone this repository and switch to the branch that +matches your SynapseAI version. You can run the +[`hl-smi`](https://docs.habana.ai/en/latest/Management_and_Monitoring/System_Management_Tools_Guide/System_Management_Tools.html#hl-smi-utility-options) +utility to determine the SynapseAI version. 
+ +```bash +git clone -b [SynapseAI version] https://github.com/HabanaAI/Model-References +``` + +### Install Model Requirements +1. In the docker container, go to the BERT directory +```bash +cd Model-References/PyTorch/nlp/bert +``` +2. Install the required packages using pip: +```bash +$PYTHON -m pip install -r requirements.txt +``` +### Vocab File +Download the Vocab file located [here](https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip). + +### Download Dataset + +#### Pre-Training: + +`Model-References/PyTorch/nlp/bert/data` provides scripts to download, extract and pre-process [Wikipedia](https://dumps.wikimedia.org/) and [BookCorpus](http://yknzhu.wixsite.com/mbweb) datasets. + +Go to the `data` folder and run the data preparation script. +``` +cd Model-References/PyTorch/nlp/bert/data +``` +It is highly recommended to download Wiki dataset alone using the following command. +``` +bash create_datasets_from_start.sh +``` +Wiki and BookCorpus datasets can be downloaded by running the script as follows. +``` +bash create_datasets_from_start.sh wiki_books +``` +Note that the pre-training dataset is huge and takes several hours to download. BookCorpus may have access and download constraints. The final accuracy may vary depending on the dataset and its size. +The script creates formatted dataset for Phase 1 and Phase 2 of the pre-training. + +#### Finetuning: +This section provides steps to extract and pre-process Squad Dataset(V1.1). + +1. Go to `squad` folder. +``` +cd Model-References/PyTorch/nlp/bert/data/squad +``` +2. Download Squad dataset. +``` +bash squad_download.sh +``` + +### Packing the Data +Habana supports using a [Data packing technique](https://github.com/HabanaAI/Gaudi-tutorials/blob/main/TensorFlow/DataPackingMLperfBERT/Data_Packing_Process_for_MLPERF_BERT.ipynb), +called Non-Negative Least Squares Histogram. 
Here, instead of padding with zero, +several short sequences are packed into one multi-sequence of size `max_seq_len`. +Thus, this removes most of the padding, which can lead to a speedup of up to 2× +in time-to-train (TTT). This packing technique can be applied on other datasets +with high variability in samples length. + +Please note that for each NLP dataset with sequential data samples, the speedup +with data packing is determined by the ratio of `max_seq_len` to +`average_seq_len` in that particular dataset. The larger the ratio, the higher +the speedup. + +To pack the dataset, in docker run: +```bash +cd /root/Model-References/PyTorch/nlp/bert + +$PYTHON pack_pretraining_data_pytorch.py --input_dir --output_dir --max_sequence_length 128 --max_predictions_per_sequence 20 + +$PYTHON pack_pretraining_data_pytorch.py --input_dir --output_dir --max_sequence_length 512 --max_predictions_per_sequence 80 +``` +**Note:** This will generate json at the path /../_metadata.json with meta data info like: "avg_seq_per_sample" etc. This json will be +used as an input to run_pretraining.py to extract "avg_seq_per_sample" in case of packed dataset mode. + + +## Training and Examples + +Please create a log directory to store `dllogger.json` and specify its location for `--json_summary` attribute. 
+ +### Single Card and Multi-Card Pre-Training Examples +**Run training on 1 HPU:** + +- Using packed data: lazy mode, 1 HPU, BF16 mixed precision, batch size 64 for Phase 1 and batch size 8 for Phase 2: + +```bash +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \ + --autocast --config_file=./bert_config.json \ + --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \ + --use_fused_lamb \ + --input_dir=/data/pytorch/bert_pretraining/packed_data/phase1/train_packed_new \ + --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=7038 \ + --warmup_proportion=0.2843 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128 +``` + +```bash +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \ + --autocast --config_file=./bert_config.json \ + --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \ + --use_fused_lamb \ + --input_dir=/data/pytorch/bert_pretraining/packed_data/phase2/train_packed_new \ + --train_batch_size=4096 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 \ + --warmup_proportion=0.128 --num_steps_per_checkpoint=200 --learning_rate=0.004 \ + --gradient_accumulation_steps=512 --resume_from_checkpoint --phase1_end_step=7038 --phase2 +``` + +- Using packed data: Eager mode with torch.compile enabled, 1 HPU, BF16 mixed precision, batch size 64 for Phase 1 on **Gaudi2**:: +```bash +export PT_HPU_LAZY_MODE=0 +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \ + --autocast --config_file=./bert_config.json \ + --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \ + --use_fused_lamb 
--use_torch_compile \ + --input_dir=/data/pytorch/bert_pretraining/packed_data/phase1/train_packed_new \ + --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=7038 \ + --warmup_proportion=0.2843 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128 +``` + + +- Using packed data: lazy mode, 1 HPU, BF16 mixed precision, batch size 64 for Phase 1 and batch size 16 for Phase 2 on **Gaudi2**: + +```bash +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \ + --autocast --config_file=./bert_config.json \ + --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \ + --use_fused_lamb \ + --input_dir=/data/pytorch/bert_pretraining/packed_data/phase1/train_packed_new \ + --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=7038 \ + --warmup_proportion=0.2843 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128 +``` + +```bash +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \ + --autocast --config_file=./bert_config.json \ + --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \ + --use_fused_lamb \ + --input_dir=/data/pytorch/bert_pretraining/packed_data/phase2/train_packed_new \ + --train_batch_size=8192 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 \ + --warmup_proportion=0.128 --num_steps_per_checkpoint=200 --learning_rate=0.004 \ + --gradient_accumulation_steps=512 --resume_from_checkpoint --phase1_end_step=7038 --phase2 +``` + +- Lazy mode, 1 HPU, unpacked data, BF16 mixed precision, batch size 64 for Phase1 and batch size 8 for Phase2: + +```bash +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \ + --autocast 
--config_file=./bert_config.json \ + --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints --use_fused_lamb \ + --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_128/books_wiki_en_corpus \ + --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=7038 \ + --warmup_proportion=0.2843 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128 \ + --enable_packed_data_mode False +``` + + +```bash +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \ + --autocast --config_file=./bert_config.json \ + --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints --use_fused_lamb \ + --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_512_max_pred_80_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5/books_wiki_en_corpus \ + --train_batch_size=4096 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 \ + --warmup_proportion=0.128 --num_steps_per_checkpoint=200 --learning_rate=0.004\ + --gradient_accumulation_steps=512 --resume_from_checkpoint --phase1_end_step=7038 --phase2 \ + --enable_packed_data_mode False +``` + +- Lazy mode, 1 HPU, unpacked data, FP32 precision, batch size 32 for Phase 1 and batch size 4 for Phase 2: + +```bash +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --config_file=./bert_config.json \ + --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints --use_fused_lamb \ + --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_128/books_wiki_en_corpus \ + --train_batch_size=512 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=7038 \ + 
--warmup_proportion=0.2843 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=32 \ + --enable_packed_data_mode False +``` + +```bash +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --config_file=./bert_config.json \ + --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints --use_fused_lamb \ + --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_512/books_wiki_en_corpus \ + --train_batch_size=128 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 \ + --warmup_proportion=0.128 --num_steps_per_checkpoint=200 --learning_rate=0.004 \ + --gradient_accumulation_steps=64 --resume_from_checkpoint --phase1_end_step=7038 --phase2 \ + --enable_packed_data_mode False +``` + +**Run training on 8 HPUs:** + +To run multi-card demo, make sure the host machine has 512 GB of RAM installed. Modify the docker run command to pass 8 Gaudi cards to the docker container. This ensures the docker has access to all the 8 cards required for multi-card demo. + +**NOTE:** mpirun map-by PE attribute value may vary on your setup. For the recommended calculation, refer to the instructions detailed in [mpirun Configuration](https://docs.habana.ai/en/latest/PyTorch/PyTorch_Scaling_Guide/DDP_Based_Scaling.html#mpirun-configuration). 
+ +- Using packed data: lazy mode, 8 HPUs, BF16 mixed precision, per chip batch size of 64 for Phase 1 and 8 for Phase 2: + +```bash +export MASTER_ADDR="localhost" +export MASTER_PORT="12345" +mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \ +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast --config_file=./bert_config.json --use_habana \ + --allreduce_post_accumulation --allreduce_post_accumulation_fp16 --json-summary=/tmp/log_directory/dllogger.json \ + --output_dir=/tmp/results/checkpoints --use_fused_lamb \ + --input_dir=/data/pytorch/bert_pretraining/packed_data/phase1/train_packed_new \ + --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=7038 \ + --warmup_proportion=0.2843 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128 +``` + +```bash +export MASTER_ADDR="localhost" +export MASTER_PORT="12345" +mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \ +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast --config_file=./bert_config.json --use_habana \ + --allreduce_post_accumulation --allreduce_post_accumulation_fp16 --json-summary=/tmp/log_directory/dllogger.json \ + --output_dir=/tmp/results/checkpoints --use_fused_lamb \ + --input_dir=/data/pytorch/bert_pretraining/packed_data/phase2/train_packed_new \ + --train_batch_size=4096 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 \ + --warmup_proportion=0.128 --num_steps_per_checkpoint=200 --learning_rate=0.004 \ + --gradient_accumulation_steps=512 --resume_from_checkpoint --phase1_end_step=7038 --phase2 +``` + +- Using packed data: lazy mode, 8 HPUs, BF16 mixed precision, per chip batch size of 64 for Phase 1 and 16 for Phase 2 on **Gaudi2**: + +```bash +export MASTER_ADDR="localhost" +export MASTER_PORT="12345" +mpirun -n 8 --bind-to core --map-by socket:PE=6 
--rank-by core --report-bindings --allow-run-as-root \ +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast --config_file=./bert_config.json --use_habana \ + --allreduce_post_accumulation --allreduce_post_accumulation_fp16 --json-summary=/tmp/log_directory/dllogger.json \ + --output_dir=/tmp/results/checkpoints --use_fused_lamb \ + --input_dir=/data/pytorch/bert_pretraining/packed_data/phase1/train_packed_new \ + --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=7038 \ + --warmup_proportion=0.2843 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128 +``` + +```bash +export MASTER_ADDR="localhost" +export MASTER_PORT="12345" +mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \ +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast --config_file=./bert_config.json --use_habana \ + --allreduce_post_accumulation --allreduce_post_accumulation_fp16 --json-summary=/tmp/log_directory/dllogger.json \ + --output_dir=/tmp/results/checkpoints --use_fused_lamb \ + --input_dir=/data/pytorch/bert_pretraining/packed_data/phase2/train_packed_new \ + --train_batch_size=8192 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 \ + --warmup_proportion=0.128 --num_steps_per_checkpoint=200 --learning_rate=0.004 \ + --gradient_accumulation_steps=512 --resume_from_checkpoint --phase1_end_step=7038 --phase2 +``` + +- Eager mode with torch.compile enabled, 8 HPUs, packed data, BF16 mixed precision, per chip batch size of 64 for Phase 1 on **Gaudi2**: + +```bash +export PT_HPU_LAZY_MODE=0 +export MASTER_ADDR="localhost" +export MASTER_PORT="12345" +mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \ +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \ + --autocast --use_torch_compile \ + --config_file=./bert_config.json --use_habana 
--allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/BERT_PRETRAINING/results/checkpoints --use_fused_lamb \ + --input_dir=/data/pytorch/bert_pretraining/packed_data/phase1/train_packed_new \ + --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --warmup_proportion=0.2843 \ + --max_steps=7038 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128 +``` + +- Lazy mode, 8 HPUs, unpacked data, BF16 mixed precision, per chip batch size of 64 for Phase 1 and 8 for Phase 2: +```bash +export MASTER_ADDR="localhost" +export MASTER_PORT="12345" +mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \ +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \ + --autocast --use_lazy_mode=True \ + --config_file=./bert_config.json --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/BERT_PRETRAINING/results/checkpoints --use_fused_lamb \ + --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_128/books_wiki_en_corpus \ + --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --warmup_proportion=0.2843 \ + --max_steps=7038 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128 \ + --enable_packed_data_mode False +``` + +```bash +export MASTER_ADDR="localhost" +export MASTER_PORT="12345" +mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \ +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \ + --autocast --use_lazy_mode=True \ + --config_file=./bert_config.json --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/BERT_PRETRAINING/results/checkpoints --use_fused_lamb 
\ + --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_512/books_wiki_en_corpus \ + --train_batch_size=4096 --max_seq_length=512 --max_predictions_per_seq=80 --warmup_proportion=0.128 \ + --max_steps=5 --num_steps_per_checkpoint=200 --learning_rate=0.004 --gradient_accumulation_steps=512 --resume_from_checkpoint --phase1_end_step=7038 --phase2 \ + --enable_packed_data_mode False +``` + +- Lazy mode, 8 HPUs, unpacked data, FP32 precision, per chip batch size of 32 for Phase 1 and 4 for Phase 2: + +```bash +export MASTER_ADDR="localhost" +export MASTER_PORT="12345" +mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \ +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --config_file=./bert_config.json \ + --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \ + --use_fused_lamb --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_128/books_wiki_en_corpus \ + --train_batch_size=8192 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=3 --warmup_proportion=0.2843 \ + --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=256 \ + --enable_packed_data_mode False +``` + +```bash +export MASTER_ADDR="localhost" +export MASTER_PORT="12345" +mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --config_file=./bert_config.json \ + --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 --json-summary=/tmp/log_directory/dllogger.json \ + --output_dir=/tmp/results/checkpoints --use_fused_lamb \ + --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_512/books_wiki_en_corpus \ + --train_batch_size=4096 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 
--warmup_proportion=0.128 \ + --num_steps_per_checkpoint=200 --learning_rate=0.004 --gradient_accumulation_steps=512 \ + --resume_from_checkpoint --phase1_end_step=7038 --phase2 \ + --enable_packed_data_mode False +``` + + +### Single Card and Multi-Card Finetuning Examples +**Run training on 1 HPU:** +- Lazy mode, 1 HPU, BF16 mixed precision, batch size 24 for train and batch size 8 for test: + +```bash +$PYTHON run_squad.py --do_train --bert_model=bert-large-uncased \ + --config_file=./bert_config.json \ + --use_habana --use_fused_adam --do_lower_case --output_dir=/tmp/results/checkpoints \ + --json-summary=/tmp/log_directory/dllogger.json \ + --train_batch_size=24 --predict_batch_size=8 --seed=1 --max_seq_length=384 \ + --doc_stride=128 --max_steps=-1 --learning_rate=3e-5 --num_train_epochs=2 \ + --init_checkpoint= \ + --vocab_file= \ + --train_file=data/squad/v1.1/train-v1.1.json \ + --skip_cache --do_predict \ + --predict_file=data/squad/v1.1/dev-v1.1.json \ + --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py --log_freq 20 +``` + +- Lazy mode, 1 HPU, FP32 precision, batch size 12 for train and batch size 8 for test: + +```bash +$PYTHON run_squad.py --do_train --bert_model=bert-large-uncased --config_file=./bert_config.json \ + --use_habana --use_fused_adam --do_lower_case --output_dir=/tmp/results/checkpoints \ + --json-summary=/tmp/log_directory/dllogger.json \ + --train_batch_size=12 --predict_batch_size=8 --seed=1 --max_seq_length=384 \ + --doc_stride=128 --max_steps=-1 --learning_rate=3e-5 --num_train_epochs=2 \ + --init_checkpoint= \ + --vocab_file= \ + --train_file=data/squad/v1.1/train-v1.1.json \ + --skip_cache --do_predict \ + --predict_file=data/squad/v1.1/dev-v1.1.json \ + --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py --log_freq 20 +``` + +- Eager mode with torch.compile enabled, 1 HPU, FP32 precision, batch size 12 for train and batch size 8 for test: + +```bash +export PT_HPU_LAZY_MODE=0 +$PYTHON run_squad.py --do_train 
--bert_model=bert-large-uncased --config_file=./bert_config.json \ + --use_habana --use_fused_adam --do_lower_case --output_dir=/tmp/results/checkpoints \ + --json-summary=/tmp/log_directory/dllogger.json --use_torch_compile \ + --train_batch_size=12 --predict_batch_size=8 --seed=1 --max_seq_length=384 \ + --doc_stride=128 --max_steps=-1 --learning_rate=3e-5 --num_train_epochs=2 \ + --init_checkpoint= \ + --vocab_file= \ + --train_file=data/squad/v1.1/train-v1.1.json \ + --skip_cache --do_predict \ + --predict_file=data/squad/v1.1/dev-v1.1.json \ + --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py --log_freq 20 +``` + +**Run training on 8 HPUs:** + +To run multi-card demo, make sure the host machine has 512 GB of RAM installed. Modify the docker run command to pass 8 Gaudi cards to the docker container. This ensures the docker has access to all the 8 cards required for multi-card demo. + +**NOTE:** mpirun map-by PE attribute value may vary on your setup. For the recommended calculation, refer to the instructions detailed in [mpirun Configuration](https://docs.habana.ai/en/latest/PyTorch/PyTorch_Scaling_Guide/DDP_Based_Scaling.html#mpirun-configuration). 
+ +- Lazy mode, 8 HPUs, BF16 mixed precision, per chip batch size of 24 for train and 8 for test: +```bash +export MASTER_ADDR="localhost" +export MASTER_PORT="12345" +mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \ +$PYTHON run_squad.py --do_train --bert_model=bert-large-uncased \ + --config_file=./bert_config.json \ + --use_habana --use_fused_adam --do_lower_case --output_dir=/tmp/results/checkpoints \ + --json-summary=/tmp/log_directory/dllogger.json \ + --train_batch_size=24 --predict_batch_size=8 --seed=1 --max_seq_length=384 \ + --doc_stride=128 --max_steps=-1 --learning_rate=3e-5 --num_train_epochs=2 \ + --init_checkpoint= \ + --vocab_file= \ + --train_file=data/squad/v1.1/train-v1.1.json \ + --skip_cache --do_predict \ + --predict_file=data/squad/v1.1/dev-v1.1.json \ + --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py --log_freq 20 +``` + +- Lazy mode, 8 HPUs, FP32 precision, per chip batch size of 12 for train and 8 for test: + +```bash +export MASTER_ADDR="localhost" +export MASTER_PORT="12345" +mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \ +$PYTHON run_squad.py --do_train --bert_model=bert-large-uncased --config_file=./bert_config.json \ + --use_habana --use_fused_adam --do_lower_case --output_dir=/tmp/results/checkpoints \ + --json-summary=/tmp/log_directory/dllogger.json \ + --train_batch_size=12 --predict_batch_size=8 --seed=1 --max_seq_length=384 \ + --doc_stride=128 --max_steps=-1 --learning_rate=3e-5 --num_train_epochs=2 \ + --init_checkpoint= \ + --vocab_file= \ + --train_file=data/squad/v1.1/train-v1.1.json \ + --skip_cache --do_predict \ + --predict_file=data/squad/v1.1/dev-v1.1.json \ + --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py --log_freq 20 +``` + +- Eager mode with torch.compile enabled, 8 HPUs, BF16 mixed precision, per chip batch size of 24 for train and 8 for test: +```bash +export PT_HPU_LAZY_MODE=0 +export 
MASTER_ADDR="localhost" +export MASTER_PORT="12345" +mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \ +$PYTHON run_squad.py --do_train --bert_model=bert-large-uncased \ + --config_file=./bert_config.json --use_torch_compile \ + --use_habana --use_fused_adam --do_lower_case --output_dir=/tmp/results/checkpoints \ + --json-summary=/tmp/log_directory/dllogger.json \ + --train_batch_size=24 --predict_batch_size=8 --seed=1 --max_seq_length=384 \ + --doc_stride=128 --max_steps=-1 --learning_rate=3e-5 --num_train_epochs=2 \ + --init_checkpoint= \ + --vocab_file= \ + --train_file=data/squad/v1.1/train-v1.1.json \ + --skip_cache --do_predict \ + --predict_file=data/squad/v1.1/dev-v1.1.json \ + --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py --log_freq 20 +``` + +- Habana provides the pretraining checkpoints for most of the models. The user can simply feed the data from [BERT checkpoint](https://developer.habana.ai/catalog/bert-pretraining-for-pytorch/) to provide the path-to-checkpoint for --init_checkpoint when you run the above model. + +### Multi-Server Training Examples +To run multi-server demo, make sure the host machine has 512 GB of RAM installed. +Also ensure you followed the [Gaudi Installation +Guide](https://docs.habana.ai/en/latest/Installation_Guide/index.html) +to install and set up docker, so that the docker has access to all the 8 cards +required for multi-node demo. Multi-server configuration for BERT PT training up to +4 servers, each with 8 Gaudi cards, have been verified. + +Before execution of the multi-server scripts, make sure all network interfaces are up. 
You can change the state of each network interface managed by the habanalabs driver using the following command: +``` +sudo ip link set <interface_name> up +``` +To identify if a specific network interface is managed by the habanalabs driver, run: +``` +sudo ethtool -i <interface_name> +``` +#### Docker ssh Port Setup for Multi-Server Training + +By default, the Habana docker uses `port 22` for ssh. The default port configured in the script is `port 3022`. Run the following commands to configure the selected port number, `port 3022` in the example below. + +```bash +sed -i 's/#Port 22/Port 3022/g' /etc/ssh/sshd_config +sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config +service ssh restart +``` +#### Set up password-less ssh +To set up password-less ssh between all connected servers used in scale-out training, follow the steps below: + +1. Run the following in all the nodes' docker sessions: + ```bash + mkdir ~/.ssh + cd ~/.ssh + ssh-keygen -t rsa -b 4096 + ``` + a. Copy id_rsa.pub contents from every node's docker to every other node's docker's ~/.ssh/authorized_keys (all public keys need to be in all hosts' authorized_keys): + ```bash + cat id_rsa.pub > authorized_keys + vi authorized_keys + ``` + b. Copy the contents of id_rsa.pub to the other systems. + + c. Paste all hosts' public keys in all hosts' “authorized_keys” file. + +2. On each system, add all hosts (including itself) to known_hosts. The IP addresses used below are just for illustration: + ```bash + ssh-keyscan -p 3022 -H 10.10.100.101 >> ~/.ssh/known_hosts + ssh-keyscan -p 3022 -H 10.10.100.102 >> ~/.ssh/known_hosts + ssh-keyscan -p 3022 -H 10.10.100.103 >> ~/.ssh/known_hosts + ssh-keyscan -p 3022 -H 10.10.100.104 >> ~/.ssh/known_hosts + ``` +3. Install the Python packages required for the BERT pre-training model: + ``` + pip install -r Model-References/PyTorch/nlp/bert/requirements.txt + ``` + +**Run training on 32 HPUs:** + +**NOTE:** +- mpirun map-by PE attribute value may vary on your setup.
For the recommended calculation, refer to the instructions detailed in [mpirun Configuration](https://docs.habana.ai/en/latest/PyTorch/PyTorch_Scaling_Guide/DDP_Based_Scaling.html#mpirun-configuration). +- `$MPI_ROOT` environment variable is set automatically during Setup. See [Gaudi Installation Guide](https://docs.habana.ai/en/latest/Installation_Guide/GAUDI_Installation_Guide.html) for details. + +- Using packed data: lazy mode, 32 HPUs, BF16 mixed precision, per chip batch size 64 for Phase 1 and batch size 8 for Phase 2: +```bash +export MASTER_ADDR="10.10.100.101" +export MASTER_PORT="12345" +mpirun --allow-run-as-root --mca plm_rsh_args "-p 3022" --bind-to core -n 32 --map-by ppr:4:socket:PE=6 \ +--rank-by core --report-bindings --prefix --mca btl_tcp_if_include 10.10.100.101/16 + $MPI_ROOT -H 10.10.100.101:16,10.10.100.102:16,10.10.100.103:16,10.10.100.104:16 -x LD_LIBRARY_PATH \ + -x HABANA_LOGS -x PYTHONPATH -x MASTER_ADDR \ + -x MASTER_PORT \ + $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast --config_file=./bert_config.json \ + --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \ + --use_fused_lamb --input_dir=/data/pytorch/bert_pretraining/packed_data/phase1/train_packed_new \ + --train_batch_size=2048 --max_seq_length=128 --max_predictions_per_seq=20 --max_steps=7038 \ + --warmup_proportion=0.2843 --num_steps_per_checkpoint=200 --learning_rate=0.006 \ + --gradient_accumulation_steps=32 +``` + +```bash +export MASTER_ADDR="10.10.100.101" +export MASTER_PORT="12345" +mpirun --allow-run-as-root --mca plm_rsh_args "-p 3022" --bind-to core -n 32 --map-by ppr:4:socket:PE=6 \ +--rank-by core --report-bindings --prefix --mca btl_tcp_if_include 10.10.100.101/16 \ + $MPI_ROOT -H 10.10.100.101:16,10.10.100.102:16,10.10.100.103:16,10.10.100.104:16 -x LD_LIBRARY_PATH \ + -x HABANA_LOGS -x PYTHONPATH -x MASTER_ADDR \ + 
-x MASTER_PORT \ + $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast --config_file=./bert_config.json \ + --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/results/checkpoints \ + --use_fused_lamb --input_dir=/data/pytorch/bert_pretraining/packed_data/phase2/train_packed_new \ + --train_batch_size=1024 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 --warmup_proportion=0.128 \ --num_steps_per_checkpoint=200 --learning_rate=0.004 --gradient_accumulation_steps=128 \ + --resume_from_checkpoint --phase1_end_step=7038 --phase2 +``` + +- Lazy mode, 32 HPUs, unpacked data, BF16 mixed precision, batch size 64 for Phase 1 and batch size 8 for Phase 2: + +```bash +export MASTER_ADDR="10.10.100.101" +export MASTER_PORT="12345" +mpirun --allow-run-as-root --mca plm_rsh_args -p3022 --bind-to core -n 32 --map-by ppr:4:socket:PE=6 \ +--rank-by core --report-bindings --prefix --mca btl_tcp_if_include 10.10.100.101/16 \ +$MPI_ROOT -H 10.10.100.101:16,10.10.100.102:16,10.10.100.103:16,10.10.100.104:16 \ + -x LD_LIBRARY_PATH -x HABANA_LOGS -x PYTHONPATH -x MASTER_ADDR -x MASTER_PORT -x https_proxy -x http_proxy \ +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased \ + --autocast --config_file=./bert_config.json \ + --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir= /tmp/results/checkpoints \ + --use_fused_lamb --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_128/books_wiki_en_corpus \ + --train_batch_size=2048 --max_seq_length=128 --max_predictions_per_seq=20 + --max_steps=7038 --warmup_proportion=0.2843 \ + --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=32 \ + --enable_packed_data_mode False +``` + +```bash +export MASTER_ADDR="10.10.100.101" +export MASTER_PORT="12345" 
+mpirun --allow-run-as-root --mca plm_rsh_args -p3022 --bind-to core -n 32 --map-by ppr:4:socket:PE=6 \ +--rank-by core --report-bindings --prefix --mca btl_tcp_if_include 10.10.100.101/16 \ + $MPI_ROOT -H 10.10.100.101:16,10.10.100.102:16,10.10.100.103:16,10.10.100.104:16 -x LD_LIBRARY_PATH \ + -x HABANA_LOGS -x PYTHONPATH -x MASTER_ADDR -x MASTER_PORT \ + $PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast \ + --config_file=./bert_config.json --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir= /tmp/results/checkpoints \ + --use_fused_lamb --input_dir=/data/pytorch/bert_pretraining/hdf5_lower_case_1_seq_len_512/books_wiki_en_corpus \ + --train_batch_size=1024 --max_seq_length=512 --max_predictions_per_seq=80 --max_steps=1563 \ + --warmup_proportion=0.128 --num_steps_per_checkpoint=200 --learning_rate=0.004 \ + --gradient_accumulation_steps=128 --resume_from_checkpoint --phase1_end_step=7038 --phase2 \ + --enable_packed_data_mode False +``` + +### BERT Pre-Training with ZeroRedundancyOptimizer + +BERT training script supports pre-training of BERT 1.2B parameters using ZeroRedundancyOptimizer with BF16 mixed precision data type in **Lazy mode**. 
+ +- Lazy mode, 8 HPUs, BF16 mixed precision, per chip batch size 8 for Phase 1 and batch size 2 for Phase 2: + +```bash +export MASTER_ADDR="localhost" +export MASTER_PORT="12345" +mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \ +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast --use_lazy_mode=True \ + --config_file=./bert_config_1.2B.json --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/BERT_PRETRAINING/results/checkpoints --use_fused_lamb \ + --input_dir=/data/pytorch/bert_pretraining/packed_data/phase1/train_packed_new \ + --train_batch_size=1024 --max_seq_length=128 --max_predictions_per_seq=20 --warmup_proportion=0.2843 \ + --max_steps=7038 --num_steps_per_checkpoint=200 --learning_rate=0.006 --gradient_accumulation_steps=128 \ + --use_zero_optimizer True + +``` + +```bash +export MASTER_ADDR="localhost" +export MASTER_PORT="12345" +mpirun -n 8 --bind-to core --map-by socket:PE=6 --rank-by core --report-bindings --allow-run-as-root \ +$PYTHON run_pretraining.py --do_train --bert_model=bert-large-uncased --autocast --use_lazy_mode=True \ + --config_file=./bert_config_1.2B.json --use_habana --allreduce_post_accumulation --allreduce_post_accumulation_fp16 \ + --json-summary=/tmp/log_directory/dllogger.json --output_dir=/tmp/BERT_PRETRAINING/results/checkpoints --use_fused_lamb \ + --input_dir=/data/pytorch/bert_pretraining/packed_data/phase2/train_packed_new \ + --train_batch_size=1024 --max_seq_length=512 --max_predictions_per_seq=80 --warmup_proportion=0.128 \ + --max_steps=1563 --num_steps_per_checkpoint=200 --learning_rate=0.004 --gradient_accumulation_steps=512 \ + --resume_from_checkpoint --phase1_end_step=7038 --phase2 --use_zero_optimizer True + +``` +## Inference and Examples +**Run inference on 1 HPU:** +- Lazy mode, 1 HPU, BF16 mixed precision, batch size 24: + 
+```bash +$PYTHON run_squad.py --bert_model=bert-large-uncased --autocast \ + --config_file=./bert_config.json \ + --use_habana --do_lower_case --output_dir=/tmp/results/checkpoints \ + --json-summary=/tmp/log_directory/dllogger.json \ + --predict_batch_size=24 \ + --init_checkpoint= \ + --vocab_file= \ + --do_predict \ + --predict_file=data/squad/v1.1/dev-v1.1.json \ + --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py +``` + +- HPU graphs, 1 HPU, BF16 mixed precision, batch size 24: + +```bash +$PYTHON run_squad.py --bert_model=bert-large-uncased --autocast --use_hpu_graphs \ + --config_file=./bert_config.json \ + --use_habana --do_lower_case --output_dir=/tmp/results/checkpoints \ + --json-summary=/tmp/log_directory/dllogger.json \ + --predict_batch_size=24 \ + --init_checkpoint= \ + --vocab_file= \ + --do_predict \ + --predict_file=data/squad/v1.1/dev-v1.1.json \ + --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py +``` + +- Lazy mode, 1 HPU, FP16 mixed precision, batch size 24: + +```bash +$PYTHON run_squad.py --bert_model=bert-large-uncased --autocast \ + --config_file=./bert_config.json \ + --use_habana --do_lower_case --output_dir=/tmp/results/checkpoints \ + --json-summary=/tmp/log_directory/dllogger.json \ + --predict_batch_size=24 \ + --init_checkpoint= \ + --vocab_file= \ + --do_predict --fp16 \ + --predict_file=data/squad/v1.1/dev-v1.1.json \ + --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py +``` + +- HPU graphs, 1 HPU, FP16 mixed precision, batch size 24: + +```bash +$PYTHON run_squad.py --bert_model=bert-large-uncased --autocast --use_hpu_graphs \ + --config_file=./bert_config.json \ + --use_habana --do_lower_case --output_dir=/tmp/results/checkpoints \ + --json-summary=/tmp/log_directory/dllogger.json \ + --predict_batch_size=24 \ + --init_checkpoint= \ + --vocab_file= \ + --do_predict --fp16 \ + --predict_file=data/squad/v1.1/dev-v1.1.json \ + --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py +``` + +**Run inference on 1 
HPU with torch.compile:** +- 1 HPU, BF16 mixed precision, batch size 24: + +```bash +$PYTHON run_squad.py --bert_model=bert-large-uncased --autocast \ + --config_file=./bert_config.json \ + --use_habana --do_lower_case --output_dir=/tmp/results/checkpoints \ + --json-summary=/tmp/log_directory/dllogger.json \ + --predict_batch_size=24 \ + --init_checkpoint= \ + --vocab_file= \ + --do_predict --use_torch_compile \ + --predict_file=data/squad/v1.1/dev-v1.1.json \ + --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py +``` + +- 1 HPU, FP16 mixed precision, batch size 24: + +```bash +$PYTHON run_squad.py --bert_model=bert-large-uncased --autocast \ + --config_file=./bert_config.json \ + --use_habana --do_lower_case --output_dir=/tmp/results/checkpoints \ + --json-summary=/tmp/log_directory/dllogger.json \ + --predict_batch_size=24 \ + --init_checkpoint= \ + --vocab_file= \ + --do_predict --use_torch_compile --fp16 \ + --predict_file=data/squad/v1.1/dev-v1.1.json \ + --do_eval --eval_script=data/squad/v1.1/evaluate-v1.1.py +``` + +When not using torch.compile this model recommends using the ["HPU graph"](https://docs.habana.ai/en/latest/PyTorch/Inference_on_Gaudi/Inference_using_HPU_Graphs/Inference_using_HPU_Graphs.html) model type to minimize the host time spent in the `forward()` call. + +## Pre-trained Model and Checkpoint +PyTorch BERT is trained on Intel Gaudi AI Accelerators and the saved model & checkpoints are provided. You can use it for fine-tuning or transfer learning tasks with your own datasets. To download the saved model file, please refer to [Habana Catalog](https://developer.habana.ai/catalog/bert-pretraining-for-pytorch/) to obtain the URL. 
+ + +## Supported Configurations + +| Validated on | SynapseAI Version | PyTorch Version | Mode | +|--------|-------------------|-----------------|----------------| +| Gaudi | 1.14.0 | 2.1.1 | Training | +| Gaudi | 1.14.0 | 2.1.1 | Inference | +| Gaudi2 | 1.14.0 | 2.1.1 | Training | +| Gaudi2 | 1.14.0 | 2.1.1 | Inference | + +## Changelog +### 1.14.0 +1. Added support for dynamic shapes in BERT Pretraining + +### 1.13.0 +1. Added tensorboard logging. +2. Added support for torch.compile inference. +3. Added support for FP16 through autocast. +4. Aligned profiler invocation between training and inference loops. +5. Added support for dynamic shapes in BERT Finetuning +6. Added torch.compile support - performance improvement feature for PyTorch eager mode for + BERT Pretraining. Supported only for phase1. +7. Added torch.compile support - performance improvement feature for PyTorch eager mode for + BERT Finetuning. + +### 1.12.0 +1. Removed HMP; switched to Autocast. +2. Eager mode support is deprecated. + +### 1.11.0 +1. Dynamic Shapes will be enabled by default in future releases. It is currently enabled in BERT Pretraining Model + training script as a temporary solution. + +### 1.10.0 +1. Support added for cached dataset for finetuning. + +### 1.9.0 +1. Enabled usage of PyTorch autocast +2. Enabled BERT finetuning (run_squad.py) with the SQuAD dataset (training and inference). + +### 1.6.0 +1. ZeroRedundancyOptimizer support is added and tested with the BERT 1.2B parameter config. + +### 1.5.0 +1. Packed dataset mode is set as default execution mode +2. Deprecated the flags `enable_packed_data_mode` and `avg_seq_per_pack` and added support for automatic detection of those parameters based on dataset metadata file. +3. Changes related to Saving and Loading checkpoint were removed. +4. Removed changes related to padding index and flatten. +5. Fixed throughput calculation for packed dataset. +6.
Demo scripts were removed and references to custom demo script were replaced by community entry points in README +7. Reduced the number of distributed barrier calls to once per gradient accumulation steps +8. Simplified the distributed Initialization. +9. Added support for training on **Gaudi2** supporting up to 8 cards + +### 1.4.0 +1. Lazy mode is set as default execution mode, for eager mode set `use-lazy-mode` as False +2. Pretraining with packed dataset is supported + + +### 1.3.0 +1. Single worker thread changes are removed. +2. Loss computation was brought back to the training script. +3. Removed setting the embedding padding index as 0 explicitly. +4. Removed the select op implementation using index select and squeeze and retained the default code. +5. Permute and view are replaced with flatten. +6. Change `python` or `python3` to `$PYTHON` to execute the correct version based on the environment setup. + +### 1.2.0 +1. Enabled HCCL flow for distributed training. +2. Removed changes related to data type conversions for input_ids, segment ids, position_ids and input_mask. +3. Removed changes related to position ids from training script. +4. Removed changes related to no pinned memory and skip last batch. + + +### Training Script Modifications +The following changes have been added to training (run_pretraining.py and run_squad.py) and modeling (modeling.py) scripts. + +1. Added support for Habana devices: + + a. Load Habana specific library. + + b. Support required for CPU to work. + + c. Required environment variables are defined for habana device. + + d. Added Habana BF16 Mixed precision support. + + e. Added a Python version of the LAMB optimizer, used as the default (from lamb.py). + + f. Support for distributed training on Habana device. + + g. Added changes to support Lazy mode with required mark_step(). + + h. Added changes to calculate the performance per step and report through dllogger. + + i. Using conventional torch layernorm, linear and activation functions.
+ + j. Changes for dynamic loading of HCCL library. + + k. Added support for FusedAdamW and FusedClipNorm in run_squad.py. + + l. optimizer_grouped_parameters config has changed for weight_decay from 0.01 to 0.0. + + +2. To improve performance: + + a. Added support for Fused LAMB optimizer in run_pretraining.py. + + b. Bucket size set to 230MB for better performance in distributed training. + + c. Added support to use distributed all_reduce instead of default Distributed Data Parallel in pre-training. + + d. Added support for lowering print frequency of loss and associated this with log_freq. + + e. Added support for Fused ADAMW optimizer and FusedClipNorm in run_squad.py. + + +## Known Issues +1. Placing mark_step() arbitrarily may lead to undefined behaviour. It is recommended to keep mark_step() as shown in the provided scripts. +2. The BERT 1.2B parameter model is restricted to showcasing the PyTorch ZeroRedundancyOptimizer feature and is not intended for model convergence. +3. Only scripts and configurations mentioned in this README are supported and verified.
diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/bert_config.json b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/bert_config.json new file mode 100644 index 0000000000000000000000000000000000000000..a7efa973d748abe535c6af1815f28eec3bf3a044 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/bert_config.json @@ -0,0 +1,13 @@ +{ + "attention_probs_dropout_prob": 0.1, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "hidden_size": 1024, + "initializer_range": 0.02, + "intermediate_size": 4096, + "max_position_embeddings": 512, + "num_attention_heads": 16, + "num_hidden_layers": 24, + "type_vocab_size": 2, + "vocab_size": 30522 +} diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/bert_config_1.2B.json b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/bert_config_1.2B.json new file mode 100644 index 0000000000000000000000000000000000000000..a21173760e12504b9cba4264a6fceb7382f0ca2b --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/bert_config_1.2B.json @@ -0,0 +1,13 @@ +{ + "attention_probs_dropout_prob": 0.1, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "hidden_size": 1536, + "initializer_range": 0.02, + "intermediate_size": 6144, + "max_position_embeddings": 512, + "num_attention_heads": 16, + "num_hidden_layers": 40, + "type_vocab_size": 2, + "vocab_size": 30522 +} diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/create_pretraining_data.py b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/create_pretraining_data.py new file mode 100644 index 0000000000000000000000000000000000000000..6566556dab7d33cbb6049ab8b10e56f36cd5d04b --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/create_pretraining_data.py @@ -0,0 +1,476 @@ +# coding=utf-8 +# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. 
+# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Create masked LM/next sentence masked_lm TF examples for BERT.""" +from __future__ import absolute_import, division, print_function, unicode_literals + +import argparse +import logging +import os +import random +from io import open +import h5py +import numpy as np +from tqdm import tqdm, trange + +from tokenization import BertTokenizer +import tokenization as tokenization + +import random +import collections + + + + +class TrainingInstance(object): + """A single training instance (sentence pair).""" + + def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels, + is_random_next): + self.tokens = tokens + self.segment_ids = segment_ids + self.is_random_next = is_random_next + self.masked_lm_positions = masked_lm_positions + self.masked_lm_labels = masked_lm_labels + + def __str__(self): + s = "" + s += "tokens: %s\n" % (" ".join( + [tokenization.printable_text(x) for x in self.tokens])) + s += "segment_ids: %s\n" % (" ".join([str(x) for x in self.segment_ids])) + s += "is_random_next: %s\n" % self.is_random_next + s += "masked_lm_positions: %s\n" % (" ".join( + [str(x) for x in self.masked_lm_positions])) + s += "masked_lm_labels: %s\n" % (" ".join( + [tokenization.printable_text(x) for x in self.masked_lm_labels])) + s += "\n" + return s + + def __repr__(self): + return self.__str__() + + +def 
def write_instance_to_example_file(instances, tokenizer, max_seq_length,
                                   max_predictions_per_seq, output_file):
    """Serialize `TrainingInstance`s into a single HDF5 file.

    Args:
        instances: list of TrainingInstance objects to serialize.
        tokenizer: tokenizer providing `convert_tokens_to_ids` — assumed to be
            the project's BertTokenizer; TODO confirm against caller.
        max_seq_length: fixed length every token-level feature is padded to.
        max_predictions_per_seq: fixed length of the masked-LM feature arrays.
        output_file: path of the HDF5 file to create.
    """
    num_instances = len(instances)

    # Pre-allocate one fixed-shape array per feature; rows are filled in place.
    features = collections.OrderedDict()
    features["input_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32")
    features["input_mask"] = np.zeros([num_instances, max_seq_length], dtype="int32")
    features["segment_ids"] = np.zeros([num_instances, max_seq_length], dtype="int32")
    features["masked_lm_positions"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32")
    features["masked_lm_ids"] = np.zeros([num_instances, max_predictions_per_seq], dtype="int32")
    features["next_sentence_labels"] = np.zeros(num_instances, dtype="int32")

    for inst_index, instance in enumerate(tqdm(instances)):
        input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
        input_mask = [1] * len(input_ids)
        segment_ids = list(instance.segment_ids)
        assert len(input_ids) <= max_seq_length

        # Zero-pad token-level features out to max_seq_length.
        pad = max_seq_length - len(input_ids)
        input_ids.extend([0] * pad)
        input_mask.extend([0] * pad)
        segment_ids.extend([0] * pad)

        masked_lm_positions = list(instance.masked_lm_positions)
        masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)

        # Zero-pad masked-LM features out to max_predictions_per_seq.
        # (The original also built a `masked_lm_weights` list here; it was
        # never written anywhere, so the dead computation has been removed.)
        lm_pad = max_predictions_per_seq - len(masked_lm_positions)
        masked_lm_positions.extend([0] * lm_pad)
        masked_lm_ids.extend([0] * lm_pad)

        features["input_ids"][inst_index] = input_ids
        features["input_mask"][inst_index] = input_mask
        features["segment_ids"][inst_index] = segment_ids
        features["masked_lm_positions"][inst_index] = masked_lm_positions
        features["masked_lm_ids"][inst_index] = masked_lm_ids
        features["next_sentence_labels"][inst_index] = 1 if instance.is_random_next else 0

    print("saving data")
    # `with` guarantees the file is closed even if a dataset write fails.
    with h5py.File(output_file, 'w') as f:
        f.create_dataset("input_ids", data=features["input_ids"], dtype='i4', compression='gzip')
        f.create_dataset("input_mask", data=features["input_mask"], dtype='i1', compression='gzip')
        f.create_dataset("segment_ids", data=features["segment_ids"], dtype='i1', compression='gzip')
        f.create_dataset("masked_lm_positions", data=features["masked_lm_positions"], dtype='i4', compression='gzip')
        f.create_dataset("masked_lm_ids", data=features["masked_lm_ids"], dtype='i4', compression='gzip')
        f.create_dataset("next_sentence_labels", data=features["next_sentence_labels"], dtype='i1', compression='gzip')
        f.flush()
def create_training_instances(input_files, tokenizer, max_seq_length,
                              dupe_factor, short_seq_prob, masked_lm_prob,
                              max_predictions_per_seq, rng):
    """Create `TrainingInstance`s from raw text.

    Input file format: one sentence per line (actual sentences, since the
    sentence boundaries feed the next-sentence-prediction task), with blank
    lines delimiting documents so that NSP never spans two documents.
    """
    # Each document is a list of sentences; each sentence a list of tokens.
    all_documents = [[]]
    for input_file in input_files:
        print("creating instance from {}".format(input_file))
        with open(input_file, "r") as reader:
            for raw_line in reader:
                line = tokenization.convert_to_unicode(raw_line).strip()

                # Empty lines are used as document delimiters
                if not line:
                    all_documents.append([])
                tokens = tokenizer.tokenize(line)
                if tokens:
                    all_documents[-1].append(tokens)

    # Remove empty documents, then randomize document order.
    all_documents = [doc for doc in all_documents if doc]
    rng.shuffle(all_documents)

    vocab_words = list(tokenizer.vocab.keys())
    instances = []
    # Each pass over the corpus produces a differently-masked duplicate.
    for _ in range(dupe_factor):
        for document_index in range(len(all_documents)):
            instances.extend(
                create_instances_from_document(
                    all_documents, document_index, max_seq_length, short_seq_prob,
                    masked_lm_prob, max_predictions_per_seq, vocab_words, rng))

    rng.shuffle(instances)
    return instances
def create_instances_from_document(
    all_documents, document_index, max_seq_length, short_seq_prob,
    masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
    """Creates `TrainingInstance`s for a single document.

    Greedily accumulates sentences until `target_seq_length` tokens, splits
    the chunk into segments A and B (B is either the true continuation or a
    random document's text, 50/50), applies LM masking, and repeats until the
    document is consumed.
    """
    document = all_documents[document_index]

    # Account for [CLS], [SEP], [SEP]
    max_num_tokens = max_seq_length - 3

    # We *usually* want to fill up the entire sequence since we are padding
    # to `max_seq_length` anyways, so short sequences are generally wasted
    # computation. However, we *sometimes*
    # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
    # sequences to minimize the mismatch between pre-training and fine-tuning.
    # The `target_seq_length` is just a rough target however, whereas
    # `max_seq_length` is a hard limit.
    target_seq_length = max_num_tokens
    if rng.random() < short_seq_prob:
        target_seq_length = rng.randint(2, max_num_tokens)

    # We DON'T just concatenate all of the tokens from a document into a long
    # sequence and choose an arbitrary split point because this would make the
    # next sentence prediction task too easy. Instead, we split the input into
    # segments "A" and "B" based on the actual "sentences" provided by the user
    # input.
    instances = []
    current_chunk = []   # sentences accumulated for the instance being built
    current_length = 0   # token count across current_chunk
    i = 0
    while i < len(document):
        segment = document[i]
        current_chunk.append(segment)
        current_length += len(segment)
        # Emit an instance at end-of-document or once the target is reached.
        if i == len(document) - 1 or current_length >= target_seq_length:
            if current_chunk:
                # `a_end` is how many segments from `current_chunk` go into the `A`
                # (first) sentence.
                a_end = 1
                if len(current_chunk) >= 2:
                    a_end = rng.randint(1, len(current_chunk) - 1)

                tokens_a = []
                for j in range(a_end):
                    tokens_a.extend(current_chunk[j])

                tokens_b = []
                # Random next: 50% of the time (always when there is only one
                # segment, since no true continuation exists).
                is_random_next = False
                if len(current_chunk) == 1 or rng.random() < 0.5:
                    is_random_next = True
                    target_b_length = target_seq_length - len(tokens_a)

                    # This should rarely go for more than one iteration for large
                    # corpora. However, just to be careful, we try to make sure that
                    # the random document is not the same as the document
                    # we're processing.
                    for _ in range(10):
                        random_document_index = rng.randint(0, len(all_documents) - 1)
                        if random_document_index != document_index:
                            break

                    # If picked random document is the same as the current document
                    # the pair is actually a true continuation, so relabel it.
                    if random_document_index == document_index:
                        is_random_next = False

                    random_document = all_documents[random_document_index]
                    random_start = rng.randint(0, len(random_document) - 1)
                    for j in range(random_start, len(random_document)):
                        tokens_b.extend(random_document[j])
                        if len(tokens_b) >= target_b_length:
                            break
                    # We didn't actually use these segments so we "put them back" so
                    # they don't go to waste.
                    num_unused_segments = len(current_chunk) - a_end
                    i -= num_unused_segments
                # Actual next
                else:
                    is_random_next = False
                    for j in range(a_end, len(current_chunk)):
                        tokens_b.extend(current_chunk[j])
                truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)

                assert len(tokens_a) >= 1
                assert len(tokens_b) >= 1

                # Assemble [CLS] A [SEP] B [SEP] with 0/1 segment ids.
                tokens = []
                segment_ids = []
                tokens.append("[CLS]")
                segment_ids.append(0)
                for token in tokens_a:
                    tokens.append(token)
                    segment_ids.append(0)

                tokens.append("[SEP]")
                segment_ids.append(0)

                for token in tokens_b:
                    tokens.append(token)
                    segment_ids.append(1)
                tokens.append("[SEP]")
                segment_ids.append(1)

                (tokens, masked_lm_positions,
                 masked_lm_labels) = create_masked_lm_predictions(
                     tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)
                instance = TrainingInstance(
                    tokens=tokens,
                    segment_ids=segment_ids,
                    is_random_next=is_random_next,
                    masked_lm_positions=masked_lm_positions,
                    masked_lm_labels=masked_lm_labels)
                instances.append(instance)
            current_chunk = []
            current_length = 0
        i += 1

    return instances


# Record of one masked position: its index and the original token there.
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
                                          ["index", "label"])
# Record of one masked position: its index and the original token there.
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
                                          ["index", "label"])


def create_masked_lm_predictions(tokens, masked_lm_prob,
                                 max_predictions_per_seq, vocab_words, rng):
    """Creates the predictions for the masked LM objective.

    Returns (output_tokens, masked_lm_positions, masked_lm_labels):
    `output_tokens` is `tokens` with selected positions replaced; the two
    parallel lists give the masked positions (ascending) and the original
    tokens at those positions.
    """
    # Every position except the special markers is a masking candidate.
    cand_indexes = [pos for pos, token in enumerate(tokens)
                    if token != "[CLS]" and token != "[SEP]"]
    rng.shuffle(cand_indexes)

    output_tokens = list(tokens)

    num_to_predict = min(max_predictions_per_seq,
                         max(1, int(round(len(tokens) * masked_lm_prob))))

    masked_lms = []
    covered_indexes = set()
    for index in cand_indexes:
        if len(masked_lms) >= num_to_predict:
            break
        if index in covered_indexes:
            continue
        covered_indexes.add(index)

        # 80% -> [MASK], 10% -> keep original, 10% -> random vocabulary word.
        # (rng is consumed in exactly the same order as the original code.)
        if rng.random() < 0.8:
            replacement = "[MASK]"
        elif rng.random() < 0.5:
            replacement = tokens[index]
        else:
            replacement = vocab_words[rng.randint(0, len(vocab_words) - 1)]

        output_tokens[index] = replacement
        masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))

    # Report masked positions in ascending order.
    masked_lms.sort(key=lambda item: item.index)
    masked_lm_positions = [item.index for item in masked_lms]
    masked_lm_labels = [item.label for item in masked_lms]

    return (output_tokens, masked_lm_positions, masked_lm_labels)
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
    """Truncates a pair of sequences, in place, until their combined length
    is at most `max_num_tokens`, always removing from the longer sequence."""
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_num_tokens:
            break

        trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        assert len(trunc_tokens) >= 1

        # We want to sometimes truncate from the front and sometimes from the
        # back to add more randomness and avoid biases.
        if rng.random() < 0.5:
            del trunc_tokens[0]
        else:
            trunc_tokens.pop()


def main():
    """Command-line entry point: tokenize the input corpus and write
    masked-LM/next-sentence pretraining examples to an HDF5 file."""
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--vocab_file",
                        default=None,
                        type=str,
                        required=True,
                        help="The vocabulary the BERT model will train on.")
    parser.add_argument("--input_file",
                        default=None,
                        type=str,
                        required=True,
                        help="The input train corpus. can be directory with .txt files or a path to a single file")
    parser.add_argument("--output_file",
                        default=None,
                        type=str,
                        required=True,
                        help="The output file where the model checkpoints will be written.")

    ## Other parameters

    # str
    parser.add_argument("--bert_model", default="bert-large-uncased", type=str, required=False,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")

    # int
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--dupe_factor",
                        default=10,
                        type=int,
                        help="Number of times to duplicate the input data (with different masks).")
    parser.add_argument("--max_predictions_per_seq",
                        default=20,
                        type=int,
                        # Fixed: previous help text wrongly said "Maximum sequence length."
                        help="Maximum number of masked LM predictions per sequence.")

    # floats
    parser.add_argument("--masked_lm_prob",
                        default=0.15,
                        type=float,
                        help="Masked LM probability.")
    parser.add_argument("--short_seq_prob",
                        default=0.1,
                        type=float,
                        help="Probability to create a sequence shorter than maximum sequence length")

    parser.add_argument("--do_lower_case",
                        action='store_true',
                        default=True,
                        help="Whether to lower case the input text. True for uncased models, False for cased models.")
    # Bug fix: with `action='store_true'` and `default=True`, lower-casing
    # could never be disabled from the command line. Keep the old flag (and
    # its default) for backward compatibility and add an explicit opt-out.
    parser.add_argument("--no_lower_case",
                        dest='do_lower_case',
                        action='store_false',
                        help="Keep the original casing of the input text (for cased models).")
    parser.add_argument('--random_seed',
                        type=int,
                        default=12345,
                        help="random seed for initialization")

    args = parser.parse_args()

    tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case, max_len=512)

    # Accept either a single corpus file or a directory of .txt files.
    input_files = []
    if os.path.isfile(args.input_file):
        input_files.append(args.input_file)
    elif os.path.isdir(args.input_file):
        input_files = [os.path.join(args.input_file, f)
                       for f in os.listdir(args.input_file)
                       if (os.path.isfile(os.path.join(args.input_file, f)) and f.endswith('.txt'))]
    else:
        raise ValueError("{} is not a valid path".format(args.input_file))

    rng = random.Random(args.random_seed)
    instances = create_training_instances(
        input_files, tokenizer, args.max_seq_length, args.dupe_factor,
        args.short_seq_prob, args.masked_lm_prob, args.max_predictions_per_seq,
        rng)

    write_instance_to_example_file(instances, tokenizer, args.max_seq_length,
                                   args.max_predictions_per_seq, args.output_file)


if __name__ == "__main__":
    main()
class BooksDownloader:
    """Downloads the BookCorpus dataset by shelling out to the bundled
    download script."""

    def __init__(self, save_path):
        # Root directory under which the 'bookscorpus' folder is created.
        self.save_path = save_path

    def download(self):
        """Run the bookcorpus download helper as a subprocess.

        Requires the BERT_PREP_WORKING_DIR environment variable to point at
        the directory containing the `bookcorpus` helper scripts.
        """
        import os
        working_dir = os.environ['BERT_PREP_WORKING_DIR']
        # Same command string as before, assembled in one place.
        command = ' '.join([
            sys.executable,
            working_dir + '/bookcorpus/download_files.py',
            '--list', working_dir + '/bookcorpus/url_list.jsonl',
            '--out', self.save_path + '/bookscorpus',
            '--trash-bad-count',
        ])
        subprocess.run(command, shell=True, check=True)
class BookscorpusTextFormatting:
    """Merges a directory of BookCorpus .txt files into a single file with
    one book per line."""

    def __init__(self, books_path, output_filename, recursive = False):
        # Directory containing the downloaded *.txt book files.
        self.books_path = books_path
        # Whether to also pick up .txt files in subdirectories.
        self.recursive = recursive
        self.output_filename = output_filename

    # This puts one book per line
    def merge(self):
        """Write every book as a single space-joined line, books separated by
        a blank line.

        Bug fix: `self.recursive` was previously ignored — the glob pattern
        had no '**' so the hard-coded recursive=True was a no-op. The flag is
        now honored. Files are also processed in sorted order so the output
        is deterministic across filesystems.
        """
        pattern = self.books_path + ('/**/*.txt' if self.recursive else '/*.txt')
        with open(self.output_filename, mode='w', newline='\n') as ofile:
            for filename in sorted(glob.glob(pattern, recursive=self.recursive)):
                with open(filename, mode='r', encoding='utf-8-sig', newline='\n') as file:
                    for line in file:
                        if line.strip() != '':
                            ofile.write(line.strip() + ' ')
                ofile.write("\n\n")
class Downloader:
    """Routes a dataset name to the matching downloader implementation."""

    def __init__(self, dataset_name, save_path):
        self.dataset_name = dataset_name
        self.save_path = save_path

    def download(self):
        """Dispatch on `self.dataset_name`; 'all' fetches every dataset."""
        handlers = {
            'bookscorpus': self.download_bookscorpus,
            'wikicorpus_en': lambda: self.download_wikicorpus('en'),
            'wikicorpus_zh': lambda: self.download_wikicorpus('zh'),
            'google_pretrained_weights': self.download_google_pretrained_weights,
        }
        if self.dataset_name in handlers:
            handlers[self.dataset_name]()
        elif self.dataset_name == 'all':
            self.download_bookscorpus()
            self.download_wikicorpus('en')
            self.download_wikicorpus('zh')
            self.download_google_pretrained_weights()
        else:
            print(self.dataset_name)
            assert False, 'Unknown dataset_name provided to downloader'

    def download_bookscorpus(self):
        BooksDownloader(self.save_path).download()

    def download_wikicorpus(self, language):
        WikiDownloader(language, self.save_path).download()

    def download_google_pretrained_weights(self):
        GooglePretrainedWeightDownloader(self.save_path).download()
class GooglePretrainedWeightDownloader:
    """Downloads Google's pretrained BERT checkpoints and verifies each
    extracted file against a pinned SHA256 checksum."""

    def __init__(self, save_path):
        self.save_path = save_path + '/google_pretrained_weights'

        # exist_ok avoids the check-then-create race of the old exists()/makedirs() pair.
        os.makedirs(self.save_path, exist_ok=True)

        # Download urls
        self.model_urls = {
            'bert_base_uncased': ('https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip', 'uncased_L-12_H-768_A-12.zip'),
            'bert_large_uncased': ('https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip', 'uncased_L-24_H-1024_A-16.zip'),
            'bert_base_cased': ('https://storage.googleapis.com/bert_models/2018_10_18/cased_L-12_H-768_A-12.zip', 'cased_L-12_H-768_A-12.zip'),
            'bert_large_cased': ('https://storage.googleapis.com/bert_models/2018_10_18/cased_L-24_H-1024_A-16.zip', 'cased_L-24_H-1024_A-16.zip'),
            'bert_base_multilingual_cased': ('https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip', 'multi_cased_L-12_H-768_A-12.zip'),
            'bert_large_multilingual_uncased': ('https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip', 'multilingual_L-12_H-768_A-12.zip'),
            'bert_base_chinese': ('https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip', 'chinese_L-12_H-768_A-12.zip')
        }

        # SHA256sum verification for file download integrity (and checking for changes from the download source over time)
        self.bert_base_uncased_sha = {
            'bert_config.json': '7b4e5f53efbd058c67cda0aacfafb340113ea1b5797d9ce6ee411704ba21fcbc',
            'bert_model.ckpt.data-00000-of-00001': '58580dc5e0bf0ae0d2efd51d0e8272b2f808857f0a43a88aaf7549da6d7a8a84',
            'bert_model.ckpt.index': '04c1323086e2f1c5b7c0759d8d3e484afbb0ab45f51793daab9f647113a0117b',
            'bert_model.ckpt.meta': 'dd5682170a10c3ea0280c2e9b9a45fee894eb62da649bbdea37b38b0ded5f60e',
            'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3',
        }

        self.bert_large_uncased_sha = {
            'bert_config.json': 'bfa42236d269e2aeb3a6d30412a33d15dbe8ea597e2b01dc9518c63cc6efafcb',
            'bert_model.ckpt.data-00000-of-00001': 'bc6b3363e3be458c99ecf64b7f472d2b7c67534fd8f564c0556a678f90f4eea1',
            'bert_model.ckpt.index': '68b52f2205ffc64dc627d1120cf399c1ef1cbc35ea5021d1afc889ffe2ce2093',
            'bert_model.ckpt.meta': '6fcce8ff7628f229a885a593625e3d5ff9687542d5ef128d9beb1b0c05edc4a1',
            'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3',
        }

        self.bert_base_cased_sha = {
            'bert_config.json': 'f11dfb757bea16339a33e1bf327b0aade6e57fd9c29dc6b84f7ddb20682f48bc',
            'bert_model.ckpt.data-00000-of-00001': '734d5a1b68bf98d4e9cb6b6692725d00842a1937af73902e51776905d8f760ea',
            'bert_model.ckpt.index': '517d6ef5c41fc2ca1f595276d6fccf5521810d57f5a74e32616151557790f7b1',
            'bert_model.ckpt.meta': '5f8a9771ff25dadd61582abb4e3a748215a10a6b55947cbb66d0f0ba1694be98',
            'vocab.txt': 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02',
        }

        self.bert_large_cased_sha = {
            'bert_config.json': '7adb2125c8225da495656c982fd1c5f64ba8f20ad020838571a3f8a954c2df57',
            'bert_model.ckpt.data-00000-of-00001': '6ff33640f40d472f7a16af0c17b1179ca9dcc0373155fb05335b6a4dd1657ef0',
            'bert_model.ckpt.index': 'ef42a53f577fbe07381f4161b13c7cab4f4fc3b167cec6a9ae382c53d18049cf',
            'bert_model.ckpt.meta': 'd2ddff3ed33b80091eac95171e94149736ea74eb645e575d942ec4a5e01a40a1',
            'vocab.txt': 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02',
        }

        self.bert_base_multilingual_cased_sha = {
            'bert_config.json': 'e76c3964bc14a8bb37a5530cdc802699d2f4a6fddfab0611e153aa2528f234f0',
            'bert_model.ckpt.data-00000-of-00001': '55b8a2df41f69c60c5180e50a7c31b7cdf6238909390c4ddf05fbc0d37aa1ac5',
            'bert_model.ckpt.index': '7d8509c2a62b4e300feb55f8e5f1eef41638f4998dd4d887736f42d4f6a34b37',
            'bert_model.ckpt.meta': '95e5f1997e8831f1c31e5cf530f1a2e99f121e9cd20887f2dce6fe9e3343e3fa',
            'vocab.txt': 'fe0fda7c425b48c516fc8f160d594c8022a0808447475c1a7c6d6479763f310c',
        }

        self.bert_large_multilingual_uncased_sha = {
            'bert_config.json': '49063bb061390211d2fdd108cada1ed86faa5f90b80c8f6fdddf406afa4c4624',
            'bert_model.ckpt.data-00000-of-00001': '3cd83912ebeb0efe2abf35c9f1d5a515d8e80295e61c49b75c8853f756658429',
            'bert_model.ckpt.index': '87c372c1a3b1dc7effaaa9103c80a81b3cbab04c7933ced224eec3b8ad2cc8e7',
            'bert_model.ckpt.meta': '27f504f34f02acaa6b0f60d65195ec3e3f9505ac14601c6a32b421d0c8413a29',
            'vocab.txt': '87b44292b452f6c05afa49b2e488e7eedf79ea4f4c39db6f2f4b37764228ef3f',
        }

        self.bert_base_chinese_sha = {
            'bert_config.json': '7aaad0335058e2640bcb2c2e9a932b1cd9da200c46ea7b8957d54431f201c015',
            'bert_model.ckpt.data-00000-of-00001': '756699356b78ad0ef1ca9ba6528297bcb3dd1aef5feadd31f4775d7c7fc989ba',
            'bert_model.ckpt.index': '46315546e05ce62327b3e2cd1bed22836adcb2ff29735ec87721396edb21b82e',
            'bert_model.ckpt.meta': 'c0f8d51e1ab986604bc2b25d6ec0af7fd21ff94cf67081996ec3f3bf5d823047',
            'vocab.txt': '45bbac6b341c319adc98a532532882e91a9cefc0329aa57bac9ae761c27b291c',
        }

        # Relate SHA to urls for loop below
        self.model_sha = {
            'bert_base_uncased': self.bert_base_uncased_sha,
            'bert_large_uncased': self.bert_large_uncased_sha,
            'bert_base_cased': self.bert_base_cased_sha,
            'bert_large_cased': self.bert_large_cased_sha,
            'bert_base_multilingual_cased': self.bert_base_multilingual_cased_sha,
            'bert_large_multilingual_uncased': self.bert_large_multilingual_uncased_sha,
            'bert_base_chinese': self.bert_base_chinese_sha
        }

    # Helper to get sha256sum of a file
    def sha256sum(self, filename):
        """Return the SHA256 hex digest of `filename`, read in 128 KiB chunks."""
        h = hashlib.sha256()
        b = bytearray(128*1024)
        mv = memoryview(b)
        with open(filename, 'rb', buffering=0) as f:
            for n in iter(lambda : f.readinto(mv), 0):
                h.update(mv[:n])

        return h.hexdigest()

    def download(self):
        """Download, unzip and checksum-verify every model in `model_urls`."""
        # Iterate over urls: download, unzip, verify sha256sum
        found_mismatch_sha = False
        for model in self.model_urls:
            url = self.model_urls[model][0]
            file = self.save_path + '/' + self.model_urls[model][1]

            print('Downloading', url)
            # Bug fix: the HTTP response and destination file are now closed
            # deterministically (previously the response leaked).
            with urllib.request.urlopen(url) as response, open(file, 'wb') as handle:
                handle.write(response.read())

            print('Unzipping', file)
            # Bug fix: close the archive even if extraction raises; also stop
            # shadowing the builtin `zip`.
            with zipfile.ZipFile(file, 'r') as archive:
                archive.extractall(self.save_path)

            sha_dict = self.model_sha[model]
            for extracted_file in sha_dict:
                sha = sha_dict[extracted_file]
                if sha != self.sha256sum(file[:-4] + '/' + extracted_file):
                    found_mismatch_sha = True
                    print('SHA256sum does not match on file:', extracted_file, 'from download url:', url)
                else:
                    print(file[:-4] + '/' + extracted_file, '\t', 'verified')

        if not found_mismatch_sha:
            print("All downloads pass sha256sum verification.")

    def serialize(self):
        pass

    def deserialize(self):
        pass

    def listAvailableWeights(self):
        print("Available Weight Datasets")
        for item in self.model_urls:
            print(item)

    def listLocallyStoredWeights(self):
        pass
class Sharding:
    def __init__(self, input_files, output_name_prefix, n_training_shards, n_test_shards, fraction_test_set):
        """Prepare sharding of articles from `input_files` into training/test shard files.

        Args:
            input_files: list of input text files, one article per line.
            output_name_prefix: path prefix for the generated shard filenames.
            n_training_shards: number of training shard files (must be > 0).
            n_test_shards: number of test shard files (must be > 0).
            fraction_test_set: fraction of total sentences assigned to the test set.
        """
        assert len(input_files) > 0, 'The input file list must contain at least one file.'
        assert n_training_shards > 0, 'There must be at least one output shard.'
        assert n_test_shards > 0, 'There must be at least one output shard.'

        self.n_training_shards = n_training_shards
        self.n_test_shards = n_test_shards
        self.fraction_test_set = fraction_test_set

        self.input_files = input_files

        # Shard filenames are built as prefix + identifier + '_<i>' + extension.
        self.output_name_prefix = output_name_prefix
        self.output_training_identifier = '_training'
        self.output_test_identifier = '_test'
        self.output_file_extension = '.txt'

        self.articles = {}    # key: integer identifier, value: list of articles
        self.sentences = {}    # key: integer identifier, value: list of sentences
        self.output_training_files = {}    # key: filename, value: list of articles to go into file
        self.output_test_files = {}    # key: filename, value: list of articles to go into file

        # Creates the empty shard-name -> article-list mappings up front.
        self.init_output_files()

    # Remember, the input files contain one article per line (the whitespace check is to skip extraneous blank lines)
    def load_articles(self):
        """Read every input file and store one article per non-blank line in
        `self.articles`, keyed by a global running index."""
        print('Start: Loading Articles')

        global_article_count = 0
        for input_file in self.input_files:
            print('input file:', input_file)
            with open(input_file, mode='r', newline='\n') as f:
                for i, line in enumerate(f):
                    if line.strip():
                        self.articles[global_article_count] = line.rstrip()
                        global_article_count += 1

        print('End: Loading Articles: There are', len(self.articles), 'articles.')
+ + # TODO: WIP: multiprocessing (create independent ranges and spawn processes) + use_multiprocessing = 'serial' + + def chunks(data, size=len(self.articles)): + it = iter(data) + for i in range(0, len(data), size): + yield {k: data[k] for k in islice(it, size)} + + if use_multiprocessing == 'manager': + manager = multiprocessing.Manager() + return_dict = manager.dict() + jobs = [] + n_processes = 7 # in addition to the main process, total = n_proc+1 + + def work(articles, return_dict): + sentences = {} + for i, article in enumerate(articles): + sentences[i] = segmenter.segment_string(articles[article]) + + if i % 5000 == 0: + print('Segmenting article', i) + + return_dict.update(sentences) + + for item in chunks(self.articles, len(self.articles)): + p = multiprocessing.Process(target=work, args=(item, return_dict)) + + # Busy wait + while len(jobs) >= n_processes: + pass + + jobs.append(p) + p.start() + + for proc in jobs: + proc.join() + + elif use_multiprocessing == 'queue': + work_queue = multiprocessing.Queue() + jobs = [] + + for item in chunks(self.articles, len(self.articles)): + pass + + else: # serial option + for i, article in enumerate(self.articles): + self.sentences[i] = segmenter.segment_string(self.articles[article]) + + if i % 5000 == 0: + print('Segmenting article', i) + + print('End: Sentence Segmentation') + + + def init_output_files(self): + print('Start: Init Output Files') + assert len(self.output_training_files) is 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.' + assert len(self.output_test_files) is 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.' 
+ + for i in range(self.n_training_shards): + name = self.output_name_prefix + self.output_training_identifier + '_' + str(i) + self.output_file_extension + self.output_training_files[name] = [] + + for i in range(self.n_test_shards): + name = self.output_name_prefix + self.output_test_identifier + '_' + str(i) + self.output_file_extension + self.output_test_files[name] = [] + + print('End: Init Output Files') + + + def get_sentences_per_shard(self, shard): + result = 0 + for article_id in shard: + result += len(self.sentences[article_id]) + + return result + + + def distribute_articles_over_shards(self): + print('Start: Distribute Articles Over Shards') + assert len(self.articles) >= self.n_training_shards + self.n_test_shards, 'There are fewer articles than shards. Please add more data or reduce the number of shards requested.' + + # Create dictionary with - key: sentence count per article, value: article id number + sentence_counts = defaultdict(lambda: []) + + max_sentences = 0 + total_sentences = 0 + + for article_id in self.sentences: + current_length = len(self.sentences[article_id]) + sentence_counts[current_length].append(article_id) + max_sentences = max(max_sentences, current_length) + total_sentences += current_length + + n_sentences_assigned_to_training = int((1 - self.fraction_test_set) * total_sentences) + nominal_sentences_per_training_shard = n_sentences_assigned_to_training // self.n_training_shards + nominal_sentences_per_test_shard = (total_sentences - n_sentences_assigned_to_training) // self.n_test_shards + + consumed_article_set = set({}) + unused_article_set = set(self.articles.keys()) + + # Make first pass and add one article worth of lines per file + for file in self.output_training_files: + current_article_id = sentence_counts[max_sentences][-1] + sentence_counts[max_sentences].pop(-1) + self.output_training_files[file].append(current_article_id) + consumed_article_set.add(current_article_id) + 
unused_article_set.remove(current_article_id) + + # Maintain the max sentence count + while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0: + max_sentences -= 1 + + if len(self.sentences[current_article_id]) > nominal_sentences_per_training_shard: + nominal_sentences_per_training_shard = len(self.sentences[current_article_id]) + print('Warning: A single article contains more than the nominal number of sentences per training shard.') + + for file in self.output_test_files: + current_article_id = sentence_counts[max_sentences][-1] + sentence_counts[max_sentences].pop(-1) + self.output_test_files[file].append(current_article_id) + consumed_article_set.add(current_article_id) + unused_article_set.remove(current_article_id) + + # Maintain the max sentence count + while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0: + max_sentences -= 1 + + if len(self.sentences[current_article_id]) > nominal_sentences_per_test_shard: + nominal_sentences_per_test_shard = len(self.sentences[current_article_id]) + print('Warning: A single article contains more than the nominal number of sentences per test shard.') + + training_counts = [] + test_counts = [] + + for shard in self.output_training_files: + training_counts.append(self.get_sentences_per_shard(self.output_training_files[shard])) + + for shard in self.output_test_files: + test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard])) + + training_median = statistics.median(training_counts) + test_median = statistics.median(test_counts) + + # Make subsequent passes over files to find articles to add without going over limit + history_remaining = [] + n_history_remaining = 4 + + while len(consumed_article_set) < len(self.articles): + for fidx, file in enumerate(self.output_training_files): + nominal_next_article_size = min(nominal_sentences_per_training_shard - training_counts[fidx], max_sentences) + + # Maintain the max sentence count + while len(sentence_counts[max_sentences]) == 
0 and max_sentences > 0: + max_sentences -= 1 + + while len(sentence_counts[nominal_next_article_size]) == 0 and nominal_next_article_size > 0: + nominal_next_article_size -= 1 + + if nominal_next_article_size not in sentence_counts or nominal_next_article_size is 0 or training_counts[fidx] > training_median: + continue # skip adding to this file, will come back later if no file can accept unused articles + + current_article_id = sentence_counts[nominal_next_article_size][-1] + sentence_counts[nominal_next_article_size].pop(-1) + + self.output_training_files[file].append(current_article_id) + consumed_article_set.add(current_article_id) + unused_article_set.remove(current_article_id) + + for fidx, file in enumerate(self.output_test_files): + nominal_next_article_size = min(nominal_sentences_per_test_shard - test_counts[fidx], max_sentences) + + # Maintain the max sentence count + while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0: + max_sentences -= 1 + + while len(sentence_counts[nominal_next_article_size]) == 0 and nominal_next_article_size > 0: + nominal_next_article_size -= 1 + + if nominal_next_article_size not in sentence_counts or nominal_next_article_size is 0 or test_counts[fidx] > test_median: + continue # skip adding to this file, will come back later if no file can accept unused articles + + current_article_id = sentence_counts[nominal_next_article_size][-1] + sentence_counts[nominal_next_article_size].pop(-1) + + self.output_test_files[file].append(current_article_id) + consumed_article_set.add(current_article_id) + unused_article_set.remove(current_article_id) + + # If unable to place articles a few times, bump up nominal sizes by fraction until articles get placed + if len(history_remaining) == n_history_remaining: + history_remaining.pop(0) + history_remaining.append(len(unused_article_set)) + + history_same = True + for i in range(1, len(history_remaining)): + history_same = history_same and (history_remaining[i-1] == 
history_remaining[i]) + + if history_same: + nominal_sentences_per_training_shard += 1 + # nominal_sentences_per_test_shard += 1 + + training_counts = [] + test_counts = [] + for shard in self.output_training_files: + training_counts.append(self.get_sentences_per_shard(self.output_training_files[shard])) + + for shard in self.output_test_files: + test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard])) + + training_median = statistics.median(training_counts) + test_median = statistics.median(test_counts) + + print('Distributing data over shards:', len(unused_article_set), 'articles remaining.') + + + if len(unused_article_set) != 0: + print('Warning: Some articles did not make it into output files.') + + + for shard in self.output_training_files: + print('Training shard:', self.get_sentences_per_shard(self.output_training_files[shard])) + + for shard in self.output_test_files: + print('Test shard:', self.get_sentences_per_shard(self.output_test_files[shard])) + + print('End: Distribute Articles Over Shards') + + + def write_shards_to_disk(self): + print('Start: Write Shards to Disk') + for shard in self.output_training_files: + self.write_single_shard(shard, self.output_training_files[shard]) + + for shard in self.output_test_files: + self.write_single_shard(shard, self.output_test_files[shard]) + + print('End: Write Shards to Disk') + + + def write_single_shard(self, shard_name, shard): + with open(shard_name, mode='w', newline='\n') as f: + for article_id in shard: + for line in self.sentences[article_id]: + f.write(line + '\n') + + f.write('\n') # Line break between articles + + +import nltk + +nltk.download('punkt') + +class NLTKSegmenter: + def __init(self): + pass + + def segment_string(self, article): + return nltk.tokenize.sent_tokenize(article) + diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/data/WikiDownloader.py b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/data/WikiDownloader.py new 
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import bz2
import os
import urllib.request
import subprocess
import sys


class WikiDownloader:
    """Download and decompress the latest Wikipedia XML dump for one language.

    Files land in ``<save_path>/wikicorpus_<language>``. Only 'en' and 'zh'
    are wired up.
    """

    def __init__(self, language, save_path):
        # Per-language working directory, e.g. <save_path>/wikicorpus_en
        self.save_path = save_path + '/wikicorpus_' + language

        # exist_ok avoids the check-then-create race of the original code
        os.makedirs(self.save_path, exist_ok=True)

        self.language = language
        self.download_urls = {
            'en': 'https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2',
            'zh': 'https://dumps.wikimedia.org/zhwiki/latest/zhwiki-latest-pages-articles.xml.bz2'
        }

        self.output_files = {
            'en': 'wikicorpus_en.xml.bz2',
            'zh': 'wikicorpus_zh.xml.bz2'
        }

    def download(self):
        """Fetch the dump via wget (skipped if present) and bunzip2 it.

        Raises:
            NotImplementedError: for languages without a configured URL.
                (Was ``assert False`` -- asserts are stripped under ``-O``,
                which would have made the failure silent.)
            RuntimeError: if wget exits non-zero.
        """
        if self.language not in self.download_urls:
            raise NotImplementedError('WikiDownloader not implemented for this language yet.')

        url = self.download_urls[self.language]
        filename = self.output_files[self.language]
        target = self.save_path + '/' + filename

        print('Downloading:', url)
        if os.path.isfile(target):
            print('** Download file already exists, skipping download')
        else:
            cmd = ['wget', url, '--output-document={}'.format(target)]
            print('Running:', cmd)
            status = subprocess.run(cmd)
            if status.returncode != 0:
                raise RuntimeError('Wiki download not successful')

        # Always unzipping since this is relatively fast and will overwrite
        print('Unzipping:', filename)
        # list argv instead of a shell-interpolated string: no quoting or
        # injection issues if save_path contains spaces/metacharacters
        subprocess.run(['bzip2', '-dk', target], check=True)
import glob
import os


class WikicorpusTextFormatting:
    """Merge WikiExtractor output into a single one-article-per-line file."""

    def __init__(self, wiki_path, output_filename, recursive=False):
        self.wiki_path = wiki_path
        self.recursive = recursive
        self.output_filename = output_filename

    # This puts one article per line
    def merge(self):
        """Concatenate all ``wiki_*`` extractor files under ``wiki_path``.

        BUG FIX: the original condition here was ``if '' in line:`` (always
        true) and ``article_open`` was never set -- the literal ``<doc id=`` /
        ``</doc>`` tag checks had been stripped, so no article text was ever
        written. Restored per WikiExtractor's documented output format.
        """
        with open(self.output_filename, mode='w', newline='\n') as ofile:
            for dirname in glob.glob(self.wiki_path + '/*/', recursive=False):
                for filename in glob.glob(dirname + 'wiki_*', recursive=self.recursive):
                    print(filename)
                    article_lines = []
                    article_open = False

                    with open(filename, mode='r', newline='\n') as file:
                        for line in file:
                            if '<doc id=' in line:
                                # start of a new article; the tag line itself
                                # is not part of the text
                                article_open = True
                            elif '</doc>' in line:
                                article_open = False
                                # [1:] skips the title line that WikiExtractor
                                # emits right after the <doc> tag
                                for oline in article_lines[1:]:
                                    if oline != '\n':
                                        ofile.write(oline.rstrip() + " ")
                                ofile.write("\n\n")
                                article_lines = []
                            else:
                                if article_open:
                                    article_lines.append(line)
diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/data/bertPrep.py b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/data/bertPrep.py new file mode 100644 index 0000000000000000000000000000000000000000..06ccf042050befdd73c45a4686d9266f728f4bb6 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/data/bertPrep.py @@ -0,0 +1,362 @@ +# Copyright (c) 2021, Habana Labs Ltd. All rights reserved. +# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import BookscorpusTextFormatting
import Downloader
import TextSharding
import WikicorpusTextFormatting

import argparse
import itertools
import multiprocessing
import os
import pprint
import subprocess


def main(args):
    """Dispatch one preprocessing --action (download / text_formatting /
    sharding / record creation) for the requested --dataset.

    All paths are rooted at $BERT_PREP_WORKING_DIR (raises KeyError if unset).
    """
    working_dir = os.environ['BERT_PREP_WORKING_DIR']

    print('Working Directory:', working_dir)
    print('Action:', args.action)
    print('Dataset Name:', args.dataset)

    if args.input_files:
        args.input_files = args.input_files.split(',')

    # Encode every knob that affects record contents into the directory name
    # so differently-configured runs never collide.
    hdf5_tfrecord_folder_prefix = "_lower_case_" + str(args.do_lower_case) + "_seq_len_" + str(args.max_seq_length) \
                                  + "_max_pred_" + str(args.max_predictions_per_seq) + "_masked_lm_prob_" + str(args.masked_lm_prob) \
                                  + "_random_seed_" + str(args.random_seed) + "_dupe_factor_" + str(args.dupe_factor)

    directory_structure = {
        'download': working_dir + '/download',    # Downloaded and decompressed
        'extracted': working_dir + '/extracted',  # Extracted from whatever the initial format is (e.g., wikiextractor)
        'formatted': working_dir + '/formatted_one_article_per_line',  # This is the level where all sources should look the same
        'sharded': working_dir + '/sharded_' + "training_shards_" + str(args.n_training_shards) + "_test_shards_" + str(args.n_test_shards) + "_fraction_" + str(args.fraction_test_set),
        'tfrecord': working_dir + '/tfrecord' + hdf5_tfrecord_folder_prefix,
        'hdf5': working_dir + '/hdf5' + hdf5_tfrecord_folder_prefix
    }

    print('\nDirectory Structure:')
    pp = pprint.PrettyPrinter(indent=2)
    pp.pprint(directory_structure)
    print('')

    def create_record_worker(filename_prefix, shard_id, output_format, target_dir):
        """Launch create_pretraining_data.py for one shard.

        Hoisted out of the tfrecord and hdf5 branches, which previously held
        two near-identical copies of this function. Returns the Popen handle
        so the caller can wait on the last one launched.

        NOTE: the command runs through the shell; every component comes from
        local args/paths, not untrusted input.
        """
        cmd_script_dir = os.path.dirname(os.path.normpath(working_dir))
        bert_preprocessing_command = 'python ' + cmd_script_dir + '/create_pretraining_data.py'
        bert_preprocessing_command += ' --input_file=' + directory_structure['sharded'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.txt'
        bert_preprocessing_command += ' --output_file=' + target_dir + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.' + output_format
        bert_preprocessing_command += ' --vocab_file=' + args.vocab_file
        bert_preprocessing_command += ' --do_lower_case' if args.do_lower_case else ''
        bert_preprocessing_command += ' --max_seq_length=' + str(args.max_seq_length)
        bert_preprocessing_command += ' --max_predictions_per_seq=' + str(args.max_predictions_per_seq)
        bert_preprocessing_command += ' --masked_lm_prob=' + str(args.masked_lm_prob)
        bert_preprocessing_command += ' --random_seed=' + str(args.random_seed)
        bert_preprocessing_command += ' --dupe_factor=' + str(args.dupe_factor)
        bert_preprocessing_process = subprocess.Popen(bert_preprocessing_command, shell=True)

        # Throttle: every n_processes-th shard, block until this one finishes.
        # (Could be better optimized -- fine if all shards take equal time.)
        if shard_id % args.n_processes == 0 and shard_id > 0:
            bert_preprocessing_process.wait()
        return bert_preprocessing_process

    if args.action == 'download':
        # exist_ok avoids check-then-create races on reruns (here and below)
        os.makedirs(directory_structure['download'], exist_ok=True)

        downloader = Downloader.Downloader(args.dataset, directory_structure['download'])
        downloader.download()

    elif args.action == 'text_formatting':
        assert args.dataset != 'google_pretrained_weights' and args.dataset != 'nvidia_pretrained_weights' \
            and args.dataset != 'squad' and args.dataset != 'mrpc', 'Cannot perform text_formatting on pretrained weights'

        os.makedirs(directory_structure['extracted'], exist_ok=True)
        os.makedirs(directory_structure['formatted'], exist_ok=True)

        if args.dataset == 'bookscorpus':
            books_path = directory_structure['download'] + '/bookscorpus'
            output_filename = directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt'
            books_formatter = BookscorpusTextFormatting.BookscorpusTextFormatting(books_path, output_filename, recursive=True)
            books_formatter.merge()

        elif args.dataset == 'wikicorpus_en':
            if args.skip_wikiextractor == 0:
                path_to_wikiextractor_in_container = working_dir + '/wikiextractor/WikiExtractor.py'
                wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure['download'] + '/' + args.dataset + '/wikicorpus_en.xml ' + '-b 100M --processes ' + str(args.n_processes) + ' -o ' + directory_structure['extracted'] + '/' + args.dataset
                print('WikiExtractor Command:', wikiextractor_command)
                subprocess.run(wikiextractor_command, shell=True, check=True)

            wiki_path = directory_structure['extracted'] + '/wikicorpus_en'
            output_filename = directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt'
            wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(wiki_path, output_filename, recursive=True)
            wiki_formatter.merge()

        elif args.dataset == 'wikicorpus_zh':
            # Deliberate gate: everything below it is unreachable until zh
            # support (translation + segmentation) lands.
            assert False, 'wikicorpus_zh not fully supported at this time. The simplified/traditional Chinese data needs to be translated and properly segmented still, and should work once this step is added.'
            if args.skip_wikiextractor == 0:
                path_to_wikiextractor_in_container = working_dir + '/wikiextractor/WikiExtractor.py'
                wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure['download'] + '/' + args.dataset + '/wikicorpus_zh.xml ' + '-b 100M --processes ' + str(args.n_processes) + ' -o ' + directory_structure['extracted'] + '/' + args.dataset
                print('WikiExtractor Command:', wikiextractor_command)
                subprocess.run(wikiextractor_command, shell=True, check=True)

            wiki_path = directory_structure['extracted'] + '/wikicorpus_zh'
            output_filename = directory_structure['formatted'] + '/wikicorpus_zh_one_article_per_line.txt'
            wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(wiki_path, output_filename, recursive=True)
            wiki_formatter.merge()

        # Runs for every dataset branch above (output_filename set in each)
        assert os.stat(output_filename).st_size > 0, 'File glob did not pick up extracted wiki files from WikiExtractor.'

    elif args.action == 'sharding':
        # Note: books+wiki requires user to provide list of input_files (comma-separated with no spaces)
        if args.dataset == 'bookscorpus' or 'wikicorpus' in args.dataset or 'books_wiki' in args.dataset:
            if args.input_files is None:
                if args.dataset == 'bookscorpus':
                    args.input_files = [directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt']
                elif args.dataset == 'wikicorpus_en':
                    args.input_files = [directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt']
                elif args.dataset == 'wikicorpus_zh':
                    args.input_files = [directory_structure['formatted'] + '/wikicorpus_zh_one_article_per_line.txt']
                elif args.dataset == 'books_wiki_en_corpus':
                    args.input_files = [directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt', directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt']

            output_file_prefix = directory_structure['sharded'] + '/' + args.dataset + '/' + args.dataset

            os.makedirs(directory_structure['sharded'] + '/' + args.dataset, exist_ok=True)

            # Segmentation is here because all datasets look the same in one
            # article/book/whatever per line format, and it seemed
            # unnecessarily complicated to add an additional preprocessing
            # step just for this. Different languages (e.g., Chinese
            # simplified/traditional) may require translation and other
            # packages to be called from here -- just add a conditional
            # branch for those extra steps.
            segmenter = TextSharding.NLTKSegmenter()
            sharding = TextSharding.Sharding(args.input_files, output_file_prefix, args.n_training_shards, args.n_test_shards, args.fraction_test_set)

            sharding.load_articles()
            sharding.segment_articles_into_sentences(segmenter)
            sharding.distribute_articles_over_shards()
            sharding.write_shards_to_disk()

        else:
            assert False, 'Unsupported dataset for sharding'

    elif args.action == 'create_tfrecord_files':
        # Deliberate gate: TFRecord output is not supported in this release;
        # the code below is kept (unreachable) for parity with upstream.
        assert False, 'TFrecord creation not supported in this PyTorch model example release.'

        os.makedirs(directory_structure['tfrecord'] + "/" + args.dataset, exist_ok=True)

        output_file_prefix = args.dataset

        for i in range(args.n_training_shards):
            last_process = create_record_worker(output_file_prefix + '_training', i, 'tfrecord', directory_structure['tfrecord'])
        last_process.wait()

        for i in range(args.n_test_shards):
            last_process = create_record_worker(output_file_prefix + '_test', i, 'tfrecord', directory_structure['tfrecord'])
        last_process.wait()

    elif args.action == 'create_hdf5_files':
        os.makedirs(directory_structure['hdf5'] + "/" + args.dataset, exist_ok=True)

        output_file_prefix = args.dataset

        for i in range(args.n_training_shards):
            last_process = create_record_worker(output_file_prefix + '_training', i, 'hdf5', directory_structure['hdf5'])
        last_process.wait()

        for i in range(args.n_test_shards):
            last_process = create_record_worker(output_file_prefix + '_test', i, 'hdf5', directory_structure['hdf5'])
        last_process.wait()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Preprocessing Application for Everything BERT-related'
    )

    parser.add_argument(
        '--action',
        type=str,
        help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords',
        choices={
            'download',               # Download and verify mdf5/sha sums
            'text_formatting',        # Convert into a file that contains one article/book per line
            'sharding',               # Convert previous formatted text into shards containing one sentence per line
            'create_tfrecord_files',  # Turn each shard into a TFrecord with masking and next sentence prediction info
            'create_hdf5_files'       # Turn each shard into a HDF5 file with masking and next sentence prediction info
        }
    )

    parser.add_argument(
        '--dataset',
        type=str,
        help='Specify the dataset to perform --action on',
        choices={
            'bookscorpus',
            'wikicorpus_en',
            'wikicorpus_zh',
            'books_wiki_en_corpus',
            'google_pretrained_weights',
            'all'
        }
    )

    parser.add_argument(
        '--input_files',
        type=str,
        help='Specify the input files in a comma-separated list (no spaces)'
    )

    parser.add_argument(
        '--n_training_shards',
        type=int,
        help='Specify the number of training shards to generate',
        default=256
    )

    parser.add_argument(
        '--n_test_shards',
        type=int,
        help='Specify the number of test shards to generate',
        default=256
    )

    parser.add_argument(
        '--fraction_test_set',
        type=float,
        help='Specify the fraction (0..1) of the data to withhold for the test data split (based on number of sequences)',
        default=0.1
    )

    parser.add_argument(
        '--segmentation_method',
        type=str,
        help='Specify your choice of sentence segmentation',
        choices={
            'nltk'
        },
        default='nltk'
    )

    parser.add_argument(
        '--n_processes',
        type=int,
        help='Specify the max number of processes to allow at one time',
        default=4
    )

    parser.add_argument(
        '--random_seed',
        type=int,
        help='Specify the base seed to use for any random number generation',
        default=12345
    )

    parser.add_argument(
        '--dupe_factor',
        type=int,
        help='Specify the duplication factor',
        default=5
    )

    parser.add_argument(
        '--masked_lm_prob',
        type=float,
        help='Specify the probability for masked lm',
        default=0.15
    )

    parser.add_argument(
        '--max_seq_length',
        type=int,
        help='Specify the maximum sequence length',
        default=512
    )

    parser.add_argument(
        '--max_predictions_per_seq',
        type=int,
        help='Specify the maximum number of masked words per sequence',
        default=20
    )

    parser.add_argument(
        '--do_lower_case',
        type=int,
        help='Specify whether it is cased (0) or uncased (1) (any number greater than 0 will be treated as uncased)',
        default=1
    )

    parser.add_argument(
        '--vocab_file',
        type=str,
        help='Specify absolute path to vocab file to use'
    )

    parser.add_argument(
        '--skip_wikiextractor',
        type=int,
        help='Specify whether to skip wikiextractor step 0=False, 1=True',
        default=0
    )

    parser.add_argument(
        '--interactive_json_config_generator',
        type=str,
        help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords'
    )

    args = parser.parse_args()
    main(args)
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

to_download=${1:-"wiki_only"}

# Interpreter for all preprocessing steps. Previously the script required the
# caller to export PYTHON; when unset, every command silently degenerated to
# executing the .py file directly. Default to python3 instead.
PYTHON=${PYTHON:-python3}

# Download
data_dir=$(pwd)
export BERT_PREP_WORKING_DIR="${data_dir}"

echo "Checkout WikiExtractor repository"
# checkout WikiExtractor scripts (pinned commit)
git clone https://github.com/attardi/wikiextractor.git && cd wikiextractor && git checkout 6408a430fc504a38b04d37ce5e7fc740191dee16 && cd ..

# Download Wikipedia dataset and/or Bookscorpus dataset
echo "Download dataset ${to_download}"
if [ "$to_download" = "wiki_books" ] ; then
    # checkout BookCorpus download scripts
    git clone https://github.com/soskek/bookcorpus.git
    $PYTHON "${data_dir}/bertPrep.py" --action download --dataset bookscorpus
fi

$PYTHON "${data_dir}/bertPrep.py" --action download --dataset wikicorpus_en

echo "Download pretrained weights"
echo "${data_dir}"
$PYTHON "${data_dir}/bertPrep.py" --action download --dataset google_pretrained_weights # Includes vocab

# Properly format the text files
if [ "$to_download" = "wiki_books" ] ; then
    $PYTHON "${data_dir}/bertPrep.py" --action text_formatting --dataset bookscorpus
fi
$PYTHON "${data_dir}/bertPrep.py" --action text_formatting --dataset wikicorpus_en

if [ "$to_download" = "wiki_books" ] ; then
    DATASET="books_wiki_en_corpus"
else
    DATASET="wikicorpus_en"
fi

# Shard the text files
$PYTHON "${data_dir}/bertPrep.py" --action sharding --dataset "$DATASET"

# Create HDF5 files Phase 1 (seq len 128)
$PYTHON "${data_dir}/bertPrep.py" --action create_hdf5_files --dataset "$DATASET" --max_seq_length 128 \
    --max_predictions_per_seq 20 --vocab_file "$BERT_PREP_WORKING_DIR/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt" --do_lower_case 1

# Create HDF5 files Phase 2 (seq len 512)
$PYTHON "${data_dir}/bertPrep.py" --action create_hdf5_files --dataset "$DATASET" --max_seq_length 512 \
    --max_predictions_per_seq 80 --vocab_file "$BERT_PREP_WORKING_DIR/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt" --do_lower_case 1
# Download SQuAD v1.1 train/dev sets and the official evaluation script.

v1="v1.1"
mkdir -p $v1   # -p: re-running the script must not fail on an existing dir
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json -O $v1/train-v1.1.json
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json -O $v1/dev-v1.1.json
wget https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/ -O $v1/evaluate-v1.1.py

# Expected md5 sums (stdin format of md5sum, hence the trailing "-")
EXP_TRAIN_v1='981b29407e0affa3b1b156f72073b945 -'
EXP_DEV_v1='3e85deb501d4e538b6bc56f786231552 -'
EXP_EVAL_v1='afb04912d18ff20696f7f88eed49bea9 -'
CALC_TRAIN_v1=`cat ${v1}/train-v1.1.json |md5sum`
CALC_DEV_v1=`cat ${v1}/dev-v1.1.json |md5sum`
CALC_EVAL_v1=`cat ${v1}/evaluate-v1.1.py |md5sum`

echo "Squad data download done!"

echo "Verifying Dataset...."

# NOTE(review): mismatches only warn; they do not abort the script.
if [ "$EXP_TRAIN_v1" != "$CALC_TRAIN_v1" ]; then
    echo "train-v1.1.json is corrupted! md5sum doesn't match"
fi

if [ "$EXP_DEV_v1" != "$CALC_DEV_v1" ]; then
    echo "dev-v1.1.json is corrupted! md5sum doesn't match"
fi

if [ "$EXP_EVAL_v1" != "$CALC_EVAL_v1" ]; then
    echo "evaluate-v1.1.py is corrupted! md5sum doesn't match"
fi

echo "Complete!"
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities for working with the local dataset cache.
+This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
+Copyright by the AllenNLP authors.
+"""
+
+from __future__ import (absolute_import, division, print_function, unicode_literals)
+
+import json
+import logging
+import os
+import shutil
+import tempfile
+from functools import wraps
+from hashlib import sha256
+import sys
+from io import open
+
+import boto3
+import requests
+from botocore.exceptions import ClientError
+from tqdm import tqdm
+
+try:
+    from urllib.parse import urlparse
+except ImportError:
+    from urlparse import urlparse
+
+# Cache root: $PYTORCH_PRETRAINED_BERT_CACHE or ~/.pytorch_pretrained_bert.
+# The AttributeError fallback covers interpreters without pathlib.Path.home().
+try:
+    from pathlib import Path
+    PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
+                                                   Path.home() / '.pytorch_pretrained_bert'))
+except AttributeError:
+    PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
+                                              os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_bert'))
+
+logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
+
+
+def url_to_filename(url, etag=None):
+    """
+    Convert `url` into a hashed filename in a repeatable way.
+    If `etag` is specified, append its hash to the url's, delimited
+    by a period.
+    """
+    url_bytes = url.encode('utf-8')
+    url_hash = sha256(url_bytes)
+    filename = url_hash.hexdigest()
+
+    if etag:
+        etag_bytes = etag.encode('utf-8')
+        etag_hash = sha256(etag_bytes)
+        filename += '.' + etag_hash.hexdigest()
+
+    return filename
+
+
+def filename_to_url(filename, cache_dir=None):
+    """
+    Return the url and etag (which may be ``None``) stored for `filename`.
+    Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
+    """
+    if cache_dir is None:
+        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
+    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
+        cache_dir = str(cache_dir)
+
+    cache_path = os.path.join(cache_dir, filename)
+    if not os.path.exists(cache_path):
+        raise EnvironmentError("file {} not found".format(cache_path))
+
+    meta_path = cache_path + '.json'
+    if not os.path.exists(meta_path):
+        raise EnvironmentError("file {} not found".format(meta_path))
+
+    with open(meta_path, encoding="utf-8") as meta_file:
+        metadata = json.load(meta_file)
+    url = metadata['url']
+    etag = metadata['etag']
+
+    return url, etag
+
+
+def cached_path(url_or_filename, cache_dir=None):
+    """
+    Given something that might be a URL (or might be a local path),
+    determine which. If it's a URL, download the file and cache it, and
+    return the path to the cached file. If it's already a local path,
+    make sure the file exists and then return the path.
+    """
+    if cache_dir is None:
+        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
+    if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
+        url_or_filename = str(url_or_filename)
+    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
+        cache_dir = str(cache_dir)
+
+    parsed = urlparse(url_or_filename)
+
+    if parsed.scheme in ('http', 'https', 's3'):
+        # URL, so get it from the cache (downloading if necessary)
+        return get_from_cache(url_or_filename, cache_dir)
+    elif os.path.exists(url_or_filename):
+        # File, and it exists.
+        return url_or_filename
+    elif parsed.scheme == '':
+        # File, but it doesn't exist.
+        raise EnvironmentError("file {} not found".format(url_or_filename))
+    else:
+        # Something unknown
+        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
+
+
+def split_s3_path(url):
+    """Split a full s3 path into the bucket name and path."""
+    parsed = urlparse(url)
+    if not parsed.netloc or not parsed.path:
+        raise ValueError("bad s3 path {}".format(url))
+    bucket_name = parsed.netloc
+    s3_path = parsed.path
+    # Remove '/' at beginning of path.
+    if s3_path.startswith("/"):
+        s3_path = s3_path[1:]
+    return bucket_name, s3_path
+
+
+def s3_request(func):
+    """
+    Wrapper function for s3 requests in order to create more helpful error
+    messages.
+    """
+
+    @wraps(func)
+    def wrapper(url, *args, **kwargs):
+        try:
+            return func(url, *args, **kwargs)
+        except ClientError as exc:
+            # Translate S3 404s into the same EnvironmentError the local path uses.
+            if int(exc.response["Error"]["Code"]) == 404:
+                raise EnvironmentError("file {} not found".format(url))
+            else:
+                raise
+
+    return wrapper
+
+
+@s3_request
+def s3_etag(url):
+    """Check ETag on S3 object."""
+    s3_resource = boto3.resource("s3")
+    bucket_name, s3_path = split_s3_path(url)
+    s3_object = s3_resource.Object(bucket_name, s3_path)
+    return s3_object.e_tag
+
+
+@s3_request
+def s3_get(url, temp_file):
+    """Pull a file directly from S3."""
+    s3_resource = boto3.resource("s3")
+    bucket_name, s3_path = split_s3_path(url)
+    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
+
+
+def http_get(url, temp_file):
+    """Stream `url` into `temp_file` with a tqdm progress bar."""
+    req = requests.get(url, stream=True)
+    content_length = req.headers.get('Content-Length')
+    total = int(content_length) if content_length is not None else None
+    progress = tqdm(unit="B", total=total)
+    for chunk in req.iter_content(chunk_size=1024):
+        if chunk:  # filter out keep-alive new chunks
+            progress.update(len(chunk))
+            temp_file.write(chunk)
+    progress.close()
+
+
+def get_from_cache(url, cache_dir=None):
+    """
+    Given a URL, look for the corresponding dataset in the local cache.
+    If it's not there, download it. Then return the path to the cached file.
+    """
+    if cache_dir is None:
+        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
+    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
+        cache_dir = str(cache_dir)
+
+    if not os.path.exists(cache_dir):
+        os.makedirs(cache_dir)
+
+    # Get eTag to add to filename, if it exists.
+    if url.startswith("s3://"):
+        etag = s3_etag(url)
+    else:
+        response = requests.head(url, allow_redirects=True)
+        if response.status_code != 200:
+            raise IOError("HEAD request failed for url {} with status code {}"
+                          .format(url, response.status_code))
+        etag = response.headers.get("ETag")
+
+    filename = url_to_filename(url, etag)
+
+    # get cache path to put the file
+    cache_path = os.path.join(cache_dir, filename)
+
+    if not os.path.exists(cache_path):
+        # Download to temporary file, then copy to cache dir once finished.
+        # Otherwise you get corrupt cache entries if the download gets interrupted.
+        with tempfile.NamedTemporaryFile() as temp_file:
+            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
+
+            # GET file object
+            if url.startswith("s3://"):
+                s3_get(url, temp_file)
+            else:
+                http_get(url, temp_file)
+
+            # we are copying the file before closing it, so flush to avoid truncation
+            temp_file.flush()
+            # shutil.copyfileobj() starts at the current position, so go to the start
+            temp_file.seek(0)
+
+            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
+            with open(cache_path, 'wb') as cache_file:
+                shutil.copyfileobj(temp_file, cache_file)
+
+            logger.info("creating metadata file for %s", cache_path)
+            meta = {'url': url, 'etag': etag}
+            meta_path = cache_path + '.json'
+            with open(meta_path, 'w', encoding="utf-8") as meta_file:
+                json.dump(meta, meta_file)
+
+            logger.info("removing temp file %s", temp_file.name)
+
+    return cache_path
+
+
+def read_set_from_file(filename):
+    '''
+    Extract a de-duped collection (set) of text from a file.
+    Expected file format is one item per line.
+    '''
+    collection = set()
+    with open(filename, 'r', encoding='utf-8') as file_:
+        for line in file_:
+            collection.add(line.rstrip())
+    return collection
+
+
+def get_file_extension(path, dot=True, lower=True):
+    """Return the extension of `path`, optionally without the dot / lowercased."""
+    ext = os.path.splitext(path)[1]
+    ext = ext if dot else ext[1:]
+    return ext.lower() if lower else ext
diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/lamb.py b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/lamb.py
new file mode 100644
index 0000000000000000000000000000000000000000..adffb0512c928274be085c741ccda34704a20f11
--- /dev/null
+++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/lamb.py
+# Copyright (c) 2021, Habana Labs Ltd. All rights reserved.
+# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# MIT License
+#
+# Copyright (c) 2019 cybertronai
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import torch
+from torch.optim import Optimizer
+
+
+class NVLAMB(Optimizer):
+
+    """Implements a pure pytorch variant of FuseLAMB (NVLAMB variant) optimizer from apex.optimizers.FusedLAMB
+    reference: https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py
+
+    LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
+
+    Arguments:
+        params (iterable): iterable of parameters to optimize or dicts defining
+            parameter groups.
+        lr (float, optional): learning rate. (default: 1e-3)
+        betas (Tuple[float, float], optional): coefficients used for computing
+            running averages of gradient and its norm. (default: (0.9, 0.999))
+        eps (float, optional): term added to the denominator to improve
+            numerical stability. (default: 1e-8)
+        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
+        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
+            algorithm from the paper `On the Convergence of Adam and Beyond`_
+            NOT SUPPORTED now! (default: False)
+        adam_w_mode (boolean, optional): Apply L2 regularization or weight decay
+            True for decoupled weight decay(also known as AdamW) (default: True)
+        grad_averaging (bool, optional): whether apply (1-beta2) to grad when
+            calculating running averages of gradient. (default: True)
+        set_grad_none (bool, optional): whether set grad to None when zero_grad()
+            method is called. (default: True)
+        max_grad_norm (float, optional): value used to clip global grad norm
+            (default: 1.0)
+        use_nvlamb (boolean, optional): Apply adaptive learning rate to 0.0
+            weight decay parameter (default: False)
+
+    .. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes:
+        https://arxiv.org/abs/1904.00962
+    .. _On the Convergence of Adam and Beyond:
+        https://openreview.net/forum?id=ryQu7f-RZ
+    """
+    def __init__(self, params, lr=1e-3, bias_correction=True,
+                 betas=(0.9, 0.999), eps=1e-6, weight_decay=0.01,
+                 amsgrad=False, adam_w_mode=True,
+                 grad_averaging=True, set_grad_none=True,
+                 max_grad_norm=1.0, use_nvlamb=False, fused=False):
+        if amsgrad:
+            raise RuntimeError('NVLAMB does not support the AMSGrad variant.')
+        defaults = dict(lr=lr, bias_correction=bias_correction,
+                        betas=betas, eps=eps, weight_decay=weight_decay,
+                        grad_averaging=grad_averaging,
+                        max_grad_norm=max_grad_norm)
+        super().__init__(params, defaults)
+        self.fused = fused
+        self.adam_w_mode = 1 if adam_w_mode else 0  # dummy for now, always use adam_w mode (wd is excluded from EMA)
+        self.set_grad_none = set_grad_none
+        self.use_nvlamb = use_nvlamb
+
+    def zero_grad(self):
+        # Setting grads to None (instead of zeroing) skips the add in the next
+        # backward pass; fall back to the base-class behaviour otherwise.
+        if self.set_grad_none:
+            for group in self.param_groups:
+                for p in group['params']:
+                    p.grad = None
+        else:
+            super(NVLAMB, self).zero_grad()
+
+    def step(self, closure=None):
+        """Performs a single optimization step.
+        Arguments:
+            closure (callable, optional): A closure that reevaluates the model
+                and returns the loss.
+        """
+        device = self.param_groups[0]["params"][0].device
+
+        loss = None
+        if closure is not None:
+            loss = closure()
+
+        # Global gradient norm across every parameter, used for global clipping.
+        global_grad_norm = torch.zeros(1, device=device)
+        for group in self.param_groups:
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+                grad = p.grad.data
+                if grad.is_sparse:
+                    raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instead.')
+                global_grad_norm.add_(grad.pow(2).sum())
+
+        global_grad_norm_ = torch.sqrt(global_grad_norm)
+        max_grad_norm = self.defaults['max_grad_norm']
+
+        if global_grad_norm_ > max_grad_norm:
+            clip_global_grad_norm = global_grad_norm_ / max_grad_norm
+        else:
+            clip_global_grad_norm = 1.0
+
+        for group in self.param_groups:
+            bias_correction = 1 if group['bias_correction'] else 0
+            beta1, beta2 = group['betas']
+            grad_averaging = 1 if group['grad_averaging'] else 0
+            if grad_averaging:
+                beta3 = 1 - beta1
+            else:
+                beta3 = 1.0
+
+            # assume same step across group now to simplify things
+            # per parameter step can be easily support by making it tensor, or pass list into kernel
+            if 'step' in group:
+                group['step'] += 1
+            else:
+                group['step'] = 1
+
+            step_size = group['lr']
+
+            if bias_correction:
+                bias_correction1 = 1 - beta1 ** group['step']
+                bias_correction2 = 1 - beta2 ** group['step']
+            else:
+                bias_correction1, bias_correction2 = 1.0, 1.0
+
+            for p in group['params']:
+                if p.grad is None:
+                    continue
+                grad = p.grad.data.div_(clip_global_grad_norm)
+                state = self.state[p]
+
+                # State initialization
+                if len(state) == 0:
+                    # Exponential moving average of gradient values
+                    state['exp_avg'] = torch.zeros_like(p.data)
+                    # Exponential moving average of squared gradient values
+                    state['exp_avg_sq'] = torch.zeros_like(p.data)
+
+                exp_avg_, exp_avg_sq_ = state['exp_avg'], state['exp_avg_sq']
+
+                # Decay the first and second moment running average coefficient
+                # m_t
+                exp_avg_.mul_(beta1).add_(grad, alpha=beta3)
+                # v_t
+                exp_avg_sq_.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
+                # create clones to avoid modifying runner stats
+                exp_avg = exp_avg_.div(bias_correction1)
+                exp_avg_sq = exp_avg_sq_.div(bias_correction2)
+
+                # || w_t ||
+                weight_norm = p.data.norm(2.0)
+                # u_t
+                exp_avg_sq_sqrt = torch.sqrt(exp_avg_sq)
+                adam_step = exp_avg.div_(exp_avg_sq_sqrt.add_(group['eps']))
+                if group['weight_decay'] != 0:
+                    adam_step.add_(p.data, alpha=group['weight_decay'])
+                # || u_t ||
+                adam_norm = adam_step.norm(2.0)
+                if (group['weight_decay'] != 0 or self.use_nvlamb) and adam_norm > 0 and weight_norm > 0:
+                    trust_ratio = weight_norm / adam_norm
+                    trust_ratio = trust_ratio.item()
+                else:
+                    trust_ratio = 1
+
+                state['weight_norm'] = weight_norm
+                state['adam_norm'] = adam_norm
+                state['trust_ratio'] = trust_ratio
+
+                #p.data.add_(adam_step, alpha=-step_size * trust_ratio)
+                alpha = -step_size * trust_ratio
+                adam_step2 = adam_step * alpha
+                p.data.add_(adam_step2)
+
+        # Return the closure's loss per the torch.optim.Optimizer.step contract
+        # (previously computed but silently discarded).
+        return loss
diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/modeling.py b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/modeling.py
new file mode 100644
index 0000000000000000000000000000000000000000..21d0ba114b4beb32d889bb38529fd6e774051c98
--- /dev/null
+++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/modeling.py
+# coding=utf-8
+# Copyright (c) 2021, Habana Labs Ltd. All rights reserved.
+# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
+# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""PyTorch BERT model."""
+
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import copy
+import json
+import logging
+import math
+import os
+import shutil
+import tarfile
+import tempfile
+import sys
+from io import open
+
+import torch
+from torch import nn
+from torch.nn import CrossEntropyLoss
+from torch.utils import checkpoint
+
+sys.path.append('/workspace/bert/')
+from file_utils import cached_path
+
+from torch.nn import Module
+from torch.nn.parameter import Parameter
+import torch.nn.functional as F
+import torch.nn.init as init
+
+logger = logging.getLogger(__name__)
+
+PRETRAINED_MODEL_ARCHIVE_MAP = {
+    'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
+    'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
+    'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
+    'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
+    'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
+    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
+    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
+}
+CONFIG_NAME = 'bert_config.json'
+WEIGHTS_NAME = 'pytorch_model.bin'
+TF_WEIGHTS_NAME = 'model.ckpt'
+
+def load_tf_weights_in_bert(model, tf_checkpoint_path):
+    """ Load tf checkpoints in a pytorch model
+    """
+    try:
+        import re
+        import numpy as np
+        import tensorflow as tf
+    except ImportError:
+        print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
+              "https://www.tensorflow.org/install/ for installation instructions.")
+        raise
+    tf_path = os.path.abspath(tf_checkpoint_path)
+    print("Converting TensorFlow checkpoint from {}".format(tf_path))
+    # Load weights from TF model
+    init_vars = tf.train.list_variables(tf_path)
+    names = []
+    arrays = []
+    for name, shape in init_vars:
+        print("Loading TF weight {} with shape {}".format(name, shape))
+        array = tf.train.load_variable(tf_path, name)
+        names.append(name)
+        arrays.append(array)
+
+    for name, array in zip(names, arrays):
+        name = name.split('/')
+        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
+        # which are not required for using pretrained model
+        if any(n in ["adam_v", "adam_m"] for n in name):
+            print("Skipping {}".format("/".join(name)))
+            continue
+        pointer = model
+        for m_name in name:
+            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
+                l = re.split(r'_(\d+)', m_name)
+            else:
+                l = [m_name]
+            if l[0] == 'kernel' or l[0] == 'gamma':
+                pointer = getattr(pointer, 'weight')
+            elif l[0] == 'output_bias' or l[0] == 'beta':
+                pointer = getattr(pointer, 'bias')
+            elif l[0] == 'output_weights':
+                pointer = getattr(pointer, 'weight')
+            else:
+                pointer = getattr(pointer, l[0])
+            if len(l) >= 2:
+                num = int(l[1])
+                pointer = pointer[num]
+        if m_name[-11:] == '_embeddings':
+            pointer = getattr(pointer, 'weight')
+        elif m_name == 'kernel':
+            array = np.ascontiguousarray(np.transpose(array))
+        try:
+            assert pointer.shape == array.shape
+        except AssertionError as e:
+            e.args += (pointer.shape, array.shape)
+            raise
+        print("Initialize PyTorch weight {}".format(name))
+        pointer.data = torch.from_numpy(array)
+    return model
+
+def gelu(x):
+    if torch.cuda.is_available():
+        # erf-based GELU; 1.41421 is sqrt(2) truncated to 5 decimals — presumably
+        # kept for parity with the NVIDIA reference; confirm before "fixing".
+        return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
+    else:
+        return F.gelu(x)
+
+#used only for triton inference
+def bias_gelu(bias, y):
+    x = bias + y
+    return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
+
+# used specifically for training since torch.nn.functional.gelu breaks ONNX export
+def bias_gelu_training(bias, y):
+    x = bias + y
+    return torch.nn.functional.gelu(x) # Breaks ONNX export
+
+def bias_tanh(bias, y):
+    x = bias + y
+    return torch.tanh(x)
+
+def swish(x):
+    return x * torch.sigmoid(x)
+
+def tanh(x):
+    return torch.tanh(x)
+
+#torch.nn.functional.gelu(x) # Breaks ONNX export
+ACT2FN = {"gelu": gelu, "bias_gelu": bias_gelu, "bias_tanh": bias_tanh, "relu": torch.nn.functional.relu, "swish": swish, "tanh": tanh}
+
+class LinearActivation(Module):
+    r"""Fused Linear and activation Module.
+    """
+    __constants__ = ['bias']
+
+    def __init__(self, in_features, out_features, act='gelu', bias=True):
+        super(LinearActivation, self).__init__()
+        self.in_features = in_features
+        self.out_features = out_features
+        # setting act_fn to nn.Identity caused issues when re-assigning to gelu.Hence set to None
+        #self.act_fn = nn.Identity() #
+        self.act_fn = None #
+        self.biased_act_fn = None #
+        self.bias = None #
+        if isinstance(act, str) or (sys.version_info[0] == 2 and isinstance(act, unicode)): # For TorchScript
+            if bias and not 'bias' in act and torch.cuda.is_available(): # compatibility
+                act = 'bias_' + act #
+                self.biased_act_fn = ACT2FN[act] #
+
+            else:
+                self.act_fn = ACT2FN[act]
+        else:
+            self.act_fn = act
+        self.weight = Parameter(torch.Tensor(out_features, in_features))
+        if bias:
+            self.bias = Parameter(torch.Tensor(out_features))
+        else:
+            self.register_parameter('bias', None)
+        self.reset_parameters()
+
+    def reset_parameters(self):
+        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
+        if self.bias is not None:
+            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
+            bound = 1 / math.sqrt(fan_in)
+            init.uniform_(self.bias, -bound, bound)
+
+    def forward(self, input):
+        if not self.bias is None and torch.cuda.is_available():
+            return self.biased_act_fn(self.bias, F.linear(input, self.weight, None))
+        else:
+            return self.act_fn(F.linear(input, self.weight, self.bias))
+
+    def extra_repr(self):
+        return 'in_features={}, out_features={}, bias={}'.format(
+            self.in_features, self.out_features, self.bias is not None
+        )
+
+
+class BertConfig(object):
+    """Configuration class to store the configuration of a `BertModel`.
+    """
+    def __init__(self,
+                 vocab_size_or_config_json_file,
+                 hidden_size=768,
+                 num_hidden_layers=12,
+                 num_attention_heads=12,
+                 intermediate_size=3072,
+                 hidden_act="gelu",
+                 hidden_dropout_prob=0.1,
+                 attention_probs_dropout_prob=0.1,
+                 max_position_embeddings=512,
+                 type_vocab_size=2,
+                 initializer_range=0.02,
+                 output_all_encoded_layers=False):
+        """Constructs BertConfig.
+
+        Args:
+            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
+            hidden_size: Size of the encoder layers and the pooler layer.
+            num_hidden_layers: Number of hidden layers in the Transformer encoder.
+            num_attention_heads: Number of attention heads for each attention layer in
+                the Transformer encoder.
+            intermediate_size: The size of the "intermediate" (i.e., feed-forward)
+                layer in the Transformer encoder.
+            hidden_act: The non-linear activation function (function or string) in the
+                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
+            hidden_dropout_prob: The dropout probability for all fully connected
+                layers in the embeddings, encoder, and pooler.
+            attention_probs_dropout_prob: The dropout ratio for the attention
+                probabilities.
+            max_position_embeddings: The maximum sequence length that this model might
+                ever be used with. Typically set this to something large just in case
+                (e.g., 512 or 1024 or 2048).
+            type_vocab_size: The vocabulary size of the `token_type_ids` passed into
+                `BertModel`.
+            initializer_range: The stddev of the truncated_normal_initializer for
+                initializing all weight matrices.
+        """
+        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
+                                                               and isinstance(vocab_size_or_config_json_file, unicode)):
+            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
+                json_config = json.loads(reader.read())
+            for key, value in json_config.items():
+                self.__dict__[key] = value
+        elif isinstance(vocab_size_or_config_json_file, int):
+            self.vocab_size = vocab_size_or_config_json_file
+            self.hidden_size = hidden_size
+            self.num_hidden_layers = num_hidden_layers
+            self.num_attention_heads = num_attention_heads
+            self.hidden_act = hidden_act
+            self.intermediate_size = intermediate_size
+            self.hidden_dropout_prob = hidden_dropout_prob
+            self.attention_probs_dropout_prob = attention_probs_dropout_prob
+            self.max_position_embeddings = max_position_embeddings
+            self.type_vocab_size = type_vocab_size
+            self.initializer_range = initializer_range
+            self.output_all_encoded_layers = output_all_encoded_layers
+        else:
+            raise ValueError("First argument must be either a vocabulary size (int)"
+                             "or the path to a pretrained model config file (str)")
+
+    @classmethod
+    def from_dict(cls, json_object):
+        """Constructs a `BertConfig` from a Python dictionary of parameters."""
+        config = BertConfig(vocab_size_or_config_json_file=-1)
+        for key, value in json_object.items():
+            config.__dict__[key] = value
+        return config
+
+    @classmethod
+    def from_json_file(cls, json_file):
+        """Constructs a `BertConfig` from a json file of parameters."""
+        with open(json_file, "r", encoding='utf-8') as reader:
+            text = reader.read()
+        return cls.from_dict(json.loads(text))
+
+    def __repr__(self):
+        return str(self.to_json_string())
+
+    def to_dict(self):
+        """Serializes this instance to a Python dictionary."""
+        output = copy.deepcopy(self.__dict__)
+        return output
+
+    def to_json_string(self):
+        """Serializes this instance to a JSON string."""
+        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
+
+class BertNonFusedLayerNorm(nn.Module):
+    def __init__(self, hidden_size, eps=1e-12):
+        """Construct a layernorm module in the TF style (epsilon inside the square root).
+        """
+        super(BertNonFusedLayerNorm, self).__init__()
+        self.weight = nn.Parameter(torch.ones(hidden_size))
+        self.bias = nn.Parameter(torch.zeros(hidden_size))
+        self.variance_epsilon = eps
+
+    def forward(self, x):
+        u = x.mean(-1, keepdim=True)
+        s = (x - u)
+        s = s * s
+        s = s.mean(-1, keepdim=True)
+        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
+        return self.weight * x + self.bias
+
+try:
+    import apex
+    #apex.amp.register_half_function(apex.normalization.fused_layer_norm, 'FusedLayerNorm')
+    import apex.normalization
+    from apex.normalization.fused_layer_norm import FusedLayerNormAffineFunction
+    #apex.amp.register_float_function(apex.normalization.FusedLayerNorm, 'forward')
+    #BertLayerNorm = apex.normalization.FusedLayerNorm
+    APEX_IS_AVAILABLE = True
+except ImportError:
+    if torch.cuda.is_available():
+        print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
+    #BertLayerNorm = BertNonFusedLayerNorm
+    APEX_IS_AVAILABLE = False
+
+if torch.cuda.is_available():
+    class BertLayerNorm(Module):
+        def __init__(self, hidden_size, eps=1e-12):
+            super(BertLayerNorm, self).__init__()
+            self.shape = torch.Size((hidden_size,))
+            self.eps = eps
+            self.weight = nn.Parameter(torch.ones(hidden_size))
+            self.bias = nn.Parameter(torch.zeros(hidden_size))
+            self.apex_enabled = APEX_IS_AVAILABLE
+
+        @torch.jit.unused
+        def fused_layer_norm(self, x):
+            return FusedLayerNormAffineFunction.apply(
+                x, self.weight, self.bias, self.shape, self.eps)
+
+
+        def forward(self, x):
+            if self.apex_enabled and not torch.jit.is_scripting():
+                x = self.fused_layer_norm(x)
+            else:
+                u = x.mean(-1, keepdim=True)
+                s = (x - u)
+                s = s * s
+                s = s.mean(-1, keepdim=True)
+                x = (x - u) / torch.sqrt(s + self.eps)
+                x = self.weight * x + self.bias
+            return x
+
+else:
+    BertLayerNorm = torch.nn.LayerNorm
+
+class BertEmbeddings(nn.Module):
+    """Construct the embeddings from word, position and token_type embeddings.
+    """
+    def __init__(self, config):
+        super(BertEmbeddings, self).__init__()
+        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
+        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
+
+        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+        # any TensorFlow checkpoint file
+        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+    def forward(self, input_ids, token_type_ids, positions = None):
+        seq_length = input_ids.size(1)
+        if positions is not None:
+            position_ids = positions
+        else:
+            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
+            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
+
+        words_embeddings = self.word_embeddings(input_ids)
+        position_embeddings = self.position_embeddings(position_ids)
+        token_type_embeddings = self.token_type_embeddings(token_type_ids)
+
+        embeddings = words_embeddings + position_embeddings + token_type_embeddings
+        embeddings = self.LayerNorm(embeddings)
+        embeddings = self.dropout(embeddings)
+        return embeddings
+
+
+class BertSelfAttention(nn.Module):
+    def __init__(self, config):
+        super(BertSelfAttention, self).__init__()
+        if config.hidden_size % config.num_attention_heads != 0:
+            raise ValueError(
+                "The hidden size (%d) is not a multiple of the number of attention "
+                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
+        self.num_attention_heads = config.num_attention_heads
+        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+        self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+        self.query = nn.Linear(config.hidden_size, self.all_head_size)
+        self.key = nn.Linear(config.hidden_size, self.all_head_size)
+        self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+    def transpose_for_scores(self, x):
+        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+        x = torch.reshape(x, new_x_shape)
+        return x.permute(0, 2, 1, 3)
+
+    def transpose_key_for_scores(self, x):
+        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+        x = torch.reshape(x, new_x_shape)
+        return x.permute(0, 2, 3, 1)
+
+    def forward(self, hidden_states, attention_mask):
+        mixed_query_layer = self.query(hidden_states)
+        mixed_key_layer = self.key(hidden_states)
+        mixed_value_layer = self.value(hidden_states)
+
+        query_layer = self.transpose_for_scores(mixed_query_layer)
+        key_layer = self.transpose_key_for_scores(mixed_key_layer)
+        value_layer = self.transpose_for_scores(mixed_value_layer)
+
+        # Take the dot product between "query" and "key" to get the raw attention scores.
+        attention_scores = torch.matmul(query_layer, key_layer)
+        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+        # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
+        attention_scores = attention_scores + attention_mask
+
+        # Normalize the attention scores to probabilities.
+        attention_probs = F.softmax(attention_scores, dim=-1)
+
+        # This is actually dropping out entire tokens to attend to, which might
+        # seem a bit unusual, but is taken from the original Transformer paper.
+        attention_probs = self.dropout(attention_probs)
+
+        context_layer = torch.matmul(attention_probs, value_layer)
+        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+        context_layer = torch.reshape(context_layer, new_context_layer_shape)
+        return context_layer
+
+
+class BertSelfOutput(nn.Module):
+    def __init__(self, config):
+        super(BertSelfOutput, self).__init__()
+        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+    def forward(self, hidden_states, input_tensor):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.dropout(hidden_states)
+        hidden_states = self.LayerNorm(hidden_states + input_tensor)
+        return hidden_states
+
+
+class BertAttention(nn.Module):
+    def __init__(self, config):
+        super(BertAttention, self).__init__()
+        self.self = BertSelfAttention(config)
+        self.output = BertSelfOutput(config)
+
+    def forward(self, input_tensor, attention_mask):
+        self_output = self.self(input_tensor, attention_mask)
+        attention_output = self.output(self_output, input_tensor)
+        return attention_output
+
+
+class BertIntermediate(nn.Module):
+    def __init__(self, config):
+        super(BertIntermediate, self).__init__()
+        self.dense_act = LinearActivation(config.hidden_size, config.intermediate_size, act=config.hidden_act)
+
+    def forward(self, hidden_states):
+        hidden_states = self.dense_act(hidden_states)
+        return hidden_states
+
+
+class BertOutput(nn.Module):
+    def __init__(self, config):
+        super(BertOutput, self).__init__()
+        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
+        self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+    def forward(self, hidden_states, input_tensor):
+        hidden_states = self.dense(hidden_states)
+        hidden_states = self.dropout(hidden_states)
+        hidden_states = self.LayerNorm(hidden_states + input_tensor)
+        return hidden_states
+
+
+class BertLayer(nn.Module):
+    def __init__(self, config):
+        super(BertLayer, self).__init__()
+        self.attention = BertAttention(config)
+        self.intermediate = BertIntermediate(config)
+        self.output = BertOutput(config)
+
+    def forward(self, hidden_states, attention_mask):
+        attention_output = self.attention(hidden_states, attention_mask)
+        intermediate_output = self.intermediate(attention_output)
+        layer_output = self.output(intermediate_output, attention_output)
+        return layer_output
+
+class BertEncoder(nn.Module):
+    def __init__(self, config):
+        super(BertEncoder, self).__init__()
+        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
+        self.output_all_encoded_layers = config.output_all_encoded_layers
+        self._checkpoint_activations = False
+
+    @torch.jit.unused
+    def checkpointed_forward(self, hidden_states, attention_mask):
+        def custom(start, end):
+            def custom_forward(*inputs):
+                layers = self.layer[start:end]
+                x_ = inputs[0]
+                for layer in layers:
+                    x_ = layer(x_, inputs[1])
+                return x_
+            return custom_forward
+
+        l = 0
+        num_layers = len(self.layer)
+        # Checkpoint in sqrt(N)-sized chunks to balance memory vs recompute.
+        chunk_length = math.ceil(math.sqrt(num_layers))
+        while l < num_layers:
+            hidden_states = checkpoint.checkpoint(custom(l, l+chunk_length), hidden_states, attention_mask*1)
+            l += chunk_length
+
+        return hidden_states
+
+    def forward(self, hidden_states, attention_mask):
+        all_encoder_layers = []
+
+        if self._checkpoint_activations:
+            hidden_states = self.checkpointed_forward(hidden_states, attention_mask)
+        else:
+            for i,layer_module in enumerate(self.layer):
+                hidden_states = layer_module(hidden_states, attention_mask)
+
+                if self.output_all_encoded_layers:
+                    all_encoder_layers.append(hidden_states)
+
+        if not self.output_all_encoded_layers or self._checkpoint_activations:
+            all_encoder_layers.append(hidden_states)
+        return all_encoder_layers
+
+def
gather_indexes(sequence_tensor, positions): + """Gathers the vectors at the specific positions over a minibatch.""" + batch_size = sequence_tensor.shape[0] + seq_length = sequence_tensor.shape[1] + width = sequence_tensor.shape[2] + + flat_offsets = (torch.arange(batch_size, dtype=torch.long, device=sequence_tensor.device) * seq_length).unsqueeze(1) + flat_positions = (positions + flat_offsets).flatten() + flat_sequence_tensor = sequence_tensor.reshape(batch_size * seq_length, width) + output_tensor = flat_sequence_tensor[flat_positions] + return output_tensor.reshape(batch_size, -1, width) + +class BertPooler(nn.Module): + def __init__(self, config): + super(BertPooler, self).__init__() + self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act="tanh") + + def forward(self, hidden_states, next_sentence_positions = None): + if next_sentence_positions is not None: + selected_tokens = gather_indexes(hidden_states, next_sentence_positions) + else: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + selected_tokens = hidden_states[:, 0] + pooled_output = self.dense_act(selected_tokens) + return pooled_output + + +class BertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super(BertPredictionHeadTransform, self).__init__() + self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act=config.hidden_act) + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) + + def forward(self, hidden_states): + hidden_states = self.dense_act(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + def __init__(self, config, bert_model_embedding_weights): + super(BertLMPredictionHead, self).__init__() + self.transform = BertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. 
+ self.decoder = nn.Linear(bert_model_embedding_weights.size(1), + bert_model_embedding_weights.size(0), + bias=False) + self.decoder.weight = bert_model_embedding_weights + self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0))) + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + self.bias + return hidden_states + + +class BertOnlyMLMHead(nn.Module): + def __init__(self, config, bert_model_embedding_weights): + super(BertOnlyMLMHead, self).__init__() + self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class BertOnlyNSPHead(nn.Module): + def __init__(self, config): + super(BertOnlyNSPHead, self).__init__() + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, pooled_output): + seq_relationship_score = self.seq_relationship(pooled_output) + return seq_relationship_score + + +class BertPreTrainingHeads(nn.Module): + def __init__(self, config, bert_model_embedding_weights): + super(BertPreTrainingHeads, self).__init__() + self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, sequence_output, pooled_output): + prediction_scores = self.predictions(sequence_output) + seq_relationship_score = self.seq_relationship(pooled_output) + return prediction_scores, seq_relationship_score + + +class BertPreTrainedModel(nn.Module): + """ An abstract class to handle weights initialization and + a simple interface for dowloading and loading pretrained models. + """ + def __init__(self, config, *inputs, **kwargs): + super(BertPreTrainedModel, self).__init__() + if not isinstance(config, BertConfig): + raise ValueError( + "Parameter config in `{}(config)` should be an instance of class `BertConfig`. 
" + "To create a model from a Google pretrained model use " + "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( + self.__class__.__name__, self.__class__.__name__ + )) + self.config = config + + def init_bert_weights(self, module): + """ Initialize the weights. + """ + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + elif isinstance(module, BertLayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + def checkpoint_activations(self, val): + def _apply_flag(module): + if hasattr(module, "_checkpoint_activations"): + module._checkpoint_activations=val + self.apply(_apply_flag) + def enable_apex(self, val): + def _apply_flag(module): + if hasattr(module, "apex_enabled"): + module.apex_enabled=val + self.apply(_apply_flag) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, + from_tf=False, *inputs, **kwargs): + """ + Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict. + Download and cache the pre-trained model file if needed. + + Params: + pretrained_model_name_or_path: either: + - a str with the name of a pre-trained model to load selected in the list of: + . `bert-base-uncased` + . `bert-large-uncased` + . `bert-base-cased` + . `bert-large-cased` + . `bert-base-multilingual-uncased` + . `bert-base-multilingual-cased` + . `bert-base-chinese` + - a path or url to a pretrained model archive containing: + . `bert_config.json` a configuration file for the model + . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance + - a path or url to a pretrained model archive containing: + . 
`bert_config.json` a configuration file for the model + . `model.chkpt` a TensorFlow checkpoint + from_tf: should we load the weights from a locally saved TensorFlow checkpoint + cache_dir: an optional path to a folder in which the pre-trained models will be cached. + state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models + *inputs, **kwargs: additional input for the specific Bert class + (ex: num_labels for BertForSequenceClassification) + """ + if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: + archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path] + else: + archive_file = pretrained_model_name_or_path + # redirect to the cache, if necessary + try: + resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir) + except EnvironmentError: + logger.error( + "Model name '{}' was not found in model name list ({}). " + "We assumed '{}' was a path or url but couldn't find any file " + "associated to this path or url.".format( + pretrained_model_name_or_path, + ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), + archive_file)) + return None + if resolved_archive_file == archive_file: + logger.info("loading archive file {}".format(archive_file)) + else: + logger.info("loading archive file {} from cache at {}".format( + archive_file, resolved_archive_file)) + tempdir = None + if os.path.isdir(resolved_archive_file) or from_tf: + serialization_dir = resolved_archive_file + else: + # Extract archive to temp dir + tempdir = tempfile.mkdtemp() + logger.info("extracting archive file {} to temp dir {}".format( + resolved_archive_file, tempdir)) + with tarfile.open(resolved_archive_file, 'r:gz') as archive: + def is_within_directory(directory, target): + abs_directory = os.path.abspath(directory) + abs_target = os.path.abspath(target) + + prefix = os.path.commonprefix([abs_directory, abs_target]) + + return prefix == abs_directory + + def safe_extract(tar, path=".", 
members=None, *, numeric_owner=False): + + for member in tar.getmembers(): + member_path = os.path.join(path, member.name) + if not is_within_directory(path, member_path): + raise Exception("Attempted Path Traversal in Tar File") + + tar.extractall(path, members, numeric_owner=numeric_owner) + + + safe_extract(archive, tempdir) + serialization_dir = tempdir + # Load config + config_file = os.path.join(serialization_dir, CONFIG_NAME) + config = BertConfig.from_json_file(config_file) + logger.info("Model config {}".format(config)) + # Instantiate model. + model = cls(config, *inputs, **kwargs) + if state_dict is None and not from_tf: + weights_path = os.path.join(serialization_dir, WEIGHTS_NAME) + state_dict = torch.load(weights_path, map_location='cpu' if not torch.cuda.is_available() else None) + if tempdir: + # Clean up temp dir + shutil.rmtree(tempdir) + if from_tf: + # Directly load from a TensorFlow checkpoint + weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME) + return load_tf_weights_in_bert(model, weights_path) + # Load from a PyTorch state_dict + old_keys = [] + new_keys = [] + for key in state_dict.keys(): + new_key = None + if 'gamma' in key: + new_key = key.replace('gamma', 'weight') + if 'beta' in key: + new_key = key.replace('beta', 'bias') + if new_key: + old_keys.append(key) + new_keys.append(new_key) + for old_key, new_key in zip(old_keys, new_keys): + state_dict[new_key] = state_dict.pop(old_key) + + missing_keys = [] + unexpected_keys = [] + error_msgs = [] + # copy state_dict so _load_from_state_dict can modify it + metadata = getattr(state_dict, '_metadata', None) + state_dict = state_dict.copy() + if metadata is not None: + state_dict._metadata = metadata + + def load(module, prefix=''): + local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) + module._load_from_state_dict( + state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) + for name, child in module._modules.items(): + if 
child is not None: + load(child, prefix + name + '.') + start_prefix = '' + if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()): + start_prefix = 'bert.' + load(model, prefix=start_prefix) + if len(missing_keys) > 0: + logger.info("Weights of {} not initialized from pretrained model: {}".format( + model.__class__.__name__, missing_keys)) + if len(unexpected_keys) > 0: + logger.info("Weights from pretrained model not used in {}: {}".format( + model.__class__.__name__, unexpected_keys)) + if len(error_msgs) > 0: + raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( + model.__class__.__name__, "\n\t".join(error_msgs))) + return model + + +class BertModel(BertPreTrainedModel): + """BERT model ("Bidirectional Embedding Representations from a Transformer"). + + Params: + config: a BertConfig class instance with the configuration to build a new model + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. 
+ + Outputs: Tuple of (encoded_layers, pooled_output) + `encoded_layers`: controled by `output_all_encoded_layers` argument: + - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end + of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each + encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size], + - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding + to the last attention block of shape [batch_size, sequence_length, hidden_size], + `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a + classifier pretrained on top of the hidden state associated to the first character of the + input (`CLS`) to train on the Next-Sentence task (see BERT's paper). + + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + model = modeling.BertModel(config=config) + all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) + ``` + """ + def __init__(self, config): + super(BertModel, self).__init__(config) + self.embeddings = BertEmbeddings(config) + self.encoder = BertEncoder(config) + self.pooler = BertPooler(config) + self.apply(self.init_bert_weights) + self.output_all_encoded_layers = config.output_all_encoded_layers + + def forward(self, input_ids, token_type_ids, attention_mask, enable_packed_data_mode = False, positions = None, next_sentence_positions = None): + if enable_packed_data_mode: + extended_attention_mask = 0.0 + for i in range(3): + tmp = (attention_mask == 
i+1).type(torch.float32).unsqueeze(-1) + tmp = torch.matmul(tmp, torch.transpose(tmp, 1, 2)) + extended_attention_mask += tmp.unsqueeze(1) + else: + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. + extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to(dtype=self.embeddings.word_embeddings.weight.dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + + embedding_output = self.embeddings(input_ids, token_type_ids, positions) + encoded_layers = self.encoder(embedding_output, extended_attention_mask) + sequence_output = encoded_layers[-1] + pooled_output = self.pooler(sequence_output, next_sentence_positions) + if not self.output_all_encoded_layers: + encoded_layers = encoded_layers[-1:] + return encoded_layers, pooled_output + + +class BertForPreTraining(BertPreTrainedModel): + """BERT model with pre-training heads. + This module comprises the BERT model followed by the two pre-training heads: + - the masked language modeling head, and + - the next sentence classification head. + + Params: + config: a BertConfig class instance with the configuration to build a new model. 
+ + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] + with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss + is only computed for the labels set in [0, ..., vocab_size] + `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size] + with indices selected in [0, 1]. + 0 => next sentence is the continuation, 1 => next sentence is a random sentence. + + Outputs: + if `masked_lm_labels` and `next_sentence_label` are not `None`: + Outputs the total_loss which is the sum of the masked language modeling loss and the next + sentence classification loss. + if `masked_lm_labels` or `next_sentence_label` is `None`: + Outputs a tuple comprising + - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and + - the next sentence classification logits of shape [batch_size, 2]. 
+ + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + model = BertForPreTraining(config) + masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask) + ``` + """ + def __init__(self, config): + super(BertForPreTraining, self).__init__(config) + self.bert = BertModel(config) + self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight) + self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=-1) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids, attention_mask, masked_lm_labels=None, next_sentence_labels=None, enable_packed_data_mode = False, positions = None, next_sentence_positions = None): + encoded_layers, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, enable_packed_data_mode, positions, next_sentence_positions) + sequence_output = encoded_layers[-1] + prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) + return prediction_scores, seq_relationship_score + +class BertForMaskedLM(BertPreTrainedModel): + """BERT model with the masked language modeling head. + This module comprises the BERT model followed by the masked language modeling head. + + Params: + config: a BertConfig class instance with the configuration to build a new model. 
+ + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] + with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss + is only computed for the labels set in [0, ..., vocab_size] + + Outputs: + if `masked_lm_labels` is not `None`: + Outputs the masked language modeling loss. + if `masked_lm_labels` is `None`: + Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size]. 
+ + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + model = BertForMaskedLM(config) + masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask) + ``` + """ + def __init__(self, config): + super(BertForMaskedLM, self).__init__(config) + self.bert = BertModel(config) + self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None): + encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask) + sequence_output = encoded_layers[-1] + prediction_scores = self.cls(sequence_output) + + if masked_lm_labels is not None: + loss_fct = CrossEntropyLoss(ignore_index=-1) + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) + return masked_lm_loss + else: + return prediction_scores + + +class BertForNextSentencePrediction(BertPreTrainedModel): + """BERT model with next sentence prediction head. + This module comprises the BERT model followed by the next sentence classification head. + + Params: + config: a BertConfig class instance with the configuration to build a new model. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. 
Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size] + with indices selected in [0, 1]. + 0 => next sentence is the continuation, 1 => next sentence is a random sentence. + + Outputs: + if `next_sentence_label` is not `None`: + Outputs the total_loss which is the sum of the masked language modeling loss and the next + sentence classification loss. + if `next_sentence_label` is `None`: + Outputs the next sentence classification logits of shape [batch_size, 2]. + + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + model = BertForNextSentencePrediction(config) + seq_relationship_logits = model(input_ids, token_type_ids, input_mask) + ``` + """ + def __init__(self, config): + super(BertForNextSentencePrediction, self).__init__(config) + self.bert = BertModel(config) + self.cls = BertOnlyNSPHead(config) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None): + _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask) + seq_relationship_score = self.cls( pooled_output) + + if next_sentence_label is not None: + loss_fct = 
CrossEntropyLoss(ignore_index=-1) + next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) + return next_sentence_loss + else: + return seq_relationship_score + + +class BertForSequenceClassification(BertPreTrainedModel): + """BERT model for classification. + This module is composed of the BERT model with a linear layer on top of + the pooled output. + + Params: + `config`: a BertConfig class instance with the configuration to build a new model. + `num_labels`: the number of classes for the classifier. Default = 2. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] + with indices selected in [0, ..., num_labels]. + + Outputs: + if `labels` is not `None`: + Outputs the CrossEntropy classification loss of the output with the labels. + if `labels` is `None`: + Outputs the classification logits of shape [batch_size, num_labels]. 
class BertForMultipleChoice(BertPreTrainedModel):
    """BERT model for multiple choice tasks.

    Composed of the BERT model with a linear scoring layer on top of the
    pooled output. Each choice is scored independently (one scalar per
    choice) and the scores are reshaped to [batch_size, num_choices].

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.
        `num_choices`: the number of choices per example. Default = 2.

    Inputs:
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the word token indices in the vocabulary.
        `token_type_ids`: an optional torch.LongTensor of shape
            [batch_size, num_choices, sequence_length] with token type indices in [0, 1].
        `attention_mask`: an optional torch.LongTensor of shape
            [batch_size, num_choices, sequence_length] with indices in [0, 1],
            masking padding positions for batches of varying-length sentences.
        `labels`: optional torch.LongTensor of shape [batch_size] with indices
            selected in [0, ..., num_choices - 1].

    Outputs:
        If `labels` is not None: the CrossEntropy classification loss.
        If `labels` is None: classification logits of shape [batch_size, num_choices].

    Example usage:
    ```python
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
    input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]], [[1, 1, 0], [1, 0, 0]]])
    token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]], [[0, 1, 1], [0, 0, 1]]])
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
    model = BertForMultipleChoice(config, 2)
    logits = model(input_ids, token_type_ids, input_mask)
    ```
    """
    def __init__(self, config, num_choices):
        super(BertForMultipleChoice, self).__init__(config)
        self.num_choices = num_choices
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One scalar score per choice; scores are regrouped per example below.
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        # Flatten [batch, num_choices, seq_len] -> [batch * num_choices, seq_len]
        # so every choice runs through BERT as an independent sequence.
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
        # Bug fix: these arguments are documented and declared as optional, but
        # the original dereferenced them unconditionally and crashed on None.
        # Forward None through, consistent with the other task heads.
        flat_token_type_ids = (token_type_ids.view(-1, token_type_ids.size(-1))
                               if token_type_ids is not None else None)
        flat_attention_mask = (attention_mask.view(-1, attention_mask.size(-1))
                               if attention_mask is not None else None)
        _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, self.num_choices)

        if labels is not None:
            loss_fct = CrossEntropyLoss()
            return loss_fct(reshaped_logits, labels)
        return reshaped_logits
+ if `labels` is `None`: + Outputs the classification logits of shape [batch_size, sequence_length, num_labels]. + + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + num_labels = 2 + + model = BertForTokenClassification(config, num_labels) + logits = model(input_ids, token_type_ids, input_mask) + ``` + """ + def __init__(self, config, num_labels): + super(BertForTokenClassification, self).__init__(config) + self.num_labels = num_labels + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, num_labels) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None): + encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask) + sequence_output = encoded_layers[-1] + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + if labels is not None: + loss_fct = CrossEntropyLoss() + # Only keep active parts of the loss + if attention_mask is not None: + active_loss = attention_mask.view(-1) == 1 + active_logits = logits.view(-1, self.num_labels)[active_loss] + active_labels = labels.view(-1)[active_loss] + loss = loss_fct(active_logits, active_labels) + else: + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + return loss + else: + return logits + + +class BertForQuestionAnswering(BertPreTrainedModel): + """BERT model for Question Answering (span extraction). 
class BertForQuestionAnswering(BertPreTrainedModel):
    """BERT model for Question Answering (span extraction).

    BERT with a linear layer over the last hidden layer producing, per
    position, a start score and an end score for the answer span.

    Params:
        `config`: a BertConfig class instance with the configuration to build a new model.

    Inputs:
        `input_ids`: torch.LongTensor [batch_size, sequence_length] of vocabulary token ids.
        `token_type_ids`: torch.LongTensor [batch_size, sequence_length] in [0, 1].
        `attention_mask`: torch.LongTensor [batch_size, sequence_length] in [0, 1],
            masking padding positions for batches of varying-length sentences.

    Outputs:
        A (start_logits, end_logits) tuple, each of shape [batch_size, sequence_length].
    """
    def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__(config)
        self.bert = BertModel(config)
        # TODO check with Google if it's normal there is no dropout on the token
        # classifier of SQuAD in the TF version
        # self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids, attention_mask):
        encoded_layers, _ = self.bert(input_ids, token_type_ids, attention_mask)
        span_logits = self.qa_outputs(encoded_layers[-1])
        # Split the two output channels into per-position start/end scores.
        start_logits, end_logits = (t.squeeze(-1) for t in span_logits.split(1, dim=-1))
        return start_logits, end_logits
0000000000000000000000000000000000000000..1f8a002c06c9e1c91c463e50aa90d29082d60f2e --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/optimization.py @@ -0,0 +1,168 @@ +############################################################################### +# Copyright (C) 2023 Habana Labs, Ltd. an Intel Company +############################################################################### +# coding=utf-8 +# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. +# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""PyTorch optimization for BERT model.""" + +import math +import torch +from torch.optim import Optimizer +from torch.optim.optimizer import required +from torch.nn.utils import clip_grad_norm_ +from utils import is_main_process + + +def warmup_cosine(x, warmup=0.002): + if x < warmup: + return x/warmup + return 0.5 * (1.0 + torch.cos(math.pi * x)) + +def warmup_constant(x, warmup=0.002): + if x < warmup: + return x/warmup + return 1.0 + +def warmup_linear(x, warmup=0.002): + if x < warmup: + return x/warmup + return max((x - 1. )/ (warmup - 1.), 0.) 
def warmup_poly(x, warmup=0.002, degree=0.5):
    """Polynomial decay schedule (1 - x)**degree after linear warmup."""
    if x < warmup:
        return x / warmup
    return (1.0 - x)**degree


# Schedule names (selectable from the command line) -> schedule functions.
SCHEDULES = {
    'warmup_cosine': warmup_cosine,
    'warmup_constant': warmup_constant,
    'warmup_linear': warmup_linear,
    'warmup_poly': warmup_poly,
}

class BertAdam(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix.

    Unlike standard Adam, this variant applies no bias correction to the
    moment estimates and applies (decoupled) weight decay directly to the
    update, as in the original BERT implementation.

    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (see SCHEDULES). Default: 'warmup_linear'
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """
    def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
                 b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
                 max_grad_norm=1.0):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if schedule not in SCHEDULES:
            raise ValueError("Invalid schedule parameter: {}".format(schedule))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        if not 0.0 <= b1 < 1.0:
            raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
        if not 0.0 <= b2 < 1.0:
            raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
        if not e >= 0.0:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
                        b1=b1, b2=b2, e=e, weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)

    def get_lr(self):
        """Return the scheduled learning rate for each parameter.

        Returns [0] if no step has been taken yet (empty state).
        """
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if len(state) == 0:
                    return [0]
                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                lr.append(lr_scheduled)
        return lr

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['next_m'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['next_v'] = torch.zeros_like(p.data)

                next_m, next_v = state['next_m'], state['next_v']
                beta1, beta2 = group['b1'], group['b2']

                # Add grad clipping.
                # NOTE(review): this clips each parameter's gradient norm
                # individually, not the global norm across all parameters.
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])

                # Decay the first and second moment running average coefficient.
                # In-place operations to update the averages at the same time.
                # Fix: use the keyword alpha/value overloads; the positional
                # (Number, Tensor) forms are deprecated and rejected by
                # recent PyTorch releases.
                next_m.mul_(beta1).add_(grad, alpha=1 - beta1)
                next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                update = next_m / (next_v.sqrt() + group['e'])

                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                if group['weight_decay'] > 0.0:
                    update += group['weight_decay'] * p.data

                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']

                update_with_lr = lr_scheduled * update
                p.data.add_(-update_with_lr)

                state['step'] += 1

        return loss
+############################################################################### +import os +import time +import argparse +import random +import h5py +from tqdm import tqdm, trange +import os +import numpy as np +import torch +from torch.utils.data import Dataset +from scipy import optimize +from itertools import repeat, chain +from functools import lru_cache, reduce +from collections import defaultdict, OrderedDict +from concurrent.futures import ProcessPoolExecutor +import gc +import json + +@lru_cache(maxsize=None) +def packing_strategies(start, previous, target, depth): + gap = target - start + strategies = [] + # Complete the packing with exactly 1 number + if depth == 1: + if gap >= previous: + strategies.append([gap]) + # Complete the sample in "depth" steps, recursively + else: + for new in range(previous, gap + 1): + new_gap = target - start - new + if new_gap == 0: + strategies.append([new]) + else: + options = packing_strategies(start + new, new, target, depth - 1) + for option in options: + if len(option) > 0: + strategies.append([new] + option) + return strategies + +def create_json_metadata( + seqeunces_dropped, + num_strategies_utilized, + new_number_of_samples, + original_number_of_samples, + compression_ratio, + expected_speedup, + theoretical_speedup, + avg_sequence_per_sample, + padding_tokens_packed_dataset, + padding_tokens_original_dataset, + packing_efficiency, + top_8_strategies): + # convert to json serrializable format + top_8_strategies = top_8_strategies.tolist() + packing_efficiency = float(packing_efficiency) + padding_tokens_original_dataset = int(padding_tokens_original_dataset) + padding_tokens_packed_dataset = float(padding_tokens_packed_dataset) + avg_sequence_per_sample = float(avg_sequence_per_sample) + theoretical_speedup = float(theoretical_speedup) + json_object = json.dumps( + {'number_of_sequences_dropped': seqeunces_dropped, + 'number_of_strategies_utilized': num_strategies_utilized, + 'new_number_of_samples': 
def get_packing_recipe(output_dir, sequence_lengths, max_sequence_length, max_sequences_per_pack=3):
    """Solve for a packing recipe over the observed sequence lengths.

    Builds a histogram of sequence lengths, enumerates all packing
    strategies (multisets of lengths summing to max_sequence_length), and
    solves A @ mixture ~= histogram by iterated non-negative least squares,
    adding padding sequences where the integer rounding leaves a deficit.
    Also writes a JSON metadata summary next to `output_dir`.

    Returns:
        (strategy_set, mixture, padding, slicing) where `mixture[i]` is how
        many times strategy i is used, `padding[l-1]` is how many padding
        sequences of length l must be fabricated, and `slicing` holds the
        per-strategy start offsets into the per-length example lists.
    """
    # Histogram of sequence lengths
    histogram, bins = np.histogram(sequence_lengths, bins=np.arange(1, max_sequence_length + 2))
    print("Begin packing pass".center(80, "_"))
    print(f"Unpacked mean sequence length: {sequence_lengths.mean():3.2f}")

    # Make sure all strategies are recipes to pack to the correct sequence length
    strategy_set = packing_strategies(0, 1, max_sequence_length, max_sequences_per_pack)
    for strategy in strategy_set:
        assert(sum(strategy) == max_sequence_length)
    num_strategies = len(strategy_set)
    print(f"Found {num_strategies} unique packing strategies.")
    # Solve the packing equation A@mixture = histogram
    # A[l-1, i] counts how many sequences of length l strategy i consumes.
    A = np.zeros((max_sequence_length, num_strategies), dtype=np.int32)
    for i in range(num_strategies):
        strategy = strategy_set[i]
        for seq_len in strategy:
            A[seq_len - 1, i] += 1
    # short sequences are inexpensive to add, so should have low residual weights
    # to exactly minimize padding use w0 = np.arange(1, max_sequence_length + 1)
    # in practice the difference is negligible, but this converges faster
    padding_cutoff = 8
    w0 = np.ones([max_sequence_length])
    # w0 = np.linspace(1, max_sequence_length+1, max_sequence_length)/max_sequence_length # padding minimization weight
    w0[:padding_cutoff] = padding_cutoff / (2 * max_sequence_length)
    w0 = np.sqrt(w0)
    # Starting values for the padding and the mixture
    padding = np.zeros([max_sequence_length], dtype=np.int32)
    mixture = np.zeros([num_strategies], dtype=np.int32)
    b = histogram + padding
    # Pack sequences as best as possible, then increase padding accordingly and repeat
    for i in range(0, 20):
        print(f"\nIteration: {i}: sequences still to pack: ", b.sum())
        start = time.time()
        # Weighted NNLS: w0 re-weights rows so short-sequence residuals cost less.
        partial_mixture, rnorm = optimize.nnls(np.expand_dims(w0, -1) * A, w0 * b)
        print(f"Solving nnls took {time.time() - start:3.2f} seconds.")
        print(f"Residual norm: {rnorm:3.5e}")
        # Update mixture (round the floating point solution to integers)
        partial_mixture = np.where(partial_mixture < 2, np.rint(partial_mixture), np.floor(partial_mixture))
        # If partial mixture is empty (due to rounding) we follow the gradient
        # this usually happens when the number of examples is small i.e. ~100
        if partial_mixture.max() == 0:
            grad = A.T @ (b * np.arange(1, max_sequence_length + 1))
            k = int(b.sum() // 2) + 1
            topk = np.argsort(-grad)[:k]
            partial_mixture[topk] += 1
        # Update mixture
        mixture = mixture + partial_mixture
        # Compute the residuals
        residual = b - A @ partial_mixture
        print(f"Max residual: {abs(residual).max()}")
        print(f"Residual on first 8 categories: {np.around(residual[:8], 4)}")
        print(f"Residual on last 8 categories: {np.around(residual[-8:], 4)}")
        # Add padding based on deficit (negative residual)
        partial_padding = np.where(residual < 0, -residual, 0)
        print(f"Added {(partial_padding*np.arange(1,max_sequence_length+1)).sum():3.2e} tokens of padding.")
        padding = padding + partial_padding
        # Update the rhs vector (remaining surplus sequences)
        b = histogram + padding - A @ mixture
        assert np.all(b >= 0), b
        # Done iterating
        if b.sum() < 100:
            break
    # Make sure there is no remainder
    unpacked_seqlen = np.arange(1, max_sequence_length + 1)[b > 0]
    # Update the mixture to also covered the unpacked sequences
    for l in unpacked_seqlen:
        # Get the depth 1 strategy
        strategy = sorted([l, max_sequence_length - l])
        strategy_index = strategy_set.index(strategy)
        mixture[strategy_index] += b[l-1]
    b = histogram - A @ mixture
    padding = np.where(b < 0, -b, 0)
    b = histogram + padding - A @ mixture
    assert b.sum() == 0
    # Analyze result
    print("Done solving for packing order".center(80, "_"))
    num_padding_tokens = (np.arange(1, max_sequence_length + 1) * padding).sum()
    num_padding_tokens_original = (max_sequence_length - sequence_lengths).sum()
    number_of_sequences_dropped = b.sum()
    print(f"Number of sequences dropped: {number_of_sequences_dropped}")
    number_of_strategies_utilized = np.count_nonzero(mixture)
    print(f"Number of strategies utilized: {number_of_strategies_utilized}")
    new_number_of_samples = int(mixture.sum())
    original_number_of_samples = len(sequence_lengths)
    compression = 1 - new_number_of_samples / original_number_of_samples
    print(f"New number of samples: {new_number_of_samples:3.2f}, original {original_number_of_samples}. A compression ratio of {compression:3.3f}")
    expected_speedup_from_packing = 1 / (1 - compression)
    print(f"The expected speed-up from packing: {expected_speedup_from_packing}")
    upper_bound = 1.0 / (1 - ((1 - sequence_lengths / max_sequence_length).mean()))
    print(f"Theoretical upper bound on speed-up: {upper_bound:3.3f}")
    avg_sequences_per_sample = ((A.sum(0) * mixture).sum() - padding.sum()) / new_number_of_samples
    print(f"Average sequences/sample {avg_sequences_per_sample:3.5f}")
    print(f"Added {num_padding_tokens:3.2e} padding tokens. Original dataset used {num_padding_tokens_original:3.2e} padding tokens")
    efficiency = (new_number_of_samples*max_sequence_length - num_padding_tokens)/(new_number_of_samples*max_sequence_length)
    print(f"Packing efficiency (fraction of real tokens): {efficiency:3.4f}")
    print(f"Top 8 strategies")
    topK = np.argsort(-mixture)[:8]
    for i in topK:
        print(f"Strategy {strategy_set[i]} which is used {int(mixture[i])} times")
    print("".center(80, "_"))
    # Figure out the slicing that each strategy should use
    slicing = np.zeros_like(A)
    slicing[:, 1:] = np.cumsum(A * mixture, axis=1)[:, :-1]
    slicing = slicing.T
    mixture = mixture.astype(np.int64)
    # Write the run metadata next to (one level above) the output directory.
    norm_path = os.path.normpath(output_dir)
    head_tail = os.path.split(norm_path)
    metadata_file_name = head_tail[1]
    metadata_file_name = metadata_file_name + '_metadata.json'
    metadata_file_path = os.path.join(head_tail[0],metadata_file_name)
    print(f"Saving metadata to file: {metadata_file_path}")
    with open(metadata_file_path,mode='w') as file_handle:
        # NOTE(review): top_8_strategies here is the array of argsort
        # *indices* into strategy_set, not the strategies themselves —
        # confirm this is what consumers of the metadata expect.
        json_content = create_json_metadata(seqeunces_dropped=int(number_of_sequences_dropped),
                                            num_strategies_utilized=number_of_strategies_utilized,
                                            new_number_of_samples=new_number_of_samples,
                                            original_number_of_samples=original_number_of_samples,
                                            compression_ratio=compression,
                                            expected_speedup=expected_speedup_from_packing,
                                            theoretical_speedup=upper_bound,
                                            avg_sequence_per_sample=avg_sequences_per_sample,
                                            padding_tokens_original_dataset=num_padding_tokens_original,
                                            padding_tokens_packed_dataset=num_padding_tokens,
                                            packing_efficiency=efficiency,
                                            top_8_strategies=topK)
        file_handle.write(json_content)
    return strategy_set, mixture, padding, slicing
slicing, repeat_counts): + if repeat_count == 0: + continue + # Slice out the sequences allocated to this strategy in increments of 50k + subcounts = (min(1, repeat_count - 1 * (i - 1)) for i in range(1, repeat_count + 1)) + for part_id, part_count in enumerate(subcounts): + for k, seq_len in enumerate(strategy): + slice_start = int(slice_offsets[seq_len - 1]) + slice_end = slice_start + int(part_count) + slice_offsets[seq_len - 1] = slice_end + strategies_slices[str(strategy)+'_'+str(seq_len)].append([slice_start,slice_end]) + + slices = [] + examples_batch = [] + slice_offsets=slicing[0] + total_num_samples=[len(examples_by_length[sl]) for sl in examples_by_length.keys()] + suffle_samples_ind=np.random.permutation(sum(repeat_counts)) + strategies = [[st]*rp for st,rp in zip(strategy_set,repeat_counts)] + strategies = list(chain.from_iterable(strategies)) + num_sample_per_slice=4480 + counter=0; count_samples=0 + + for ind in suffle_samples_ind: + strategy=strategies[ind] + if len(strategy) == 0: + continue + # Slice out the sequences allocated to this strategy in increments of 50k + counter+=1 + examples=[] + for k, seq_len in enumerate(strategy): + count_samples+=1 + [slice_start,slice_end]=strategies_slices[str(strategy)+'_'+str(seq_len)].pop() + examples.append(examples_by_length[seq_len][slice_start:slice_end][0]) + + examples_batch.append(examples) + if counter%num_sample_per_slice==0: + slices.append(examples_batch) + examples_batch=[] + assert sum(total_num_samples)==count_samples, "Possibly not using all samples" + examples_by_length = None + return slices + + +def parallel_pack_according_to_strategy(args, part_idx, examples): + # Pack the sequences according to the strategy and write them to disk + filename = os.path.join(args.output_dir, "mixed_strategies_part_%d.hdf5"%part_idx) + features = defaultdict(list) + for inst_index, multi_sequence in enumerate(examples): + features_packed = create_multi_sequence_example(multi_sequence, 
args.max_predictions_per_sequence, + args.max_sequence_length, args.max_sequences_per_pack) + #if features_packed['next_sentence_weights'].sum()>1: + # print(features_packed['next_sentence_weights'],filename) + features["input_ids"].append(features_packed["input_ids"]) + features["input_mask"].append(features_packed["input_mask"]) + features["segment_ids"].append(features_packed["segment_ids"]) + features["positions"].append(features_packed["positions"]) + features["masked_lm_positions"].append(features_packed["masked_lm_positions"]) + features["masked_lm_ids"].append(features_packed["masked_lm_ids"]) + features["next_sentence_positions"].append(features_packed["next_sentence_positions"]) + features["next_sentence_labels"].append(features_packed["next_sentence_labels"]) + features["next_sentence_weights"].append(features_packed["next_sentence_weights"]) + f= h5py.File(filename, 'w') + f.create_dataset("input_ids", data=np.array(features["input_ids"]), dtype='i4', compression='gzip') + f.create_dataset("input_mask", data=np.array(features["input_mask"]), dtype='i4', compression='gzip') + f.create_dataset("segment_ids", data=np.array(features["segment_ids"]), dtype='i1', compression='gzip') + f.create_dataset("positions", data=np.array(features["positions"]), dtype='i4', compression='gzip') + f.create_dataset("masked_lm_positions", data=np.array(features["masked_lm_positions"]), dtype='i4', compression='gzip') + f.create_dataset("masked_lm_ids", data=np.array(features["masked_lm_ids"]), dtype='i4', compression='gzip') + f.create_dataset("next_sentence_positions", data=np.array(features["next_sentence_positions"]), dtype='i4', compression='gzip') + f.create_dataset("next_sentence_labels", data=np.array(features["next_sentence_labels"]), dtype='i1', compression='gzip') + f.create_dataset("next_sentence_weights", data=np.array(features["next_sentence_weights"]), dtype='i4', compression='gzip') + f.flush() + f.close() + + +def 
def create_multi_sequence_example(multi_sequence, max_predictions_per_sequence, max_sequence_length, max_sequences_per_pack):
    """Concatenate up to max_sequences_per_pack sequences into one packed example.

    Each input sequence keeps its own segment ids and position ids (positions
    restart at 0 per sequence); the packed input mask stores the 1-based
    sequence index rather than 0/1 so the sequences remain distinguishable.
    Padding slots in `multi_sequence` are represented by None and leave the
    corresponding packed regions zeroed.

    Returns:
        OrderedDict of packed feature arrays (input_ids, input_mask,
        segment_ids, positions, masked_lm_*, next_sentence_*).
    """
    # SEQ
    packed_input_ids = np.zeros(max_sequence_length, dtype=np.int32)
    packed_input_mask = np.zeros(max_sequence_length, dtype=np.int32)
    packed_segment_ids = np.zeros(max_sequence_length, dtype=np.int32)
    packed_positions = np.zeros(max_sequence_length, dtype=np.int32)
    # MLM
    # we are packing up to max_sequences_per_pack, each with a certain percentage of masked tokens
    # in case that percentage is rounded up for all sequences in the pack, need to add an extra token for
    # each sequence in the pack
    packed_masked_lm_positions = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)
    packed_masked_lm_ids = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)
    #packed_masked_lm_weights = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)
    # NSP
    packed_next_sentence_positions = np.zeros(max_sequences_per_pack, dtype=np.int32)
    packed_next_sentence_labels = np.zeros(max_sequences_per_pack, dtype=np.int32)
    packed_next_sentence_weights = np.zeros(max_sequences_per_pack, dtype=np.int32)

    offset = 0          # next free token position in the packed sequence
    mlm_offset = 0      # next free slot in the packed MLM arrays
    sequence_index = 1  # used in the input mask (1-based per-sequence id)
    for sequence in multi_sequence:
        # Padding sequences are denoted with None
        if sequence is not None:
            input_ids = np.array(sequence['input_ids'])
            input_mask = np.array(sequence['input_mask'])
            segment_ids = np.array(sequence['segment_ids'])
            masked_lm_positions = np.array(sequence['masked_lm_positions'])
            masked_lm_ids = np.array(sequence['masked_lm_ids'])
            #masked_lm_weights = np.array(sequence['masked_lm_weights'])
            next_sentence_labels = np.array(sequence['next_sentence_labels'])

            #input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids, masked_lm_weights, next_sentence_labels = sequence
            seq_len = input_mask.sum()
            # SEQ
            packed_input_ids[offset:offset + seq_len] = input_ids[:seq_len]
            packed_input_mask[offset:offset + seq_len] = sequence_index
            packed_segment_ids[offset:offset + seq_len] = segment_ids[:seq_len]
            packed_positions[offset:offset + seq_len] = np.arange(0, seq_len)
            # MLM
            # Count real predictions by non-zero ids (id 0 marks padding).
            mlm_len= (masked_lm_ids!=0).sum()
            #mlm_len = int(masked_lm_weights.sum())
            assert mlm_offset + mlm_len < max_predictions_per_sequence + max_sequences_per_pack, "Too many LM predictions per sequences"
            max_mlm = mlm_offset + mlm_len
            # MLM positions are shifted by this sequence's start offset.
            packed_masked_lm_positions[mlm_offset:max_mlm] = offset + masked_lm_positions[:mlm_len]
            packed_masked_lm_ids[mlm_offset:max_mlm] = masked_lm_ids[:mlm_len]
            #packed_masked_lm_weights[mlm_offset:max_mlm] = sequence_index
            # NSP
            packed_next_sentence_positions[sequence_index - 1] = offset
            packed_next_sentence_labels[sequence_index - 1] = next_sentence_labels
            packed_next_sentence_weights[sequence_index - 1] = 1
            # Update offsets
            sequence_index += 1
            offset += seq_len
            mlm_offset = max_mlm
            # Drop references early to keep peak memory down during packing.
            input_ids = None; input_mask = None; segment_ids = None; masked_lm_positions = None;
            masked_lm_ids = None; next_sentence_labels = None; seq_len = None
    # Pack into tfrecord format:

    features = OrderedDict()

    features["input_ids"] = packed_input_ids
    features["input_mask"] = packed_input_mask
    features["segment_ids"] = packed_segment_ids
    features["positions"] = packed_positions
    features["masked_lm_positions"] = packed_masked_lm_positions
    features["masked_lm_ids"] = packed_masked_lm_ids
    features["next_sentence_positions"] = packed_next_sentence_positions
    features["next_sentence_labels"] = packed_next_sentence_labels
    features["next_sentence_weights"] = packed_next_sentence_weights
    del packed_input_ids; del packed_input_mask; del packed_segment_ids; del packed_positions; del packed_masked_lm_positions; del packed_masked_lm_ids;
    del packed_next_sentence_positions; del packed_next_sentence_labels; del packed_next_sentence_weights

    return features
pretraining_dataset(Dataset): + def __init__(self, input_file, max_pred_length): + self.input_file = input_file + self.max_pred_length = max_pred_length + f = h5py.File(input_file, "r") + keys = ['input_ids', 'input_mask', 'segment_ids', 'masked_lm_positions', 'masked_lm_ids', + 'next_sentence_labels'] + self.keys_exist = list(f.keys()) + self.inputs = [np.asarray(f[key][:]) for key in keys] + self.len_dict={} + for key in keys: + self.len_dict[key] = np.asarray(f[key][:]).shape + + f.close() + + def __len__(self): + 'Denotes the total number of samples' + return len(self.inputs[0]) + + def __getitem__(self, index): + [input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids,next_sentence_labels] = [input[index] if indice < 5 else + np.asarray(input[index]) for indice, input in enumerate(self.inputs)] + + return [input_ids, input_mask, segment_ids,masked_lm_positions, masked_lm_ids, + next_sentence_labels] + +class WorkerInitObj(object): + def __init__(self, seed): + self.seed = seed + def __call__(self, id): + np.random.seed(seed=self.seed + id) + random.seed(self.seed + id) +def parse_arguments(): + parser = argparse.ArgumentParser() + ## Required parameters + parser.add_argument("--input_dir", + default=None, + type=str, + required=True, + help="The input data dir. Should contain .hdf5 files for the task.") + parser.add_argument("--max_sequence_length", + default=512, + type=int, + help="The maximum total input sequence length after WordPiece tokenization. 
\n" + "Sequences longer than this will be truncated, and sequences shorter \n" + "than this will be padded.") + parser.add_argument("--max_predictions_per_sequence", + default=80, + type=int, + help="The maximum total of masked tokens in input sequence") + parser.add_argument("--max_sequences_per_pack", + default=3, + type=int, + help="The maximum number of sequences to pack in multi-sequence") + parser.add_argument("--train_batch_size", + default=8, + type=int, + help="Total batch size for training.") + parser.add_argument("--output_dir", + default=None, + type=str, + required=True, + help="The output directory where the packed dataset will be written.") + parser.add_argument('--seed', + type=int, + default=42, + help="random seed for initialization") + parser.add_argument("--local_rank", + type=int, + default=os.getenv('LOCAL_RANK', -1), + help="local_rank for distributed training on gpus") + parser.add_argument('--disable_progress_bar', + default=False, + action='store_true', + help='Disable tqdm progress bar') + args = parser.parse_args() + return args + +def main(): + global timeout_sent + args = parse_arguments() + random.seed(args.seed + args.local_rank) + np.random.seed(args.seed + args.local_rank) + torch.manual_seed(args.seed + args.local_rank) + torch.cuda.manual_seed(args.seed + args.local_rank) + worker_init = WorkerInitObj(args.seed + args.local_rank) + + device = torch.device("cpu") + print("args.max_sequence_length={}, args.max_sequences_per_pack={},args.max_predictions_per_sequence={}".format(args.max_sequence_length, args.max_sequences_per_pack,args.max_predictions_per_sequence)) + + files = [os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir) if + os.path.isfile(os.path.join(args.input_dir, f)) and 'training' in f] + print("files={}".format(files)) + sequence_lengths = [] + examples_by_length = defaultdict(list) + print("Looping through dataset to collect sequence length information...") + + for f_id in range(len(files)): + 
#single card + data_file = files[f_id] + print("-- loading data_file={}".format(data_file)) + train_data = pretraining_dataset(data_file, args.max_predictions_per_sequence) + for step, batch in enumerate(train_data): + input_ids, input_mask, segment_ids,masked_lm_positions, masked_lm_ids, next_sentence_labels = batch + features = OrderedDict() + features["input_ids"] = input_ids + features["input_mask"] = input_mask + features["segment_ids"] = segment_ids + features["masked_lm_positions"] = masked_lm_positions + features["masked_lm_ids"] = masked_lm_ids + #features["masked_lm_weights"] = masked_lm_weights + features["next_sentence_labels"] = next_sentence_labels + im_length = sum(input_mask) + examples_by_length[im_length].append(features) + sequence_lengths.append(im_length) + sequence_lengths = np.array(sequence_lengths) + # Pass the array of sequence lengths to the packing algorithm + + strategy_set, mixture, padding, slicing = get_packing_recipe(args.output_dir, sequence_lengths, args.max_sequence_length, args.max_sequences_per_pack) + + # Add the calculated padding + for i in range(1, args.max_sequence_length + 1): + if i not in examples_by_length.keys(): + examples_by_length[i]=[] + examples_by_length[i].extend([None] * int(padding[i - 1])) + # Shuffle the data + for key in examples_by_length: + random.shuffle(examples_by_length[key]) + # Pack and store the data + print(f"\nPacking and writing packed dataset to {args.output_dir}.") + # Slice the data into chunks of max 50k packed examples + example_slices = slice_examples_mult_stratagies_shuffle(examples_by_length, slicing, strategy_set, mixture) + part_idx = [i for i in range(len(example_slices))] + gc.collect() + print('Done slice_examples !!!') + del examples_by_length; del slicing; del strategy_set; del mixture + gc.collect() + start = time.time() + print(f"Splitting work into {len(part_idx)} parts.") + + split_write_sessions_size = 1000 + for rr in range(1+len(example_slices)//split_write_sessions_size): 
+ print(rr,'out of',1+len(example_slices)//split_write_sessions_size) + str_idx,stp_idx=rr*split_write_sessions_size,min((rr+1)*split_write_sessions_size,len(example_slices)) + example_slices_prt,part_idx_prt = example_slices[str_idx:stp_idx], part_idx[str_idx:stp_idx] + with ProcessPoolExecutor(50) as executor: + work = repeat(args), part_idx_prt, example_slices_prt + for partial_result in executor.map(parallel_pack_according_to_strategy, *work): + pass + print('------') + del work + print(f"\nDone. Took: {time.time() - start:3.2f} seconds to pack and write dataset.") + print('-------------',str_idx,stp_idx) + print('Done Cleaning') +if __name__ == "__main__": + main() diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/pytorch_packed_data_checker.py b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/pytorch_packed_data_checker.py new file mode 100644 index 0000000000000000000000000000000000000000..08454e47994c68a2d2970ea75630633a42457d5f --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/pytorch_packed_data_checker.py @@ -0,0 +1,99 @@ +############################################################################### +# Copyright (c) 2021, Habana Labs Ltd. All rights reserved. 
+############################################################################### +import os +import time +import argparse +import random +import h5py +from tqdm import tqdm, trange +import os +import numpy as np +import torch +from torch.utils.data import Dataset +from functools import lru_cache, reduce +from itertools import repeat, chain +from collections import defaultdict +from concurrent.futures import ProcessPoolExecutor + +class pretraining_dataset(Dataset): + + def __init__(self, input_file, max_pred_length): + self.input_file = input_file + self.max_pred_length = max_pred_length + f = h5py.File(input_file, "r") + keys = ['input_ids', 'input_mask', 'segment_ids', 'positions', + 'masked_lm_positions', 'masked_lm_ids', + 'next_sentence_positions', 'next_sentence_labels', 'next_sentence_weights'] + self.inputs = [np.asarray(f[key][:]) for key in keys] + f.close() + + def __len__(self): + 'Denotes the total number of samples' + return len(self.inputs[0]) + + def __getitem__(self, index): + + [input_ids, input_mask, segment_ids, positions, + masked_lm_positions, masked_lm_ids, + next_sentence_positions, next_sentence_labels, next_sentence_weights] = [torch.from_numpy(input[index].astype(np.int64)) for input in self.inputs] + + masked_lm_labels = torch.ones(input_ids.shape, dtype=torch.long) * -1 + index = self.max_pred_length + # store number of masked tokens in index + padded_mask_indices = (masked_lm_positions == 0).nonzero() + if len(padded_mask_indices) != 0: + index = padded_mask_indices[0].item() + masked_lm_labels[masked_lm_positions[:index]] = masked_lm_ids[:index] + + next_sentence_labels = (next_sentence_weights == 1) * next_sentence_labels + (next_sentence_weights == 0) * -1 + return [input_ids, segment_ids, input_mask, positions, masked_lm_labels, next_sentence_positions, next_sentence_labels] + +class WorkerInitObj(object): + def __init__(self, seed): + self.seed = seed + def __call__(self, id): + np.random.seed(seed=self.seed + id) + 
def parallel_data_loader(max_predictions_per_sequence, data_file):
    """Validate a single packed-data HDF5 shard by iterating every sample.

    Each sample is unpacked into the expected 7 fields; any failure while
    opening or reading the file marks the shard as bad.

    Returns:
        True if every sample in ``data_file`` loads and unpacks cleanly,
        False otherwise (the offending file path is printed).
    """
    try:
        # BUG FIX: the dataset construction (h5py open) used to sit outside
        # the try, so a corrupt/truncated file crashed the worker instead of
        # being reported as a bad shard.
        train_data = pretraining_dataset(data_file, max_predictions_per_sequence)
        for step, batch in enumerate(train_data):
            # Unpacking verifies every sample has the expected field layout.
            input_ids, segment_ids, input_mask, positions, masked_lm_labels, \
                next_sentence_positions, next_sentence_labels = batch
        print(data_file)
        return True
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and made the process pool unstoppable.
        print('Issue with file: %s' % data_file)
        return False
+boto3==1.26.75 +# Used for downloading models over HTTP +ipdb==0.13.9 +#Data processing +h5py==3.9.0 +html2text==2020.1.16 +nltk>=3.7.0 +progressbar==2.5 +#Others +onnxruntime==1.14.0 +git+https://github.com/NVIDIA/dllogger.git@26a0f8f1958de2c0c460925ff6102a4d2486d6cc diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/run_pretraining.py b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/run_pretraining.py new file mode 100644 index 0000000000000000000000000000000000000000..eb820b0e386d4e7de460d9a6a94d58343509ee12 --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/run_pretraining.py @@ -0,0 +1,1049 @@ +# coding=utf-8 +# Copyright (c) 2021, Habana Labs Ltd. All rights reserved. +# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. +# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""BERT finetuning runner.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# ================== +import time +import argparse +import random +from builtins import ValueError + +import h5py +from tqdm import tqdm +import os +import numpy as np +import torch +from enum import Enum +from torch.utils.data import DataLoader, RandomSampler, Dataset +from torch.utils.tensorboard import SummaryWriter +from torch.distributed.optim import ZeroRedundancyOptimizer +import sys +import json +import warnings + +import modeling +from schedulers import PolyWarmUpScheduler + +from utils import is_main_process, format_step, get_world_size, get_rank, mkdir, repair_checkpoint +from schedulers import LinearWarmUpScheduler + +try: + from apex import amp + from apex.optimizers import FusedLAMB + from apex.parallel import DistributedDataParallel as DDP + from apex.parallel.distributed import flat_dist_call + import amp_C + import apex_C + from apex.amp import _amp_state +except ImportError: + if torch.cuda.is_available(): + raise ImportError("Please install apex from " + "https://www.github.com/nvidia/apex") + else: + from torch.nn.parallel import DistributedDataParallel as DDP + +from lamb import NVLAMB + +import dllogger +from concurrent.futures import ProcessPoolExecutor + +torch._C._jit_set_profiling_mode(False) +torch._C._jit_set_profiling_executor(False) + +skipped_steps = 0 +avg_seq_per_pack = 1.0 + +# Track whether a SIGTERM (cluster time up) has been handled +timeout_sent = False + +import signal +# handle SIGTERM sent from the scheduler and mark so we +# can gracefully save & exit +def signal_handler(sig, frame): + global timeout_sent + timeout_sent = True + +signal.signal(signal.SIGTERM, signal_handler) + +#Workaround because python functions are not picklable +class WorkerInitObj(object): + def __init__(self, seed): + self.seed = seed + def __call__(self, id): + np.random.seed(seed=self.seed + id) + 
def create_pretraining_dataset(input_file, max_pred_length, shared_list, args, worker_init):
    """Build a shuffled DataLoader over one pre-training HDF5 shard.

    Returns the dataloader together with the shard path so the caller can
    track which input file is currently being consumed.
    """
    # Habana devices do dataloading in the main process; CUDA/CPU use 4 workers.
    worker_count = 0 if args.use_habana else 4
    dataset = pretraining_dataset(
        input_file=input_file,
        max_pred_length=max_pred_length,
        enable_packed_data_mode=args.enable_packed_data_mode,
    )
    loader = DataLoader(
        dataset,
        sampler=RandomSampler(dataset),
        batch_size=args.train_batch_size * args.n_pu,
        num_workers=worker_count,
        worker_init_fn=worker_init,
        drop_last=True,
        pin_memory=True,
    )
    return loader, input_file
class BertPretrainingCriterion(torch.nn.Module):
    """Combined BERT pre-training loss.

    Sums the masked-LM cross-entropy over the vocabulary with the binary
    next-sentence-prediction cross-entropy. Positions labeled -1 are
    ignored in both terms.
    """

    def __init__(self, vocab_size):
        super(BertPretrainingCriterion, self).__init__()
        # ignore_index=-1 skips padded / unmasked label positions.
        self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=-1)
        self.vocab_size = vocab_size

    def forward(self, prediction_scores, seq_relationship_score, masked_lm_labels, next_sentence_labels):
        flat_scores = prediction_scores.view(-1, self.vocab_size)
        flat_labels = masked_lm_labels.view(-1)
        mlm_loss = self.loss_fn(flat_scores, flat_labels)
        nsp_loss = self.loss_fn(seq_relationship_score.view(-1, 2),
                                next_sentence_labels.view(-1))
        return mlm_loss + nsp_loss
Should contain .hdf5 files for the task.") + + parser.add_argument("--config_file", + default=None, + type=str, + required=True, + help="The BERT model config") + + parser.add_argument("--bert_model", default="bert-large-uncased", type=str, + help="Bert pre-trained model selected in the list: bert-base-uncased, " + "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") + + parser.add_argument("--output_dir", + default=None, + type=str, + required=True, + help="The output directory where the model checkpoints will be written.") + + ## Other parameters + parser.add_argument("--init_checkpoint", + default=None, + type=str, + help="The initial checkpoint to start training from.") + + parser.add_argument("--max_seq_length", + default=512, + type=int, + help="The maximum total input sequence length after WordPiece tokenization. \n" + "Sequences longer than this will be truncated, and sequences shorter \n" + "than this will be padded.") + parser.add_argument("--max_predictions_per_seq", + default=80, + type=int, + help="The maximum total of masked tokens in input sequence") + parser.add_argument("--train_batch_size", + default=32, + type=int, + help="Total batch size for training.") + parser.add_argument("--learning_rate", + default=5e-5, + type=float, + help="The initial learning rate for Adam.") + parser.add_argument("--num_train_epochs", + default=3.0, + type=float, + help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", + default=1000, + type=float, + help="Total number of training steps to perform.") + parser.add_argument("--warmup_proportion", + default=0.01, + type=float, + help="Proportion of training to perform linear learning rate warmup for. 
" + "E.g., 0.1 = 10%% of training.") + parser.add_argument("--local_rank", + type=int, + default=os.getenv('LOCAL_RANK', -1), + help="local_rank for distributed training on gpus") + parser.add_argument('--seed', + type=int, + default=42, + help="random seed for initialization") + parser.add_argument('--gradient_accumulation_steps', + type=int, + default=1, + help="Number of updates steps to accumualte before performing a backward/update pass.") + parser.add_argument('--fp16', + default=False, + action='store_true', + help="Mixed precision training") + parser.add_argument('--amp', + default=False, + action='store_true', + help="Mixed precision training") + parser.add_argument('--loss_scale', + type=float, default=0.0, + help='Loss scaling, positive power of 2 values can improve fp16 convergence.') + parser.add_argument('--log_freq', + type=float, default=1.0, + help='frequency of logging loss.') + parser.add_argument('--checkpoint_activations', + default=False, + action='store_true', + help="Whether to use gradient checkpointing") + parser.add_argument("--resume_from_checkpoint", + default=False, + action='store_true', + help="Whether to resume training from checkpoint.") + parser.add_argument('--resume_step', + type=int, + default=-1, + help="Step to resume training from.") + parser.add_argument('--num_steps_per_checkpoint', + type=int, + default=100, + help="Number of update steps until a model checkpoint is saved to disk.") + parser.add_argument('--skip_checkpoint', + default=False, + action='store_true', + help="Whether to save checkpoints") + parser.add_argument('--phase2', + default=False, + action='store_true', + help="Whether to train with seq len 512") + parser.add_argument('--allreduce_post_accumulation', + default=False, + action='store_true', + help="Whether to do allreduces during gradient accumulation steps.") + parser.add_argument('--allreduce_post_accumulation_fp16', + default=False, + action='store_true', + help="Whether to do fp16 allreduce post 
accumulation.") + parser.add_argument('--phase1_end_step', + type=int, + default=7038, + help="Number of training steps in Phase1 - seq len 128") + parser.add_argument('--init_loss_scale', + type=int, + default=2**20, + help="Initial loss scaler value") + parser.add_argument("--do_train", + default=False, + action='store_true', + help="Whether to run training.") + parser.add_argument('--json-summary', type=str, default="results/dllogger.json", + help='If provided, the json summary will be written to' + 'the specified file.') + parser.add_argument("--use_env", + action='store_true', + help="Whether to read local rank from ENVVAR") + parser.add_argument('--disable_progress_bar', + default=False, + action='store_true', + help='Disable tqdm progress bar') + parser.add_argument('--steps_this_run', type=int, default=-1, + help='If provided, only run this many steps before exiting') + parser.add_argument("--no_cuda", + action='store_true', + help="Whether to use CPU when available") + parser.add_argument("--use_habana", + action="store_true", + help="Whether not to use Habana device when available") + mixed_precision_group = parser.add_mutually_exclusive_group() + mixed_precision_group.add_argument('--autocast', + dest='use_autocast', + action='store_true', + help='enable autocast mode') + parser.add_argument("--use_fused_lamb", + action='store_true', + help='use FusedLamb optimizer') + parser.add_argument("--use_lazy_mode", + default='True', type=lambda x: x.lower() == 'true', + help='[DEPRECATED] Do not use, it has no effect anymore. Instead, set env variable PT_HPU_LAZY_MODE to 1') + parser.add_argument('--enable_packed_data_mode', default='True', type=lambda x: x.lower() == 'true', + help='enable/disable training with packed data. 
def unflatten_tensor(flat, tensor_list):
    """Split a flat 1-D tensor back into views shaped like ``tensor_list``.

    The returned tensors are zero-copy views into ``flat`` (so writes to
    them are visible in ``flat``), each reshaped to match the corresponding
    reference tensor, in order.
    """
    sizes = [ref.numel() for ref in tensor_list]
    chunks = torch.split(flat, sizes)
    return [chunk.view_as(ref) for chunk, ref in zip(chunks, tensor_list)]
args.n_pu = 1 + from habana_frameworks.torch.distributed.hccl import initialize_distributed_hpu + args.world_size, args.rank, args.local_rank = initialize_distributed_hpu() + if args.local_rank != -1: + torch.distributed.init_process_group('hccl', + rank=args.rank, world_size=args.world_size) + if args.local_rank != -1: + args.allreduce_post_accumulation = True + args.allreduce_post_accumulation_fp16 = True + else: + args.allreduce_post_accumulation = False + args.allreduce_post_accumulation_fp16 = False + + elif args.local_rank == -1 or args.no_cuda: + device = torch.device( + "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + if device == torch.device("cuda"): + args.n_pu = torch.cuda.device_count() + else: + args.n_pu = 1 + + args.allreduce_post_accumulation = False + args.allreduce_post_accumulation_fp16 = False + else: + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.distributed.init_process_group(backend='nccl', init_method='env://') + args.n_pu = 1 + + if args.gradient_accumulation_steps == 1: + args.allreduce_post_accumulation = False + args.allreduce_post_accumulation_fp16 = False + + if is_main_process(): + dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE, + filename=args.json_summary), + dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE, step_format=format_step)]) + else: + dllogger.init(backends=[]) + + print("device: {} n_pu: {}, distributed training: {}, 16-bits training: {}".format( + device, args.n_pu, bool(args.local_rank != -1), args.fp16 or args.use_autocast)) + + if args.gradient_accumulation_steps < 1: + raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( + args.gradient_accumulation_steps)) + if args.train_batch_size % args.gradient_accumulation_steps != 0: + raise ValueError("Invalid 
gradient_accumulation_steps parameter: {}, batch size {} should be divisible".format( + args.gradient_accumulation_steps, args.train_batch_size)) + + args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps + + if args.enable_packed_data_mode: + args.gradient_accumulation_steps = round(args.gradient_accumulation_steps / avg_seq_per_pack) + + if not args.do_train: + raise ValueError(" `do_train` must be True.") + + if not args.resume_from_checkpoint and os.path.exists(args.output_dir) and ( + os.listdir(args.output_dir) and any([i.startswith('ckpt') for i in os.listdir(args.output_dir)])): + raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir)) + + if (not args.resume_from_checkpoint or not os.path.exists(args.output_dir)) and is_main_process(): + os.makedirs(args.output_dir, exist_ok=True) + + return device, args + +def prepare_model_and_optimizer(args, device, lazy_mode): + + # Prepare model + config = modeling.BertConfig.from_json_file(args.config_file) + + # Padding for divisibility by 8 + if config.vocab_size % 8 != 0: + config.vocab_size += 8 - (config.vocab_size % 8) + + modeling.ACT2FN["bias_gelu"] = modeling.bias_gelu_training + model = modeling.BertForPreTraining(config) + + checkpoint = None + if not args.resume_from_checkpoint: + global_step = 0 + else: + if args.resume_step == -1 and not args.init_checkpoint: + model_names = [f for f in os.listdir(args.output_dir) if f.endswith(".pt")] + args.resume_step = max([int(x.split('.pt')[0].split('_')[1].strip()) for x in model_names]) + + global_step = args.resume_step if not args.init_checkpoint else 0 + + if not args.init_checkpoint: + checkpoint = torch.load(os.path.join(args.output_dir, "ckpt_{}.pt".format(global_step)), map_location="cpu") + else: + checkpoint = torch.load(args.init_checkpoint, map_location="cpu") + + model.load_state_dict(checkpoint['model'], strict=False) + + if args.phase2 and not args.init_checkpoint: + global_step 
-= args.phase1_end_step + if is_main_process(): + print("resume step from ", args.resume_step) + + model.to(device) + # BERT modeling uses weight sharing between word embedding and prediction decoder. + # So make sure the storage is pointing properly even after model is moved to device. + if args.use_habana: + model.cls.predictions.decoder.weight = model.bert.embeddings.word_embeddings.weight + + param_optimizer = list(model.named_parameters()) + no_decay = ['bias', 'gamma', 'beta', 'LayerNorm'] + + optimizer_grouped_parameters = [ + {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, + {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}] + + if args.use_habana: + if args.use_fused_lamb: + try: + from habana_frameworks.torch.hpex.optimizers import FusedLamb + except ImportError: + raise ImportError("Please install hbopt.") + optimizer_cls = FusedLamb + else: + optimizer_cls = NVLAMB + else: + if torch.cuda.is_available(): + optimizer_cls = FusedLAMB + else: + optimizer_cls = NVLAMB + if args.local_rank != -1 and args.use_zero_optimizer: + optimizer = ZeroRedundancyOptimizer( + optimizer_grouped_parameters[0]['params'], + optimizer_class=optimizer_cls, + lr=args.learning_rate, + weight_decay=optimizer_grouped_parameters[0]['weight_decay']) + for pg in optimizer_grouped_parameters[1:]: + optimizer.add_param_group(pg) + else: + optimizer = optimizer_cls(optimizer_grouped_parameters, + lr=args.learning_rate) + + lr_scheduler = PolyWarmUpScheduler(optimizer, + warmup=args.warmup_proportion, + total_steps=args.max_steps) + if args.fp16: + + if args.loss_scale == 0: + model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale="dynamic", cast_model_outputs=torch.float16) + else: + model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale=args.loss_scale, cast_model_outputs=torch.float16) + amp._amp_state.loss_scalers[0]._loss_scale 
= args.init_loss_scale + + model.checkpoint_activations(args.checkpoint_activations) + + if args.resume_from_checkpoint: + if args.phase2 or args.init_checkpoint: + keys = list(checkpoint['optimizer']['state'].keys()) + #Override hyperparameters from previous checkpoint + for key in keys: + checkpoint['optimizer']['state'][key]['step'] = global_step + for iter, item in enumerate(checkpoint['optimizer']['param_groups']): + checkpoint['optimizer']['param_groups'][iter]['step'] = global_step + checkpoint['optimizer']['param_groups'][iter]['t_total'] = args.max_steps + checkpoint['optimizer']['param_groups'][iter]['warmup'] = args.warmup_proportion + checkpoint['optimizer']['param_groups'][iter]['lr'] = args.learning_rate + optimizer.load_state_dict(checkpoint['optimizer']) # , strict=False) + + # Restore AMP master parameters + if args.fp16: + optimizer._lazy_init_maybe_master_weights() + optimizer._amp_stash.lazy_init_called = True + optimizer.load_state_dict(checkpoint['optimizer']) + for param, saved_param in zip(amp.master_params(optimizer), checkpoint['master params']): + param.data.copy_(saved_param.data) + + if args.use_habana and lazy_mode and args.use_hpu_graphs: + import habana_frameworks.torch.hpu.graphs as htgraphs + htgraphs.ModuleCacher()(model, have_grad_accumulation=True) + + if args.local_rank != -1: + if not args.allreduce_post_accumulation: + if args.use_habana: + model = DDP(model, bucket_cap_mb=230) + else: + model = DDP(model, message_size=250000000, gradient_predivide_factor=get_world_size()) + else: + if args.use_habana: + for param in model.parameters(): + torch.distributed.broadcast(param.data, 0) + else: + flat_dist_call([param.data for param in model.parameters()], torch.distributed.broadcast, (0,) ) + elif args.n_pu > 1: + model = torch.nn.DataParallel(model) + + criterion = BertPretrainingCriterion(config.vocab_size) + + return model, optimizer, lr_scheduler, checkpoint, global_step, criterion + +def take_optimizer_step(args, optimizer, 
model, overflow_buf, global_step): + + global skipped_steps + if args.allreduce_post_accumulation and not args.use_habana: + # manually allreduce gradients after all accumulation steps + # check for Inf/NaN + # 1. allocate an uninitialized buffer for flattened gradient + loss_scale = _amp_state.loss_scalers[0].loss_scale() if args.fp16 else 1 + master_grads = [p.grad for p in amp.master_params(optimizer) if p.grad is not None] + flat_grad_size = sum(p.numel() for p in master_grads) + allreduce_dtype = torch.float16 if args.allreduce_post_accumulation_fp16 else torch.float32 + flat_raw = torch.empty(flat_grad_size, device='cuda', dtype=allreduce_dtype) + # 2. combine unflattening and predivision of unscaled 'raw' gradient + allreduced_views = apex_C.unflatten(flat_raw, master_grads) + overflow_buf.zero_() + amp_C.multi_tensor_scale(65536, + overflow_buf, + [master_grads, allreduced_views], + loss_scale / (get_world_size() * args.gradient_accumulation_steps)) + # 3. sum gradient across ranks. Because of the predivision, this averages the gradient + torch.distributed.all_reduce(flat_raw) + # 4. combine unscaling and unflattening of allreduced gradient + overflow_buf.zero_() + amp_C.multi_tensor_scale(65536, + overflow_buf, + [allreduced_views, master_grads], + 1./loss_scale) + # 5. update loss scale + if args.fp16: + scaler = _amp_state.loss_scalers[0] + old_overflow_buf = scaler._overflow_buf + scaler._overflow_buf = overflow_buf + had_overflow = scaler.update_scale() + scaler._overfloat_buf = old_overflow_buf + else: + had_overflow = 0 + # 6. 
def get_metadata_file_path(input_dir : str) -> str:
    """Return the metadata path for a packed dataset dir: ``<dir>_metadata.json``
    placed next to (not inside) ``input_dir``."""
    norm_path = os.path.normpath(input_dir)
    head_tail = os.path.split(norm_path)
    metadata_file_name = head_tail[1]
    metadata_file_name = metadata_file_name + '_metadata.json'
    metadata_file_path = os.path.join(head_tail[0], metadata_file_name)
    return metadata_file_path

def read_avg_seq_per_sample(input_dir : str, max_sequence_length) -> float:
    """Return the average number of packed sequences per sample.

    Reads the ``<input_dir>_metadata.json`` file produced by the packing
    script. If it is missing, falls back to known defaults for the
    supported sequence lengths (128 -> 1.2, 512 -> 2.0).

    Raises:
        ValueError: metadata is absent and ``max_sequence_length`` is not
            one of the supported values.
        KeyError: the metadata file exists but lacks "avg_seq_per_sample".
    """
    metadata_file_path = get_metadata_file_path(input_dir)
    print(f"Reading dataset metadata from: {metadata_file_path}")
    if not os.path.exists(metadata_file_path):
        print("Packed dataset metadata file not accessible, falling back to default values of avg_seq_per_sample")
        if max_sequence_length == 128:
            return 1.2
        elif max_sequence_length == 512:
            return 2.0
        # BUG FIX: the original `assert f"invalid max_sequence_length"` was a
        # no-op (a non-empty string is always truthy), so unsupported lengths
        # silently fell through and later raised a misleading
        # "key not present" assertion. Raise a real error instead (asserts
        # are also stripped under `python -O`).
        raise ValueError(f"invalid max_sequence_length: {max_sequence_length}")
    # BUG FIX: close the file deterministically; the original leaked the
    # open handle.
    with open(metadata_file_path, mode='r') as file_handle:
        metadata = json.loads(file_handle.read())
    avg_seq_per_sample_key = "avg_seq_per_sample"
    if avg_seq_per_sample_key not in metadata:
        # Raise instead of `assert False` so the check survives `python -O`.
        raise KeyError(f"Key {avg_seq_per_sample_key} not present in packed dataset metadata file: {metadata_file_path}")
    avg_seq_per_sample = metadata[avg_seq_per_sample_key]
    print(f"AVG_SEQ_PER_SAMPLE: {avg_seq_per_sample}")
    return avg_seq_per_sample
PT_HPU_LAZY_MODE={os.getenv('PT_HPU_LAZY_MODE')}. For torch.compile mode, set PT_HPU_LAZY_MODE to 0" + + if args.compiled_autograd: + assert args.use_torch_compile, f"--compiled_autograd can only be used with --use_torch_compile" + from habana_frameworks.torch.dynamo.compile_backend.experimental import enable_compiled_autograd + enable_compiled_autograd() + + # Enable hpu dynamic shape + try: + import habana_frameworks.torch.hpu as hthpu + hthpu.enable_dynamic_shape() + except ImportError: + print("habana_frameworks could not be loaded") + + dllogger.log(step="PARAMETER", data={"Config": [str(args)]}) + + # Prepare optimizer + model, optimizer, lr_scheduler, checkpoint, global_step, criterion = prepare_model_and_optimizer(args, device, lazy_mode) + + if is_main_process(): + dllogger.log(step="PARAMETER", data={"SEED": args.seed}) + + raw_train_start = None + if args.do_train: + if is_main_process(): + dllogger.log(step="PARAMETER", data={"train_start": True}) + dllogger.log(step="PARAMETER", data={"batch_size_per_pu": args.train_batch_size}) + dllogger.log(step="PARAMETER", data={"learning_rate": args.learning_rate}) + + model.train() + most_recent_ckpts_paths = [] + average_loss = 0.0 # averaged loss every args.log_freq steps + epoch = 0 + training_steps = 0 + average_training_time_per_step = 0 + average_perf_per_step = 0 + loss_list = [] + + if device.type == 'cuda': + pool = ProcessPoolExecutor(1) + starting_time = time.time() + + if args.use_torch_compile: + model = torch.compile(model, backend="aot_hpu_training_backend") + + # Note: We loop infinitely over epochs, termination is handled via iteration count + while True: + thread = None + restored_data_loader = None + if not args.resume_from_checkpoint or epoch > 0 or (args.phase2 and global_step < 1) or args.init_checkpoint: + if args.enable_packed_data_mode: + files = [os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir) if + os.path.isfile(os.path.join(args.input_dir, f))] # Packed files have 
no 'training' pre/postfix. + else: + files = [os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir) if + os.path.isfile(os.path.join(args.input_dir, f)) and 'training' in f] + files.sort() + num_files = len(files) + random.Random(args.seed + epoch).shuffle(files) + f_start_id = 0 + else: + f_start_id = checkpoint['files'][0] + files = checkpoint['files'][1:] + args.resume_from_checkpoint = False + num_files = len(files) + # may not exist in all checkpoints + epoch = checkpoint.get('epoch', 0) + restored_data_loader = checkpoint.get('data_loader', None) + + shared_file_list = {} + + if torch.distributed.is_initialized() and get_world_size() > num_files: + remainder = get_world_size() % num_files + data_file = files[(f_start_id*get_world_size()+get_rank() + remainder*f_start_id)%num_files] + else: + data_file = files[(f_start_id*get_world_size()+get_rank())%num_files] + + previous_file = data_file + + if restored_data_loader is None: + num_workers = 0 if args.use_habana else 4 + train_data = pretraining_dataset(data_file, args.max_predictions_per_seq, args.enable_packed_data_mode) + train_sampler = RandomSampler(train_data) + train_dataloader = DataLoader(train_data, sampler=train_sampler, + batch_size=args.train_batch_size * args.n_pu, + num_workers=num_workers, worker_init_fn=worker_init, + drop_last=True, pin_memory=True) + # shared_file_list["0"] = (train_dataloader, data_file) + else: + train_dataloader = restored_data_loader + restored_data_loader = None + + overflow_buf = None + if args.allreduce_post_accumulation and not args.use_habana: + overflow_buf = torch.cuda.IntTensor([0]) + + prof = None + if args.profile: + assert args.profile_steps is not None, "please provide profile_steps argument" + step_words = args.profile_steps.split(":") + assert step_words[0] != '', "please provide valid profile_steps argument" + warmup_steps = int(step_words[0]) - 1 if int(step_words[0]) > 0 else 0 + active_steps = 1 + if len(step_words) == 2: + active_steps = 
int(step_words[1]) - warmup_steps + + assert active_steps > 0 + prof = torch.profiler.profile( + activities=(torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.HPU), + schedule=torch.profiler.schedule(wait=0, warmup=warmup_steps, active=active_steps), + on_trace_ready=torch.profiler.tensorboard_trace_handler(args.tensorboard_logdir), + record_shapes=True, + with_stack=True) + if prof: + prof.start() + + for f_id in range(f_start_id + 1 , len(files)): + + + if get_world_size() > num_files: + data_file = files[(f_id*get_world_size()+get_rank() + remainder*f_id)%num_files] + else: + data_file = files[(f_id*get_world_size()+get_rank())%num_files] + + previous_file = data_file + + if device.type == 'cuda': + dataset_future = pool.submit(create_pretraining_dataset, data_file, args.max_predictions_per_seq, shared_file_list, args, worker_init) + + train_iter = tqdm(train_dataloader, desc="Iteration", disable=args.disable_progress_bar) if is_main_process() else train_dataloader + + if raw_train_start is None: + raw_train_start = time.time() + cnt = 0 + for step, batch in enumerate(train_iter): + if lazy_mode and args.use_hpu_graphs: + model.set_iteration_count(cnt) + + training_steps += 1 + + cnt += 1 + if training_steps % args.gradient_accumulation_steps == 0: + cnt = 0 + + batch = [t.to(device) for t in batch] + if args.enable_packed_data_mode: + input_ids, segment_ids, input_mask, positions, masked_lm_labels, next_sentence_positions, next_sentence_labels = batch + else: + input_ids, segment_ids, input_mask, masked_lm_labels, next_sentence_labels = batch + + if (args.local_rank != -1) and (training_steps % args.gradient_accumulation_steps == 0): + torch.distributed.barrier() + + with torch.autocast(device_type="hpu", dtype=torch.bfloat16, enabled=args.use_autocast): + if args.local_rank != -1 and not args.allreduce_post_accumulation \ + and (training_steps % args.gradient_accumulation_steps != 0): + with model.no_sync(): + prediction_scores, 
seq_relationship_score = model( + input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, enable_packed_data_mode=args.enable_packed_data_mode, + positions=positions if args.enable_packed_data_mode else None, + next_sentence_positions=next_sentence_positions if args.enable_packed_data_mode else None) + else: + prediction_scores, seq_relationship_score = model( + input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, enable_packed_data_mode=args.enable_packed_data_mode, + positions=positions if args.enable_packed_data_mode else None, + next_sentence_positions=next_sentence_positions if args.enable_packed_data_mode else None) + + loss = criterion( + prediction_scores, seq_relationship_score, masked_lm_labels, next_sentence_labels) + if args.n_pu > 1: + loss = loss.mean() # mean() to average on multi-pu. + + divisor = args.gradient_accumulation_steps + if args.gradient_accumulation_steps > 1: + if not args.allreduce_post_accumulation: + # this division was merged into predivision + loss = loss / args.gradient_accumulation_steps + divisor = 1.0 + if args.fp16: + with amp.scale_loss(loss, optimizer, delay_overflow_check=args.allreduce_post_accumulation) as scaled_loss: + scaled_loss.backward() + else: + loss.backward() + + if lazy_mode: + htcore.mark_step() + + loss_list.append(loss) + + if training_steps % args.gradient_accumulation_steps == 0: + lr_scheduler.step() # learning rate warmup + global_step = take_optimizer_step(args, optimizer, model, overflow_buf, global_step) + + if lazy_mode: + htcore.mark_step() + + if global_step >= args.steps_this_run or timeout_sent or training_steps % (args.log_freq * args.gradient_accumulation_steps) == 0: + for loss_t in loss_list: + average_loss += loss_t.item() + loss_list.clear() + train_time = time.time() - starting_time + starting_time = time.time() + average_training_time_per_step = train_time/(args.gradient_accumulation_steps * args.log_freq) + average_perf_per_step = 
args.train_batch_size*avg_seq_per_pack/average_training_time_per_step + + if tb_writer is not None: + tb_writer.add_scalar('Loss', average_loss, global_step) + tb_writer.add_scalar('performance', average_perf_per_step, global_step) + + if global_step >= args.steps_this_run or timeout_sent: + train_time_raw = time.time() - raw_train_start + last_num_steps = int(training_steps / args.gradient_accumulation_steps) % args.log_freq + last_num_steps = args.log_freq if last_num_steps == 0 else last_num_steps + average_loss = average_loss / (last_num_steps * divisor) + average_loss = torch.tensor(average_loss, dtype=torch.float32).to(device) + if (torch.distributed.is_initialized()): + average_loss /= get_world_size() + torch.distributed.barrier() + torch.distributed.all_reduce(average_loss) + final_loss = average_loss.item() + + if tb_writer is not None: + tb_writer.add_scalar('Loss', final_loss, global_step) + if is_main_process(): + dllogger.log(step=(epoch, global_step, ), data={"final_loss": final_loss, + "average_training_time_step": average_training_time_per_step, + "average_perf_per_step": average_perf_per_step}) + elif training_steps % (args.log_freq * args.gradient_accumulation_steps) == 0: + if is_main_process(): + dllogger.log(step=(epoch, global_step, ), data={"average_loss": average_loss / (args.log_freq * divisor), + "step_loss": loss.item() * args.gradient_accumulation_steps / divisor, + "learning_rate": optimizer.param_groups[0]['lr'], + "average_training_time_step": average_training_time_per_step, + "average_perf_per_step": average_perf_per_step}) + average_loss = 0 + + + if global_step >= args.steps_this_run or training_steps % ( + args.num_steps_per_checkpoint * args.gradient_accumulation_steps) == 0 or timeout_sent: + if isinstance(optimizer, ZeroRedundancyOptimizer): + optimizer.consolidate_state_dict() + if is_main_process() and not args.skip_checkpoint: + # Save a trained model + dllogger.log(step="PARAMETER", data={"checkpoint_step": global_step}) + 
model_to_save = model.module if hasattr(model, + 'module') else model # Only save the model it-self + if args.resume_step < 0 or not args.phase2: + output_save_file = os.path.join(args.output_dir, "ckpt_{}.pt".format(global_step)) + else: + output_save_file = os.path.join(args.output_dir, "ckpt_{}.pt".format(global_step + args.phase1_end_step)) + checkpoint_dict ={} + if args.do_train: + if args.use_habana or args.no_cuda: + model_state_dict = model_to_save.state_dict() + if args.use_torch_compile: + model_state_dict = repair_checkpoint(model_to_save.state_dict()) + checkpoint_dict = {'model': model_state_dict, + 'optimizer': optimizer.state_dict(), + 'files': [f_id] + files, + 'epoch': epoch, + 'data_loader': None if global_step >= args.max_steps else train_dataloader} + else: + checkpoint_dict = {'model': model_to_save.state_dict(), + 'optimizer': optimizer.state_dict(), + 'master params': list(amp.master_params(optimizer)), + 'files': [f_id] + files, + 'epoch': epoch, + 'data_loader': None if global_step >= args.max_steps else train_dataloader} + + torch.save(checkpoint_dict, output_save_file) + most_recent_ckpts_paths.append(output_save_file) + if len(most_recent_ckpts_paths) > 3: + ckpt_to_be_removed = most_recent_ckpts_paths.pop(0) + os.remove(ckpt_to_be_removed) + + # Exiting the training due to hitting max steps, or being sent a + # timeout from the cluster scheduler + if global_step >= args.steps_this_run or timeout_sent: + del train_dataloader + # thread.join() + return args, final_loss, train_time_raw, global_step + + if prof: + prof.step() + + del train_dataloader + # thread.join() + # Make sure pool has finished and switch train_dataloader + # NOTE: Will block until complete + if device.type == 'cuda': + train_dataloader, data_file = dataset_future.result(timeout=None) + else: + train_dataloader, data_file = create_pretraining_dataset(data_file, args.max_predictions_per_seq, shared_file_list, args, worker_init) + + epoch += 1 + + if prof: + prof.stop() 
if __name__ == "__main__":

    now = time.time()
    # main() runs the full pretraining loop and returns summary metrics.
    args, final_loss, train_time_raw, global_step = main()
    pu_count = args.n_pu
    # When resuming phase2 from a phase1 checkpoint, fold the phase1 steps
    # back into the absolute step count used for throughput accounting.
    global_step += args.phase1_end_step if (args.phase2 and args.resume_step > 0) else 0
    if args.resume_step == -1:
        args.resume_step = 0
    if torch.distributed.is_initialized():
        pu_count = get_world_size()
    if is_main_process():
        e2e_time = time.time() - now
        # Sequences/second across all ranks.  skipped_steps counts optimizer
        # steps dropped due to gradient overflow (fp16 loss scaling).
        # avg_seq_per_pack > 1 only in packed-data mode.
        training_perf = args.train_batch_size * args.gradient_accumulation_steps * pu_count * avg_seq_per_pack\
                        * (global_step - args.resume_step + skipped_steps) / train_time_raw
        dllogger.log(step=tuple(), data={"e2e_train_time": e2e_time, "training_sequences_per_second": training_perf,
                                         "final_loss": final_loss, "raw_train_time": train_time_raw })
        if args.log_memory_usage:
            # Habana-specific memory statistics; import is deferred because it
            # only exists on HPU installs.
            import habana_frameworks.torch as ht
            mem_stats = ht.hpu.memory.memory_stats()
            max_used = str(mem_stats['MaxInUse'] / 1024.0 / 1024.0 / 1024.0) + "G"
            perc_used = str(100 * mem_stats['MaxInUse'] / mem_stats['Limit']) + "%"
            stats = 'max_hpu_mem:{'+max_used+'} ({'+perc_used+'})'

            dllogger.log(step=tuple(), data={"memory usage": stats})
        dllogger.flush()
class LRScheduler(_LRScheduler):
    """Common base for the warm-up schedulers below.

    Thin wrapper over ``torch.optim.lr_scheduler._LRScheduler`` whose
    ``step`` writes the values returned by ``get_lr()`` straight into the
    optimizer's parameter groups.
    """

    def __init__(self, optimizer, last_epoch=-1):
        # Check if using mixed precision training.  Kept False here; a caller
        # may flip it so step() reads the step count from optimizer state.
        self.mixed_training = False
        base_optimizer = optimizer

        # Check that optimizer param is valid
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))

        super(LRScheduler, self).__init__(base_optimizer, last_epoch)

    def step(self, epoch=None):
        # Set the current training step
        # ('epoch' is used to be consistent with _LRScheduler)
        if self.mixed_training:
            # The assumption is that the step will be constant
            state_dict = self.optimizer.state[self.optimizer.param_groups[0]['params'][0]]
            if 'step' in state_dict:
                self.last_epoch = state_dict['step'] + 1
            else:
                self.last_epoch = 1
        else:
            self.last_epoch = epoch if epoch is not None else self.last_epoch + 1

        for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
            param_group['lr'] = lr


class CosineWarmUpScheduler(LRScheduler):
    """Linear warm-up followed by cosine decay of the learning rate.

    ``warmup`` is a fraction of ``total_steps`` (e.g. 0.2 == first 20%).
    """

    def __init__(self, optimizer, warmup, total_steps, last_epoch=-1):
        self.warmup = warmup
        self.total_steps = total_steps
        super(CosineWarmUpScheduler, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        progress = self.last_epoch / self.total_steps
        if progress < self.warmup:
            return [base_lr * progress / self.warmup for base_lr in self.base_lrs]
        else:
            # BUGFIX: the original returned
            #   base_lr * (0.5 * (1.0 + torch.cos(math.pi + progress)))
            # which (a) raises TypeError at runtime because torch.cos does not
            # accept a Python float, and (b) uses 'pi + progress' instead of
            # the standard cosine-decay argument 'pi * progress'.
            return [base_lr * (0.5 * (1.0 + math.cos(math.pi * progress))) for base_lr in self.base_lrs]


class ConstantWarmUpScheduler(LRScheduler):
    """Linear warm-up, then a constant learning rate (the base lr)."""

    def __init__(self, optimizer, warmup, total_steps, last_epoch=-1):
        self.warmup = warmup
        self.total_steps = total_steps
        super(ConstantWarmUpScheduler, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        progress = self.last_epoch / self.total_steps
        if progress < self.warmup:
            return [base_lr * progress / self.warmup for base_lr in self.base_lrs]
        else:
            return self.base_lrs


class LinearWarmUpScheduler(LRScheduler):
    """Linear warm-up, then linear decay to zero at ``total_steps``."""

    def __init__(self, optimizer, warmup, total_steps, last_epoch=-1):
        self.warmup = warmup
        self.total_steps = total_steps
        super(LinearWarmUpScheduler, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        progress = self.last_epoch / self.total_steps
        if progress < self.warmup:
            return [base_lr * progress / self.warmup for base_lr in self.base_lrs]
        else:
            # Decays from base_lr at the end of warm-up down to 0 at progress 1.
            return [base_lr * max((progress - 1.0) / (self.warmup - 1.0), 0.) for base_lr in self.base_lrs]
+ """ + + def __init__(self, optimizer, warmup, total_steps, degree=0.5, last_epoch=-1): + self.warmup = warmup + self.total_steps = total_steps + self.degree = degree + super(PolyWarmUpScheduler, self).__init__(optimizer, last_epoch) + + def step(self, epoch=None): + param_group = self.optimizer.param_groups[0] + if 'step' in param_group: + self.last_epoch = param_group['step'] + 1 + else: + self.last_epoch = 1 + + for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): + param_group['lr'] = lr + + def get_lr(self): + progress = self.last_epoch / self.total_steps + if progress < self.warmup: + return [base_lr * progress / self.warmup for base_lr in self.base_lrs] + else: + return [base_lr * ((1.0 - progress) ** self.degree) for base_lr in self.base_lrs] diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/scripts/run_pretraining.sh b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/scripts/run_pretraining.sh new file mode 100644 index 0000000000000000000000000000000000000000..71ce0ac05da3f5c67fa5af8bc801559be5b1e66f --- /dev/null +++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/scripts/run_pretraining.sh @@ -0,0 +1,242 @@ +#!/bin/bash +# Copyright (c) 2021, Habana Labs Ltd. All rights reserved. +# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 

# ---------------------------------------------------------------------------
# Positional parameters (all optional; defaults tuned for BERT-Large LAMB
# pretraining on Habana HPU).  Phase 1 = seq len 128, phase 2 = seq len 512.
# ---------------------------------------------------------------------------
train_batch_size=${1:-8192}
learning_rate=${2:-"6e-3"}
precision=${3:-"bf16"}
n_pu=${4:-1} #Number of processing units
warmup_proportion=${5:-"0.2843"}
train_steps=${6:-7038}
save_checkpoint_steps=${7:-200}
resume_training=${8:-"false"}
create_logfile=${9:-"true"}
accumulate_gradients=${10:-"true"}
gradient_accumulation_steps=${11:-128}
seed=${12:-12439}
job_name=${13:-"bert_lamb_pretraining"}
allreduce_post_accumulation=${14:-"true"}
allreduce_post_accumulation_fp16=${15:-"true"}
train_batch_size_phase2=${16:-4096}
learning_rate_phase2=${17:-"4e-3"}
warmup_proportion_phase2=${18:-"0.128"}
train_steps_phase2=${19:-1563}
gradient_accumulation_steps_phase2=${20:-512}
# Dataset locations; BERT_DATASET_DIR / BERT_REPO_BASE / BERT_OUT_DIR and
# PYTHON are expected to be exported by the caller's environment.
DATASET=hdf5_lower_case_1_seq_len_128_max_pred_20_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5/books_wiki_en_corpus/ # change this for other datasets
DATA_DIR_PHASE1=${21:-$BERT_DATASET_DIR/${DATASET}/}
BERT_CONFIG=bert_config.json
DATASET2=hdf5_lower_case_1_seq_len_512_max_pred_80_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5/books_wiki_en_corpus # change this for other datasets
DATA_DIR_PHASE2=${22:-$BERT_DATASET_DIR/${DATASET2}/}
CODEDIR=${23:-"$BERT_REPO_BASE/pretraining"}
init_checkpoint=${24:-"None"}
RESULTS_DIR=$BERT_OUT_DIR/results
CHECKPOINTS_DIR=$RESULTS_DIR/checkpoints

mkdir -p $CHECKPOINTS_DIR


# Sanity checks: a missing dataset only warns (training fails later), a
# missing results dir is fatal, a missing checkpoints dir falls back to
# writing checkpoints into RESULTS_DIR.
if [ ! -d "$DATA_DIR_PHASE1" ] ; then
   echo "Warning! $DATA_DIR_PHASE1 directory missing. Training cannot start"
fi
if [ ! -d "$RESULTS_DIR" ] ; then
   echo "Error! $RESULTS_DIR directory missing."
   exit -1
fi
if [ ! -d "$CHECKPOINTS_DIR" ] ; then
   echo "Warning! $CHECKPOINTS_DIR directory missing."
   echo "Checkpoints will be written to $RESULTS_DIR instead."
   CHECKPOINTS_DIR=$RESULTS_DIR
fi
if [ ! -f "$BERT_CONFIG" ] ; then
   echo "Error! BERT large configuration file not found at $BERT_CONFIG"
   exit -1
fi

# Map the precision name to the run_pretraining.py flag.  Only fp16 needs an
# explicit flag; bf16/tf32/fp32 are handled elsewhere (autocast / defaults).
PREC=""
if [ "$precision" = "fp16" ] ; then
   PREC="--fp16"
elif [ "$precision" = "fp32" ] ; then
   PREC=""
elif [ "$precision" = "tf32" ] ; then
   PREC=""
elif [ "$precision" = "bf16" ] ; then
   PREC=""
else
   echo "Unknown argument"
   exit -2
fi

# Optional flags assembled from the boolean-ish positional parameters.
ACCUMULATE_GRADIENTS=""
if [ "$accumulate_gradients" == "true" ] ; then
   ACCUMULATE_GRADIENTS="--gradient_accumulation_steps=$gradient_accumulation_steps"
fi

CHECKPOINT=""
if [ "$resume_training" == "true" ] ; then
   CHECKPOINT="--resume_from_checkpoint"
fi

ALL_REDUCE_POST_ACCUMULATION=""
if [ "$allreduce_post_accumulation" == "true" ] ; then
   ALL_REDUCE_POST_ACCUMULATION="--allreduce_post_accumulation"
fi

ALL_REDUCE_POST_ACCUMULATION_FP16=""
if [ "$allreduce_post_accumulation_fp16" == "true" ] ; then
   ALL_REDUCE_POST_ACCUMULATION_FP16="--allreduce_post_accumulation_fp16"
fi

INIT_CHECKPOINT=""
if [ "$init_checkpoint" != "None" ] ; then
   INIT_CHECKPOINT="--init_checkpoint=$init_checkpoint"
fi

# ----------------------------- Phase 1 run ---------------------------------
echo $DATA_DIR_PHASE1
INPUT_DIR=$DATA_DIR_PHASE1
CMD=" $CODEDIR/run_pretraining.py"
CMD+=" --input_dir=$DATA_DIR_PHASE1"
CMD+=" --output_dir=$CHECKPOINTS_DIR"
CMD+=" --config_file=$BERT_CONFIG"
CMD+=" --bert_model=bert-large-uncased"
CMD+=" --train_batch_size=$train_batch_size"
CMD+=" --max_seq_length=128"
CMD+=" --max_predictions_per_seq=20"
CMD+=" --max_steps=$train_steps"
CMD+=" --warmup_proportion=$warmup_proportion"
CMD+=" --num_steps_per_checkpoint=$save_checkpoint_steps"
CMD+=" --learning_rate=$learning_rate"
CMD+=" --seed=$seed"
CMD+=" $PREC"
CMD+=" $ACCUMULATE_GRADIENTS"
CMD+=" $CHECKPOINT"
CMD+=" $ALL_REDUCE_POST_ACCUMULATION"
CMD+=" $ALL_REDUCE_POST_ACCUMULATION_FP16"
CMD+=" $INIT_CHECKPOINT"
CMD+=" --do_train"
CMD+=" --json-summary ${RESULTS_DIR}/dllogger.json "
CMD+=" --use_habana "
CMD+=" --use_jit_trace "
CMD+=" --use_fused_lamb "

# Single process vs distributed launch (one process per processing unit).
if [ "$n_pu" -gt "1" ]; then
   CMD="$PYTHON -m torch.distributed.launch --nproc_per_node=$n_pu $CMD"
else
   CMD="$PYTHON -u $CMD"
fi


if [ "$create_logfile" = "true" ] ; then
  export GBS=$(expr $train_batch_size \* $n_pu)   # global batch size
  printf -v TAG "pyt_bert_pretraining_phase1_%s_gbs%d" "$precision" $GBS
  DATESTAMP=`date +'%y%m%d%H%M%S'`
  LOGFILE=$RESULTS_DIR/$job_name.$TAG.$DATESTAMP.log
  printf "Logs written to %s\n" "$LOGFILE"
fi

set -x
if [ -z "$LOGFILE" ] ; then
   $CMD
else
   (
     $CMD
   ) |& tee $LOGFILE
fi

set +x

echo "finished pretraining"

#Start Phase2

# Re-derive the flags for phase 2 (same logic, phase-2 parameter values).
PREC=""
if [ "$precision" = "fp16" ] ; then
   PREC="--fp16"
elif [ "$precision" = "fp32" ] ; then
   PREC=""
elif [ "$precision" = "tf32" ] ; then
   PREC=""
elif [ "$precision" = "bf16" ] ; then
   PREC=""
else
   echo "Unknown argument"
   exit -2
fi

ACCUMULATE_GRADIENTS=""
if [ "$accumulate_gradients" == "true" ] ; then
   ACCUMULATE_GRADIENTS="--gradient_accumulation_steps=$gradient_accumulation_steps_phase2"
fi

ALL_REDUCE_POST_ACCUMULATION=""
if [ "$allreduce_post_accumulation" == "true" ] ; then
   ALL_REDUCE_POST_ACCUMULATION="--allreduce_post_accumulation"
fi

ALL_REDUCE_POST_ACCUMULATION_FP16=""
if [ "$allreduce_post_accumulation_fp16" == "true" ] ; then
   ALL_REDUCE_POST_ACCUMULATION_FP16="--allreduce_post_accumulation_fp16"
fi

# ----------------------------- Phase 2 run ---------------------------------
echo $DATA_DIR_PHASE2
INPUT_DIR=$DATA_DIR_PHASE2
CMD=" $CODEDIR/run_pretraining.py"
CMD+=" --input_dir=$DATA_DIR_PHASE2"
CMD+=" --output_dir=$CHECKPOINTS_DIR"
CMD+=" --config_file=$BERT_CONFIG"
CMD+=" --bert_model=bert-large-uncased"
CMD+=" --train_batch_size=$train_batch_size_phase2"
CMD+=" --max_seq_length=512"
CMD+=" --max_predictions_per_seq=80"
CMD+=" --max_steps=$train_steps_phase2"
CMD+=" --warmup_proportion=$warmup_proportion_phase2"
CMD+=" --num_steps_per_checkpoint=$save_checkpoint_steps"
CMD+=" --learning_rate=$learning_rate_phase2"
CMD+=" --seed=$seed"
CMD+=" $PREC"
CMD+=" $ACCUMULATE_GRADIENTS"
CMD+=" $CHECKPOINT"
CMD+=" $ALL_REDUCE_POST_ACCUMULATION"
CMD+=" $ALL_REDUCE_POST_ACCUMULATION_FP16"
# Phase 2 always resumes from the phase-1 checkpoint at phase1_end_step.
CMD+=" --do_train --phase2 --resume_from_checkpoint --phase1_end_step=$train_steps"
CMD+=" --json-summary ${RESULTS_DIR}/dllogger.json "
CMD+=" --use_habana "
CMD+=" --use_jit_trace "
CMD+=" --use_fused_lamb "

if [ "$n_pu" -gt "1" ]; then
   CMD="$PYTHON -m torch.distributed.launch --nproc_per_node=$n_pu $CMD"
else
   CMD="$PYTHON -u $CMD"
fi

if [ "$create_logfile" = "true" ] ; then
  export GBS=$(expr $train_batch_size_phase2 \* $n_pu)
  printf -v TAG "pyt_bert_pretraining_phase2_%s_gbs%d" "$precision" $GBS
  DATESTAMP=`date +'%y%m%d%H%M%S'`
  LOGFILE=$RESULTS_DIR/$job_name.$TAG.$DATESTAMP.log
  printf "Logs written to %s\n" "$LOGFILE"
fi

set -x
if [ -z "$LOGFILE" ] ; then
   $CMD
else
   (
     $CMD
   ) |& tee $LOGFILE
fi

set +x

echo "finished phase2"
+ +"""Tokenization classes.""" + +from __future__ import absolute_import, division, print_function, unicode_literals + +import collections +import logging +import os +import unicodedata +import six +from io import open + +from file_utils import cached_path + +logger = logging.getLogger(__name__) + +PRETRAINED_VOCAB_ARCHIVE_MAP = { + 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", + 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", + 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", + 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", + 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt", + 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", + 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt", +} +PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = { + 'bert-base-uncased': 512, + 'bert-large-uncased': 512, + 'bert-base-cased': 512, + 'bert-large-cased': 512, + 'bert-base-multilingual-uncased': 512, + 'bert-base-multilingual-cased': 512, + 'bert-base-chinese': 512, +} +VOCAB_NAME = 'vocab.txt' + +def convert_to_unicode(text): + """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" + if six.PY3: + if isinstance(text, str): + return text + elif isinstance(text, bytes): + return text.decode("utf-8", "ignore") + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + elif six.PY2: + if isinstance(text, str): + return text.decode("utf-8", "ignore") + elif isinstance(text, unicode): + return text + else: + raise ValueError("Unsupported string type: %s" % (type(text))) + else: + raise ValueError("Not running on Python2 or 
Python 3?") + + +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + index = 0 + with open(vocab_file, "r", encoding="utf-8") as reader: + while True: + token = reader.readline() + if not token: + break + token = token.strip() + vocab[token] = index + index += 1 + return vocab + + +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +class BertTokenizer(object): + """Runs end-to-end tokenization: punctuation splitting + wordpiece""" + + def __init__(self, vocab_file, do_lower_case=True, max_len=None, + never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): + if not os.path.isfile(vocab_file): + raise ValueError( + "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained " + "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) + self.vocab = load_vocab(vocab_file) + self.ids_to_tokens = collections.OrderedDict( + [(ids, tok) for tok, ids in self.vocab.items()]) + self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, + never_split=never_split) + self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) + self.max_len = max_len if max_len is not None else int(1e12) + + def tokenize(self, text): + split_tokens = [] + for token in self.basic_tokenizer.tokenize(text): + for sub_token in self.wordpiece_tokenizer.tokenize(token): + split_tokens.append(sub_token) + return split_tokens + + def convert_tokens_to_ids(self, tokens): + """Converts a sequence of tokens into ids using the vocab.""" + ids = [] + for token in tokens: + ids.append(self.vocab[token]) + if len(ids) > self.max_len: + raise ValueError( + "Token indices sequence length is longer than the specified maximum " + " sequence length for this BERT model ({} > {}). 
Running this"
+                " sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
+            )
+        return ids
+
+    def convert_ids_to_tokens(self, ids):
+        """Converts a sequence of ids in wordpiece tokens using the vocab."""
+        tokens = []
+        for i in ids:
+            tokens.append(self.ids_to_tokens[i])
+        return tokens
+
+    @classmethod
+    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
+        """
+        Instantiate a PreTrainedBertModel from a pre-trained model file.
+        Download and cache the pre-trained model file if needed.
+        """
+        if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
+            vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
+        else:
+            vocab_file = pretrained_model_name_or_path
+        if os.path.isdir(vocab_file):
+            vocab_file = os.path.join(vocab_file, VOCAB_NAME)
+        # redirect to the cache, if necessary
+        try:
+            resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
+        except EnvironmentError:
+            logger.error(
+                "Model name '{}' was not found in model name list ({}). "
+                "We assumed '{}' was a path or url but couldn't find any file "
+                "associated to this path or url.".format(
+                    pretrained_model_name_or_path,
+                    ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
+                    vocab_file))
+            # NOTE: soft failure — callers receive None rather than an exception
+            # and must check for it.
+            return None
+        if resolved_vocab_file == vocab_file:
+            logger.info("loading vocabulary file {}".format(vocab_file))
+        else:
+            logger.info("loading vocabulary file {} from cache at {}".format(
+                vocab_file, resolved_vocab_file))
+        if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
+            # if we're using a pretrained model, ensure the tokenizer wont index sequences longer
+            # than the number of positional embeddings
+            max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
+            kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
+        # Instantiate tokenizer. 
+
+        tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
+        return tokenizer
+
+
+class BasicTokenizer(object):
+    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
+
+    def __init__(self,
+                 do_lower_case=True,
+                 never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
+        """Constructs a BasicTokenizer.
+
+        Args:
+          do_lower_case: Whether to lower case the input.
+        """
+        self.do_lower_case = do_lower_case
+        self.never_split = never_split
+
+    def tokenize(self, text):
+        """Tokenizes a piece of text."""
+        text = self._clean_text(text)
+        # This was added on November 1st, 2018 for the multilingual and Chinese
+        # models. This is also applied to the English models now, but it doesn't
+        # matter since the English models were not trained on any Chinese data
+        # and generally don't have any Chinese data in them (there are Chinese
+        # characters in the vocabulary because Wikipedia does have some Chinese
+        # words in the English Wikipedia.).
+        text = self._tokenize_chinese_chars(text)
+        orig_tokens = whitespace_tokenize(text)
+        split_tokens = []
+        for token in orig_tokens:
+            # lower-casing and accent stripping are skipped for special tokens
+            # listed in never_split
+            if self.do_lower_case and token not in self.never_split:
+                token = token.lower()
+                token = self._run_strip_accents(token)
+            split_tokens.extend(self._run_split_on_punc(token))
+
+        output_tokens = whitespace_tokenize(" ".join(split_tokens))
+        return output_tokens
+
+    def _run_strip_accents(self, text):
+        """Strips accents from a piece of text."""
+        # NFD decomposition separates base characters from combining marks;
+        # category "Mn" (nonspacing mark) entries are then dropped.
+        text = unicodedata.normalize("NFD", text)
+        output = []
+        for char in text:
+            cat = unicodedata.category(char)
+            if cat == "Mn":
+                continue
+            output.append(char)
+        return "".join(output)
+
+    def _run_split_on_punc(self, text):
+        """Splits punctuation on a piece of text."""
+        if text in self.never_split:
+            return [text]
+        chars = list(text)
+        i = 0
+        start_new_word = True
+        output = []
+        # each punctuation char becomes its own token; runs of other chars
+        # are accumulated into the current token
+        while i < len(chars):
+            char = chars[i]
+            if _is_punctuation(char):
+                output.append([char])
+                start_new_word = True
+            else:
+                if start_new_word:
+                    
output.append([])
+                    start_new_word = False
+                output[-1].append(char)
+            i += 1
+
+        # output is a list of char-lists; join each into a string token
+        return ["".join(x) for x in output]
+
+    def _tokenize_chinese_chars(self, text):
+        """Adds whitespace around any CJK character."""
+        output = []
+        for char in text:
+            cp = ord(char)
+            if self._is_chinese_char(cp):
+                output.append(" ")
+                output.append(char)
+                output.append(" ")
+            else:
+                output.append(char)
+        return "".join(output)
+
+    def _is_chinese_char(self, cp):
+        """Checks whether CP is the codepoint of a CJK character."""
+        # This defines a "chinese character" as anything in the CJK Unicode block:
+        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+        #
+        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+        # despite its name. The modern Korean Hangul alphabet is a different block,
+        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+        # space-separated words, so they are not treated specially and handled
+        # like the all of the other languages. 
+
+        if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
+                (cp >= 0x3400 and cp <= 0x4DBF) or  #
+                (cp >= 0x20000 and cp <= 0x2A6DF) or  #
+                (cp >= 0x2A700 and cp <= 0x2B73F) or  #
+                (cp >= 0x2B740 and cp <= 0x2B81F) or  #
+                (cp >= 0x2B820 and cp <= 0x2CEAF) or
+                (cp >= 0xF900 and cp <= 0xFAFF) or  #
+                (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
+            return True
+
+        return False
+
+    def _clean_text(self, text):
+        """Performs invalid character removal and whitespace cleanup on text."""
+        output = []
+        for char in text:
+            cp = ord(char)
+            # drop NUL, U+FFFD replacement characters, and control characters
+            if cp == 0 or cp == 0xfffd or _is_control(char):
+                continue
+            if _is_whitespace(char):
+                output.append(" ")
+            else:
+                output.append(char)
+        return "".join(output)
+
+
+class WordpieceTokenizer(object):
+    """Runs WordPiece tokenization."""
+
+    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
+        self.vocab = vocab
+        self.unk_token = unk_token
+        # tokens longer than this are mapped to unk_token wholesale (see tokenize)
+        self.max_input_chars_per_word = max_input_chars_per_word
+
+    def tokenize(self, text):
+        """Tokenizes a piece of text into its word pieces.
+
+        This uses a greedy longest-match-first algorithm to perform tokenization
+        using the given vocabulary.
+
+        For example:
+          input = "unaffable"
+          output = ["un", "##aff", "##able"]
+
+        Args:
+          text: A single token or whitespace separated tokens. This should have
+            already been passed through `BasicTokenizer`.
+
+        Returns:
+          A list of wordpiece tokens. 
+
+        """
+
+        output_tokens = []
+        for token in whitespace_tokenize(text):
+            chars = list(token)
+            if len(chars) > self.max_input_chars_per_word:
+                output_tokens.append(self.unk_token)
+                continue
+
+            is_bad = False
+            start = 0
+            sub_tokens = []
+            while start < len(chars):
+                end = len(chars)
+                cur_substr = None
+                # greedy longest-match-first: shrink the window from the right
+                # until a vocab entry matches
+                while start < end:
+                    substr = "".join(chars[start:end])
+                    if start > 0:
+                        # non-initial pieces carry the WordPiece continuation marker
+                        substr = "##" + substr
+                    if substr in self.vocab:
+                        cur_substr = substr
+                        break
+                    end -= 1
+                if cur_substr is None:
+                    # no piece of the remainder is in the vocab: whole token -> unk
+                    is_bad = True
+                    break
+                sub_tokens.append(cur_substr)
+                start = end
+
+            if is_bad:
+                output_tokens.append(self.unk_token)
+            else:
+                output_tokens.extend(sub_tokens)
+        return output_tokens
+
+
+def _is_whitespace(char):
+    """Checks whether `chars` is a whitespace character."""
+    # \t, \n, and \r are technically control characters but we treat them
+    # as whitespace since they are generally considered as such.
+    if char == " " or char == "\t" or char == "\n" or char == "\r":
+        return True
+    cat = unicodedata.category(char)
+    if cat == "Zs":
+        return True
+    return False
+
+
+def _is_control(char):
+    """Checks whether `chars` is a control character."""
+    # These are technically control characters but we count them as whitespace
+    # characters.
+    if char == "\t" or char == "\n" or char == "\r":
+        return False
+    cat = unicodedata.category(char)
+    if cat.startswith("C"):
+        return True
+    return False
+
+
+def _is_punctuation(char):
+    """Checks whether `chars` is a punctuation character."""
+    cp = ord(char)
+    # We treat all non-letter/number ASCII as punctuation.
+    # Characters such as "^", "$", and "`" are not in the Unicode
+    # Punctuation class but we treat them as punctuation anyways, for
+    # consistency. 
+
+    # ASCII ranges: 33-47 (!-/), 58-64 (:-@), 91-96 ([-`), 123-126 ({-~)
+    if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
+            (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
+        return True
+    cat = unicodedata.category(char)
+    if cat.startswith("P"):
+        return True
+    return False
diff --git a/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/utils.py b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3a208d8e71105487563aaa026d154de2af42124
--- /dev/null
+++ b/docker/intel_code/llama13b/Model-References/PyTorch/nlp/bert/utils.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. 
import torch
import torch.distributed as dist

from pathlib import Path


def get_rank():
    """Return this process's rank in the default process group.

    Falls back to 0 when torch.distributed is unavailable or uninitialized.
    """
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0


def get_world_size():
    """Return the number of processes in the default process group.

    Falls back to 1 when torch.distributed is unavailable or uninitialized.
    """
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1


def is_main_process():
    """True only on rank 0 (and on every process when not distributed)."""
    return get_rank() == 0


def barrier():
    """Synchronize all ranks; a no-op outside an initialized process group."""
    if dist.is_available() and dist.is_initialized():
        dist.barrier()


def format_step(step):
    """Render a training-progress marker as a log prefix.

    `step` is either a ready-made string (returned unchanged) or a sequence of
    up to three counters: (epoch, training iteration, validation iteration).
    """
    if isinstance(step, str):
        return step
    labels = (
        "Training Epoch: {} ",
        "Training Iteration: {} ",
        "Validation Iteration: {} ",
    )
    # zip() stops at the shorter operand, so absent counters are simply omitted
    # and any extra entries beyond the three labels are ignored.
    return "".join(label.format(value) for label, value in zip(labels, step))


def mkdir(path):
    """Create `path` (including parents); succeed silently if it exists."""
    Path(path).mkdir(parents=True, exist_ok=True)


def mkdir_by_main_process(path):
    """Have rank 0 create `path`; all ranks wait at the barrier afterwards."""
    if is_main_process():
        mkdir(path)
    barrier()


def remove_prefix(text, prefix):
    """Return `text` with a leading `prefix` stripped, if present."""
    return text[len(prefix):] if text.startswith(prefix) else text


def repair_checkpoint(model_ckpt):
    """Strip the `_orig_mod.` prefix (left by torch.compile) from state-dict keys.

    Returns the input object unchanged when no key carries the prefix, so
    callers can detect that no rewrite was needed.
    """
    renamed = {key: remove_prefix(key, "_orig_mod.") for key in model_ckpt}
    if all(old == new for old, new in renamed.items()):
        return model_ckpt  # already clean — hand back the very same object
    return {new_key: model_ckpt[old_key] for old_key, new_key in renamed.items()}