diff --git a/.gitattributes b/.gitattributes index 2c53de45b592821cf2d43c1b1b117fdd4a3215d6..85103d2149302bd68a4f6784dfd094bee3529ec9 100644 --- a/.gitattributes +++ b/.gitattributes @@ -105,3 +105,4 @@ venv/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_infer.so.8 filte venv/lib/python3.10/site-packages/pandas/_libs/groupby.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/pandas/_libs/interval.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/nvidia/cusolver/lib/libcusolver.so.11 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/ckpts/universal/global_step20/zero/5.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/5.mlp.dense_4h_to_h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..a8c1ebcebd483432acf6f292619a18f234fcca6d --- /dev/null +++ b/ckpts/universal/global_step20/zero/5.mlp.dense_4h_to_h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2397224c299cfe30b815e8791a06b585fa77846bccf71949caf88ed36d8d0db2 +size 33555627 diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/config.yaml b/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..46cf6f00b3bd7716effe94e46d60c979920d0178 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.40.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1715705121 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.40.2 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/output.log b/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..9bb649187a5bcbc862912a5f73e8ab5eec3da59d --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/output.log @@ -0,0 +1,33 @@ + +2024-05-14:16:45:22,362 INFO [__main__.py:251] Verbosity set to INFO +2024-05-14:16:45:26,887 INFO [__main__.py:335] Selected Tasks: ['indiccopa-hi'] +2024-05-14:16:45:26,888 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-14:16:45:26,888 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/data/cronscript/ckpts//hf_ckpt//global_step120'} +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/data/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/data/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, 
**kwargs) + File "/data/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/data/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/data/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 631, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 686, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 369, in cached_file + raise EnvironmentError( +OSError: /data/cronscript/ckpts//hf_ckpt//global_step120 does not appear to have a file named config.json. Checkout 'https://huggingface.co//data/cronscript/ckpts//hf_ckpt//global_step120/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63edd781bd5bdbb7f67523ac1ba9f0f1ed392dc --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/requirements.txt @@ -0,0 +1,163 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.3 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.2 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.4 +aiosignal==1.3.1 +antlr4-python3-runtime==4.9.3 +anyio==4.3.0 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +distro==1.9.0 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.0 +expecttest==0.2.1 +filelock==3.13.4 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.62.1 +h11==0.14.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +httpcore==1.0.5 +httpx==0.27.0 +huggingface-hub==0.23.0 +identify==2.5.35 +idna==3.7 +importlib_resources==6.4.0 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.3.0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +omegaconf==2.3.0 +openai==1.29.0 
+packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.10.0 +perfetto==0.7.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.0 +pluggy==1.4.0 +portalocker==2.8.2 +pre-commit==3.3.3 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.0.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycountry==23.12.11 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.1.1 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.2 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==1.5.0 +safetensors==0.4.3 +scikit-learn==1.4.2 +scipy==1.13.0 +sentencepiece==0.2.0 +sentry-sdk==2.1.1 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sniffio==1.3.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.3.2 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.2 +transformers==4.40.2 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.25.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..38640871a5cd51f6b45cc128c065c6a9562ff922 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/wandb-metadata.json @@ -0,0 +1,810 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-14T16:45:22.227731", + "startedAt": "2024-05-14T16:45:21.795024", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120", + "--tasks", + "indiccopa-hi", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/data/cronscript/lm-evaluation-harness", + "host": "vizzhy-150-3", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 76, + "cpu_count_logical": 152, + "cpu_freq": { + "current": 3391.4959078947372, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + 
"current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3207.443, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.997, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3299.991, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 
800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 866.4415092468262, + "used": 863.4299850463867 + } + }, + "memory": { + "total": 1007.5000267028809 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..e682bae6b5eaeba8295fd0fffdc51474a259249e --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 5}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..619745b1bcd3c9950bca92e56da9eb2308596630 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/logs/debug-internal.log @@ -0,0 +1,179 @@ +2024-05-14 16:45:21,807 INFO StreamThr :137218 [internal.py:wandb_internal():85] W&B internal server running at pid: 137218, started at: 2024-05-14 16:45:21.806957 +2024-05-14 16:45:21,809 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: status +2024-05-14 16:45:21,810 INFO WriterThread:137218 [datastore.py:open_for_write():87] open: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/run-zso8ava4.wandb +2024-05-14 16:45:21,811 DEBUG SenderThread:137218 [sender.py:send():378] send: header +2024-05-14 16:45:21,822 DEBUG SenderThread:137218 [sender.py:send():378] send: run +2024-05-14 16:45:22,045 INFO SenderThread:137218 [dir_watcher.py:__init__():211] watching files in: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files +2024-05-14 16:45:22,045 INFO 
SenderThread:137218 [sender.py:_start_run_threads():1123] run started: zso8ava4 with start time 1715705121.807016 +2024-05-14 16:45:22,053 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: check_version +2024-05-14 16:45:22,053 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: check_version +2024-05-14 16:45:22,142 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: run_start +2024-05-14 16:45:22,144 DEBUG HandlerThread:137218 [system_info.py:__init__():26] System info init +2024-05-14 16:45:22,144 DEBUG HandlerThread:137218 [system_info.py:__init__():41] System info init done +2024-05-14 16:45:22,144 INFO HandlerThread:137218 [system_monitor.py:start():194] Starting system monitor +2024-05-14 16:45:22,144 INFO SystemMonitor:137218 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-14 16:45:22,144 INFO HandlerThread:137218 [system_monitor.py:probe():214] Collecting system info +2024-05-14 16:45:22,144 INFO SystemMonitor:137218 [interfaces.py:start():188] Started cpu monitoring +2024-05-14 16:45:22,145 INFO SystemMonitor:137218 [interfaces.py:start():188] Started disk monitoring +2024-05-14 16:45:22,145 INFO SystemMonitor:137218 [interfaces.py:start():188] Started memory monitoring +2024-05-14 16:45:22,145 INFO SystemMonitor:137218 [interfaces.py:start():188] Started network monitoring +2024-05-14 16:45:22,227 DEBUG HandlerThread:137218 [system_info.py:probe():150] Probing system +2024-05-14 16:45:22,236 DEBUG HandlerThread:137218 [system_info.py:_probe_git():135] Probing git +2024-05-14 16:45:22,256 ERROR HandlerThread:137218 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/data/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /data/cronscript/lm-evaluation-harness' +2024-05-14 16:45:22,256 DEBUG HandlerThread:137218 [system_info.py:_probe_git():143] Probing git done +2024-05-14 16:45:22,256 DEBUG HandlerThread:137218 [system_info.py:probe():198] Probing system done +2024-05-14 16:45:22,256 DEBUG HandlerThread:137218 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-14T16:45:22.227731', 'startedAt': '2024-05-14T16:45:21.795024', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/data/cronscript/ckpts//hf_ckpt//global_step120', '--tasks', 'indiccopa-hi', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/data/cronscript/lm-evaluation-harness', 'host': 'vizzhy-150-3', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 76, 'cpu_count_logical': 152, 'cpu_freq': {'current': 3391.4959078947372, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3207.443, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.997, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3299.991, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 866.4415092468262, 'used': 863.4299850463867}}, 'memory': {'total': 1007.5000267028809}} +2024-05-14 16:45:22,257 INFO HandlerThread:137218 [system_monitor.py:probe():224] Finished collecting system info +2024-05-14 16:45:22,257 INFO HandlerThread:137218 [system_monitor.py:probe():227] Publishing system info +2024-05-14 16:45:22,258 INFO HandlerThread:137218 [system_monitor.py:probe():229] Finished publishing system info +2024-05-14 16:45:22,261 DEBUG SenderThread:137218 [sender.py:send():378] send: files +2024-05-14 16:45:22,261 INFO SenderThread:137218 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-14 16:45:22,359 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: python_packages +2024-05-14 16:45:22,359 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: stop_status +2024-05-14 16:45:22,360 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: python_packages +2024-05-14 16:45:22,360 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: stop_status +2024-05-14 16:45:22,576 DEBUG SenderThread:137218 [sender.py:send():378] send: telemetry +2024-05-14 16:45:22,768 INFO wandb-upload_0:137218 [upload_job.py:push():130] Uploaded file /tmp/tmpoa15v0cxwandb/95z33ekx-wandb-metadata.json +2024-05-14 16:45:23,047 INFO Thread-12 :137218 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/requirements.txt +2024-05-14 16:45:23,047 INFO Thread-12 :137218 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/wandb-metadata.json +2024-05-14 16:45:23,047 INFO Thread-12 :137218 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/output.log +2024-05-14 16:45:25,047 INFO Thread-12 :137218 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/output.log +2024-05-14 16:45:26,888 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:45:27,989 DEBUG SenderThread:137218 [sender.py:send():378] send: exit +2024-05-14 16:45:27,989 INFO SenderThread:137218 [sender.py:send_exit():585] handling exit code: 1 +2024-05-14 16:45:27,990 INFO SenderThread:137218 [sender.py:send_exit():587] handling runtime: 5 +2024-05-14 16:45:27,990 INFO SenderThread:137218 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:45:27,991 INFO SenderThread:137218 [sender.py:send_exit():593] send defer +2024-05-14 16:45:27,991 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:27,991 INFO HandlerThread:137218 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-14 16:45:27,991 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:27,991 INFO SenderThread:137218 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-14 16:45:27,991 INFO SenderThread:137218 [sender.py:transition_state():613] send defer: 1 +2024-05-14 16:45:27,991 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:27,991 INFO HandlerThread:137218 
[handler.py:handle_request_defer():184] handle defer: 1 +2024-05-14 16:45:27,991 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:27,991 INFO SenderThread:137218 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-14 16:45:27,991 INFO SenderThread:137218 [sender.py:transition_state():613] send defer: 2 +2024-05-14 16:45:27,991 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:27,991 INFO HandlerThread:137218 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-14 16:45:27,991 INFO HandlerThread:137218 [system_monitor.py:finish():203] Stopping system monitor +2024-05-14 16:45:27,991 DEBUG SystemMonitor:137218 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-14 16:45:27,992 INFO HandlerThread:137218 [interfaces.py:finish():200] Joined cpu monitor +2024-05-14 16:45:27,992 DEBUG SystemMonitor:137218 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-14 16:45:27,992 INFO HandlerThread:137218 [interfaces.py:finish():200] Joined disk monitor +2024-05-14 16:45:27,993 DEBUG SystemMonitor:137218 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-14 16:45:27,993 INFO HandlerThread:137218 [interfaces.py:finish():200] Joined memory monitor +2024-05-14 16:45:27,994 INFO HandlerThread:137218 [interfaces.py:finish():200] Joined network monitor +2024-05-14 16:45:27,994 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:27,994 INFO SenderThread:137218 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-14 16:45:27,994 INFO SenderThread:137218 [sender.py:transition_state():613] send defer: 3 +2024-05-14 16:45:27,994 DEBUG SenderThread:137218 [sender.py:send():378] send: stats +2024-05-14 16:45:27,994 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:27,995 INFO HandlerThread:137218 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-14 16:45:27,995 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:27,995 INFO SenderThread:137218 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-14 16:45:27,995 INFO SenderThread:137218 [sender.py:transition_state():613] send defer: 4 +2024-05-14 16:45:27,995 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:27,995 INFO HandlerThread:137218 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-14 16:45:27,995 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:27,995 INFO SenderThread:137218 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-14 16:45:27,995 INFO SenderThread:137218 [sender.py:transition_state():613] send defer: 5 +2024-05-14 16:45:27,995 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:27,995 INFO HandlerThread:137218 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-14 16:45:27,996 DEBUG SenderThread:137218 [sender.py:send():378] send: summary +2024-05-14 16:45:27,996 INFO SenderThread:137218 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-14 16:45:27,996 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:27,996 INFO SenderThread:137218 [sender.py:send_request_defer():609] handle sender defer: 5 
+2024-05-14 16:45:27,996 INFO SenderThread:137218 [sender.py:transition_state():613] send defer: 6 +2024-05-14 16:45:27,996 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:27,996 INFO HandlerThread:137218 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-14 16:45:27,997 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:27,997 INFO SenderThread:137218 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-14 16:45:27,999 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: status_report +2024-05-14 16:45:28,049 INFO Thread-12 :137218 [dir_watcher.py:_on_file_created():271] file/dir created: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/wandb-summary.json +2024-05-14 16:45:28,068 INFO SenderThread:137218 [sender.py:transition_state():613] send defer: 7 +2024-05-14 16:45:28,068 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,068 INFO HandlerThread:137218 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-14 16:45:28,068 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,068 INFO SenderThread:137218 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-14 16:45:28,588 INFO SenderThread:137218 [sender.py:transition_state():613] send defer: 8 +2024-05-14 16:45:28,588 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,588 INFO HandlerThread:137218 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-14 16:45:28,589 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,589 INFO SenderThread:137218 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-14 16:45:28,589 INFO SenderThread:137218 [job_builder.py:build():432] Attempting to build job artifact +2024-05-14 16:45:28,589 INFO SenderThread:137218 [job_builder.py:_get_source_type():576] no source found +2024-05-14 16:45:28,589 INFO SenderThread:137218 [sender.py:transition_state():613] send defer: 9 +2024-05-14 16:45:28,589 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:28,589 INFO HandlerThread:137218 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-14 16:45:28,589 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:28,589 INFO SenderThread:137218 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-14 16:45:28,589 INFO SenderThread:137218 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-14 16:45:28,989 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:45:29,049 INFO SenderThread:137218 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/config.yaml +2024-05-14 16:45:29,049 INFO SenderThread:137218 [dir_watcher.py:_on_file_modified():288] file/dir modified: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/output.log +2024-05-14 16:45:29,050 INFO SenderThread:137218 [dir_watcher.py:finish():388] scan: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files +2024-05-14 16:45:29,050 INFO SenderThread:137218 [dir_watcher.py:finish():402] scan save: 
/data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/wandb-metadata.json wandb-metadata.json +2024-05-14 16:45:29,050 INFO SenderThread:137218 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/config.yaml config.yaml +2024-05-14 16:45:29,050 INFO SenderThread:137218 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/output.log output.log +2024-05-14 16:45:29,050 INFO SenderThread:137218 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/requirements.txt requirements.txt +2024-05-14 16:45:29,050 INFO SenderThread:137218 [dir_watcher.py:finish():402] scan save: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/wandb-summary.json wandb-summary.json +2024-05-14 16:45:29,050 INFO SenderThread:137218 [sender.py:transition_state():613] send defer: 10 +2024-05-14 16:45:29,050 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:45:29,053 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:29,054 INFO HandlerThread:137218 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-14 16:45:29,056 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:29,056 INFO SenderThread:137218 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-14 16:45:29,056 INFO SenderThread:137218 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:45:29,308 INFO wandb-upload_2:137218 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/requirements.txt +2024-05-14 16:45:29,458 INFO wandb-upload_1:137218 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/config.yaml +2024-05-14 16:45:29,534 INFO wandb-upload_0:137218 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/output.log +2024-05-14 16:45:29,549 INFO wandb-upload_3:137218 [upload_job.py:push():130] Uploaded file /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/files/wandb-summary.json +2024-05-14 16:45:29,750 INFO Thread-11 (_thread_body):137218 [sender.py:transition_state():613] send defer: 11 +2024-05-14 16:45:29,750 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:29,750 INFO HandlerThread:137218 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-14 16:45:29,750 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:29,750 INFO SenderThread:137218 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-14 16:45:29,750 INFO SenderThread:137218 [file_pusher.py:join():175] waiting for file pusher +2024-05-14 16:45:29,751 INFO SenderThread:137218 [sender.py:transition_state():613] send defer: 12 +2024-05-14 16:45:29,751 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:29,751 INFO HandlerThread:137218 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-14 16:45:29,751 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:29,751 INFO SenderThread:137218 
[sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-14 16:45:29,751 INFO SenderThread:137218 [file_stream.py:finish():601] file stream finish called +2024-05-14 16:45:29,958 INFO SenderThread:137218 [file_stream.py:finish():605] file stream finish is done +2024-05-14 16:45:29,958 INFO SenderThread:137218 [sender.py:transition_state():613] send defer: 13 +2024-05-14 16:45:29,958 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:29,958 INFO HandlerThread:137218 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-14 16:45:29,958 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:29,958 INFO SenderThread:137218 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-14 16:45:29,958 INFO SenderThread:137218 [sender.py:transition_state():613] send defer: 14 +2024-05-14 16:45:29,958 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: defer +2024-05-14 16:45:29,959 INFO HandlerThread:137218 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-14 16:45:29,959 DEBUG SenderThread:137218 [sender.py:send():378] send: final +2024-05-14 16:45:29,959 DEBUG SenderThread:137218 [sender.py:send():378] send: footer +2024-05-14 16:45:29,959 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: defer +2024-05-14 16:45:29,959 INFO SenderThread:137218 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-14 16:45:29,959 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:45:29,959 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:45:29,960 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-14 16:45:29,960 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: poll_exit +2024-05-14 16:45:29,960 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: server_info +2024-05-14 16:45:29,960 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: get_summary +2024-05-14 16:45:29,960 DEBUG SenderThread:137218 [sender.py:send_request():405] send_request: server_info +2024-05-14 16:45:29,961 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-14 16:45:29,961 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-14 16:45:30,025 INFO MainThread:137218 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-14 16:45:30,025 INFO MainThread:137218 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-14 16:45:30,025 INFO MainThread:137218 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-14 16:45:30,026 DEBUG HandlerThread:137218 [handler.py:handle_request():158] handle_request: shutdown +2024-05-14 16:45:30,026 INFO HandlerThread:137218 [handler.py:finish():882] shutting down handler +2024-05-14 16:45:30,960 INFO WriterThread:137218 [datastore.py:close():296] close: /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/run-zso8ava4.wandb +2024-05-14 16:45:31,025 INFO SenderThread:137218 [sender.py:finish():1545] shutting down sender +2024-05-14 16:45:31,025 INFO SenderThread:137218 [file_pusher.py:finish():169] shutting down file pusher +2024-05-14 16:45:31,025 INFO SenderThread:137218 [file_pusher.py:join():175] waiting for file pusher diff 
--git a/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/logs/debug.log b/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..329a4682c8d7365055509e0dd3dbfef292e0cb5d --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-14 16:45:21,804 INFO MainThread:136019 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-14 16:45:21,804 INFO MainThread:136019 [wandb_setup.py:_flush():76] Configure stats pid to 136019 +2024-05-14 16:45:21,804 INFO MainThread:136019 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-14 16:45:21,804 INFO MainThread:136019 [wandb_setup.py:_flush():76] Loading settings from /data/cronscript/lm-evaluation-harness/wandb/settings +2024-05-14 16:45:21,804 INFO MainThread:136019 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-14 16:45:21,804 INFO MainThread:136019 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-14 16:45:21,804 WARNING MainThread:136019 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-14 16:45:21,804 INFO MainThread:136019 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-14 16:45:21,804 INFO MainThread:136019 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-14 16:45:21,804 INFO MainThread:136019 [wandb_init.py:_log_setup():520] Logging user logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/logs/debug.log +2024-05-14 16:45:21,804 INFO MainThread:136019 [wandb_init.py:_log_setup():521] Logging internal logs to /data/cronscript/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/logs/debug-internal.log +2024-05-14 16:45:21,804 INFO MainThread:136019 [wandb_init.py:init():560] calling init triggers +2024-05-14 16:45:21,804 INFO MainThread:136019 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-14 16:45:21,804 INFO MainThread:136019 [wandb_init.py:init():610] starting backend +2024-05-14 16:45:21,804 INFO MainThread:136019 [wandb_init.py:init():614] setting up manager +2024-05-14 16:45:21,806 INFO MainThread:136019 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-14 16:45:21,806 INFO MainThread:136019 [wandb_init.py:init():622] backend started and connected +2024-05-14 16:45:21,810 INFO MainThread:136019 [wandb_init.py:init():711] updated telemetry +2024-05-14 16:45:21,821 INFO MainThread:136019 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-14 16:45:22,052 INFO MainThread:136019 [wandb_run.py:_on_init():2396] communicating current version +2024-05-14 16:45:22,137 INFO MainThread:136019 [wandb_run.py:_on_init():2405] got version response +2024-05-14 16:45:22,138 INFO MainThread:136019 [wandb_init.py:init():795] starting run threads in backend +2024-05-14 16:45:22,359 INFO MainThread:136019 [wandb_run.py:_console_start():2374] atexit reg +2024-05-14 16:45:22,359 INFO MainThread:136019 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-14 16:45:22,359 INFO MainThread:136019 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-14 16:45:22,359 INFO MainThread:136019 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-14 16:45:22,360 INFO MainThread:136019 [wandb_init.py:init():838] run started, returning control to user process +2024-05-14 16:45:31,027 WARNING MsgRouterThr:136019 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/run-zso8ava4.wandb b/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/run-zso8ava4.wandb new file mode 100644 index 0000000000000000000000000000000000000000..3299c0598b80ac390e7e1fc03e765b39539cb259 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240514_164521-zso8ava4/run-zso8ava4.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b03f20c0f082f03b6b1109af46104512f35a7e4d --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716467982 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/output.log b/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..f34f110bc9911b9cb90a4522287857becac668f0 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:12:39:43,445 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:12:39:51,992 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:12:39:51,993 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:12:39:51,993 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step8000'} +2024-05-23:12:39:54,299 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + 
self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step8000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step8000/tree/main' for available files. \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f675c3016b5332c1acf28f436e0b60adeead9c12 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 
+python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.3.0 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/wandb-metadata.json b/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..0a9b081337f61c7f88e576e5ffb6e7f96bc69f28 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T12:39:43.237790", + "startedAt": "2024-05-23T12:39:42.686396", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step8000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2327.3845312500002, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.001, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + 
{ + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 
2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 209.58247756958008 + } + }, + "memory": { + "total": 1007.4379425048828 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..48beeca536f817a3c2679e1e7f0444563180f0e3 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/logs/debug-internal.log @@ -0,0 +1,182 @@ +2024-05-23 12:39:42,709 INFO StreamThr :4638 [internal.py:wandb_internal():85] W&B internal server running at pid: 4638, started at: 2024-05-23 12:39:42.705544 +2024-05-23 12:39:42,712 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: status +2024-05-23 12:39:42,713 INFO WriterThread:4638 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/run-6rczgocn.wandb +2024-05-23 12:39:42,718 DEBUG SenderThread:4638 [sender.py:send():378] send: header +2024-05-23 12:39:42,718 DEBUG SenderThread:4638 [sender.py:send():378] send: run +2024-05-23 12:39:42,969 INFO SenderThread:4638 
[dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files +2024-05-23 12:39:42,969 INFO SenderThread:4638 [sender.py:_start_run_threads():1123] run started: 6rczgocn with start time 1716467982.705404 +2024-05-23 12:39:42,973 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 12:39:42,973 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: check_version +2024-05-23 12:39:43,142 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 12:39:43,145 DEBUG HandlerThread:4638 [system_info.py:__init__():26] System info init +2024-05-23 12:39:43,145 DEBUG HandlerThread:4638 [system_info.py:__init__():41] System info init done +2024-05-23 12:39:43,145 INFO HandlerThread:4638 [system_monitor.py:start():194] Starting system monitor +2024-05-23 12:39:43,145 INFO SystemMonitor:4638 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 12:39:43,145 INFO HandlerThread:4638 [system_monitor.py:probe():214] Collecting system info +2024-05-23 12:39:43,152 INFO SystemMonitor:4638 [interfaces.py:start():188] Started cpu monitoring +2024-05-23 12:39:43,152 INFO SystemMonitor:4638 [interfaces.py:start():188] Started disk monitoring +2024-05-23 12:39:43,158 INFO SystemMonitor:4638 [interfaces.py:start():188] Started memory monitoring +2024-05-23 12:39:43,158 INFO SystemMonitor:4638 [interfaces.py:start():188] Started network monitoring +2024-05-23 12:39:43,237 DEBUG HandlerThread:4638 [system_info.py:probe():150] Probing system +2024-05-23 12:39:43,241 DEBUG HandlerThread:4638 [system_info.py:_probe_git():135] Probing git +2024-05-23 12:39:43,251 ERROR HandlerThread:4638 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 12:39:43,251 DEBUG HandlerThread:4638 [system_info.py:_probe_git():143] Probing git done +2024-05-23 12:39:43,251 DEBUG HandlerThread:4638 [system_info.py:probe():198] Probing system done +2024-05-23 12:39:43,251 DEBUG HandlerThread:4638 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T12:39:43.237790', 'startedAt': '2024-05-23T12:39:42.686396', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step8000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.3845312500002, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.001, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 209.58247756958008}}, 'memory': {'total': 1007.4379425048828}} +2024-05-23 12:39:43,251 INFO HandlerThread:4638 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 12:39:43,251 INFO HandlerThread:4638 [system_monitor.py:probe():227] Publishing system info +2024-05-23 12:39:43,254 INFO HandlerThread:4638 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 12:39:43,259 DEBUG SenderThread:4638 [sender.py:send():378] send: files +2024-05-23 12:39:43,259 INFO SenderThread:4638 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 12:39:43,439 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 12:39:43,439 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: python_packages +2024-05-23 12:39:43,440 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 12:39:43,441 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: stop_status +2024-05-23 12:39:43,578 DEBUG SenderThread:4638 [sender.py:send():378] send: telemetry +2024-05-23 12:39:43,899 INFO wandb-upload_0:4638 [upload_job.py:push():130] Uploaded file /tmp/tmp7t4jpacywandb/xseht91z-wandb-metadata.json +2024-05-23 12:39:43,972 INFO Thread-12 :4638 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/wandb-metadata.json +2024-05-23 12:39:43,972 INFO Thread-12 :4638 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/output.log +2024-05-23 12:39:43,972 INFO Thread-12 :4638 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/requirements.txt +2024-05-23 12:39:45,971 INFO Thread-12 :4638 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/output.log +2024-05-23 12:39:48,580 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:39:53,980 INFO Thread-12 :4638 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/output.log +2024-05-23 12:39:53,994 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:39:54,311 DEBUG SenderThread:4638 [sender.py:send():378] send: exit +2024-05-23 12:39:54,311 INFO SenderThread:4638 
[sender.py:send_exit():585] handling exit code: 1 +2024-05-23 12:39:54,311 INFO SenderThread:4638 [sender.py:send_exit():587] handling runtime: 11 +2024-05-23 12:39:54,312 INFO SenderThread:4638 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 12:39:54,312 INFO SenderThread:4638 [sender.py:send_exit():593] send defer +2024-05-23 12:39:54,313 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:39:54,313 INFO HandlerThread:4638 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 12:39:54,313 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: defer +2024-05-23 12:39:54,313 INFO SenderThread:4638 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 12:39:54,313 INFO SenderThread:4638 [sender.py:transition_state():613] send defer: 1 +2024-05-23 12:39:54,313 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:39:54,313 INFO HandlerThread:4638 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 12:39:54,313 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: defer +2024-05-23 12:39:54,313 INFO SenderThread:4638 [sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 12:39:54,313 INFO SenderThread:4638 [sender.py:transition_state():613] send defer: 2 +2024-05-23 12:39:54,313 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:39:54,313 INFO HandlerThread:4638 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 12:39:54,313 INFO HandlerThread:4638 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 12:39:54,313 DEBUG SystemMonitor:4638 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 12:39:54,313 DEBUG SystemMonitor:4638 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 12:39:54,314 DEBUG SystemMonitor:4638 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 12:39:54,314 INFO HandlerThread:4638 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 12:39:54,314 INFO HandlerThread:4638 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 12:39:54,314 INFO HandlerThread:4638 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 12:39:54,314 INFO HandlerThread:4638 [interfaces.py:finish():200] Joined network monitor +2024-05-23 12:39:54,315 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: defer +2024-05-23 12:39:54,315 INFO SenderThread:4638 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 12:39:54,315 INFO SenderThread:4638 [sender.py:transition_state():613] send defer: 3 +2024-05-23 12:39:54,315 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:39:54,315 INFO HandlerThread:4638 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 12:39:54,315 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: defer +2024-05-23 12:39:54,315 INFO SenderThread:4638 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 12:39:54,315 INFO SenderThread:4638 [sender.py:transition_state():613] send defer: 4 +2024-05-23 12:39:54,315 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:39:54,315 INFO HandlerThread:4638 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 12:39:54,315 DEBUG SenderThread:4638 
[sender.py:send_request():405] send_request: defer +2024-05-23 12:39:54,315 INFO SenderThread:4638 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 12:39:54,315 INFO SenderThread:4638 [sender.py:transition_state():613] send defer: 5 +2024-05-23 12:39:54,315 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:39:54,315 INFO HandlerThread:4638 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 12:39:54,316 DEBUG SenderThread:4638 [sender.py:send():378] send: summary +2024-05-23 12:39:54,316 INFO SenderThread:4638 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 12:39:54,317 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: defer +2024-05-23 12:39:54,317 INFO SenderThread:4638 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 12:39:54,317 INFO SenderThread:4638 [sender.py:transition_state():613] send defer: 6 +2024-05-23 12:39:54,317 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:39:54,317 INFO HandlerThread:4638 [handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 12:39:54,317 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: defer +2024-05-23 12:39:54,317 INFO SenderThread:4638 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 12:39:54,322 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 12:39:54,398 INFO SenderThread:4638 [sender.py:transition_state():613] send defer: 7 +2024-05-23 12:39:54,398 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:39:54,398 INFO HandlerThread:4638 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 12:39:54,398 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: defer +2024-05-23 12:39:54,399 INFO SenderThread:4638 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 12:39:54,981 INFO Thread-12 :4638 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/config.yaml +2024-05-23 12:39:54,982 INFO Thread-12 :4638 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/wandb-summary.json +2024-05-23 12:39:55,311 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:39:55,604 INFO SenderThread:4638 [sender.py:transition_state():613] send defer: 8 +2024-05-23 12:39:55,604 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:39:55,604 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:39:55,604 INFO HandlerThread:4638 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 12:39:55,604 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: defer +2024-05-23 12:39:55,604 INFO SenderThread:4638 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 12:39:55,604 INFO SenderThread:4638 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 12:39:55,605 INFO SenderThread:4638 [job_builder.py:_get_source_type():576] no source found +2024-05-23 12:39:55,605 INFO SenderThread:4638 [sender.py:transition_state():613] send defer: 9 +2024-05-23 12:39:55,605 DEBUG 
HandlerThread:4638 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:39:55,605 INFO HandlerThread:4638 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 12:39:55,605 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: defer +2024-05-23 12:39:55,605 INFO SenderThread:4638 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 12:39:55,605 INFO SenderThread:4638 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 12:39:55,983 INFO SenderThread:4638 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/output.log +2024-05-23 12:39:55,983 INFO SenderThread:4638 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files +2024-05-23 12:39:55,983 INFO SenderThread:4638 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/requirements.txt requirements.txt +2024-05-23 12:39:55,984 INFO SenderThread:4638 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/wandb-metadata.json wandb-metadata.json +2024-05-23 12:39:55,986 INFO SenderThread:4638 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/config.yaml config.yaml +2024-05-23 12:39:55,986 INFO SenderThread:4638 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/wandb-summary.json wandb-summary.json +2024-05-23 12:39:55,986 INFO SenderThread:4638 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/output.log output.log +2024-05-23 12:39:55,986 INFO SenderThread:4638 [sender.py:transition_state():613] send defer: 10 +2024-05-23 12:39:55,987 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:39:55,987 INFO HandlerThread:4638 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 12:39:55,987 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: defer +2024-05-23 12:39:55,987 INFO SenderThread:4638 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 12:39:55,987 INFO SenderThread:4638 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 12:39:56,235 INFO wandb-upload_0:4638 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/requirements.txt +2024-05-23 12:39:56,311 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:39:56,311 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:39:56,512 INFO wandb-upload_1:4638 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/config.yaml +2024-05-23 12:39:56,601 INFO wandb-upload_3:4638 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/output.log +2024-05-23 12:39:56,603 INFO wandb-upload_2:4638 [upload_job.py:push():130] Uploaded file 
/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/files/wandb-summary.json +2024-05-23 12:39:56,803 INFO Thread-11 (_thread_body):4638 [sender.py:transition_state():613] send defer: 11 +2024-05-23 12:39:56,804 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:39:56,804 INFO HandlerThread:4638 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 12:39:56,804 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: defer +2024-05-23 12:39:56,804 INFO SenderThread:4638 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 12:39:56,804 INFO SenderThread:4638 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 12:39:56,804 INFO SenderThread:4638 [sender.py:transition_state():613] send defer: 12 +2024-05-23 12:39:56,804 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:39:56,804 INFO HandlerThread:4638 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 12:39:56,804 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: defer +2024-05-23 12:39:56,805 INFO SenderThread:4638 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 12:39:56,805 INFO SenderThread:4638 [file_stream.py:finish():601] file stream finish called +2024-05-23 12:39:56,864 INFO SenderThread:4638 [file_stream.py:finish():605] file stream finish is done +2024-05-23 12:39:56,864 INFO SenderThread:4638 [sender.py:transition_state():613] send defer: 13 +2024-05-23 12:39:56,864 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:39:56,864 INFO HandlerThread:4638 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 12:39:56,865 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: defer +2024-05-23 12:39:56,865 INFO SenderThread:4638 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 12:39:56,865 INFO SenderThread:4638 [sender.py:transition_state():613] send defer: 14 +2024-05-23 12:39:56,865 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: defer +2024-05-23 12:39:56,865 INFO HandlerThread:4638 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 12:39:56,865 DEBUG SenderThread:4638 [sender.py:send():378] send: final +2024-05-23 12:39:56,865 DEBUG SenderThread:4638 [sender.py:send():378] send: footer +2024-05-23 12:39:56,865 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: defer +2024-05-23 12:39:56,865 INFO SenderThread:4638 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 12:39:56,866 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:39:56,866 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 12:39:56,866 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 12:39:56,866 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 12:39:56,866 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 12:39:56,866 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 12:39:56,866 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 12:39:56,867 DEBUG SenderThread:4638 [sender.py:send_request():405] 
send_request: poll_exit +2024-05-23 12:39:56,867 DEBUG SenderThread:4638 [sender.py:send_request():405] send_request: server_info +2024-05-23 12:39:56,918 INFO MainThread:4638 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 12:39:56,918 INFO MainThread:4638 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 12:39:56,918 INFO MainThread:4638 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 12:39:56,919 DEBUG HandlerThread:4638 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 12:39:56,919 INFO HandlerThread:4638 [handler.py:finish():882] shutting down handler +2024-05-23 12:39:57,866 INFO WriterThread:4638 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/run-6rczgocn.wandb +2024-05-23 12:39:57,918 INFO SenderThread:4638 [sender.py:finish():1545] shutting down sender +2024-05-23 12:39:57,918 INFO SenderThread:4638 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 12:39:57,918 INFO SenderThread:4638 [file_pusher.py:join():175] waiting for file pusher diff --git a/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..6f70ad280afeed4eebd3df5f373e30650a08c07b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 12:39:42,700 INFO MainThread:4483 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 12:39:42,700 INFO MainThread:4483 [wandb_setup.py:_flush():76] Configure stats pid to 4483 +2024-05-23 12:39:42,700 INFO MainThread:4483 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 12:39:42,700 INFO MainThread:4483 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 12:39:42,700 INFO MainThread:4483 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 12:39:42,700 INFO MainThread:4483 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 12:39:42,700 WARNING MainThread:4483 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 12:39:42,700 INFO MainThread:4483 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 12:39:42,700 INFO MainThread:4483 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 12:39:42,701 INFO MainThread:4483 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/logs/debug.log +2024-05-23 12:39:42,701 INFO MainThread:4483 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/logs/debug-internal.log +2024-05-23 12:39:42,701 INFO MainThread:4483 [wandb_init.py:init():560] calling init triggers +2024-05-23 12:39:42,701 INFO MainThread:4483 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 12:39:42,701 INFO MainThread:4483 [wandb_init.py:init():610] starting backend +2024-05-23 12:39:42,701 INFO MainThread:4483 [wandb_init.py:init():614] setting up manager +2024-05-23 12:39:42,704 
INFO MainThread:4483 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 12:39:42,705 INFO MainThread:4483 [wandb_init.py:init():622] backend started and connected +2024-05-23 12:39:42,708 INFO MainThread:4483 [wandb_init.py:init():711] updated telemetry +2024-05-23 12:39:42,717 INFO MainThread:4483 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 12:39:42,972 INFO MainThread:4483 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 12:39:43,136 INFO MainThread:4483 [wandb_run.py:_on_init():2405] got version response +2024-05-23 12:39:43,137 INFO MainThread:4483 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 12:39:43,440 INFO MainThread:4483 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 12:39:43,440 INFO MainThread:4483 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 12:39:43,440 INFO MainThread:4483 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 12:39:43,440 INFO MainThread:4483 [wandb_run.py:_redirect():2319] Redirects installed. +2024-05-23 12:39:43,442 INFO MainThread:4483 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 12:39:57,920 WARNING MsgRouterThr:4483 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/run-6rczgocn.wandb b/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/run-6rczgocn.wandb new file mode 100644 index 0000000000000000000000000000000000000000..0b0300224b3b22f3c40ded259364dba485bc06e7 Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_123942-6rczgocn/run-6rczgocn.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/config.yaml b/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..00bb5e363092e213b0e1b0f92f1ad4e96827c49f --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/config.yaml @@ -0,0 +1,43 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.41.1 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716469733 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 23 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.41.1 + 8: + - 5 + 13: linux-x86_64 diff --git a/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/output.log b/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..76b228d76d2021f62c1000e383ec2e68548a5764 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/output.log @@ -0,0 +1,34 @@ + +2024-05-23:13:08:54,324 INFO [__main__.py:251] Verbosity set to INFO +2024-05-23:13:09:02,773 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande'] +2024-05-23:13:09:02,774 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-23:13:09:02,775 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step24000'} 
+2024-05-23:13:09:05,069 INFO [huggingface.py:164] Using device 'cuda' +Traceback (most recent call last): + File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main + return _run_code(code, main_globals, None, + File "/usr/lib/python3.10/runpy.py", line 86, in _run_code + exec(code, run_globals) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in + cli_evaluate() + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate + results = evaluator.simple_evaluate( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper + return fn(*args, **kwargs) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate + lm = lm_eval.api.registry.get_model(model).create_from_arg_string( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string + return cls(**args, **args2) + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__ + self._get_config( + File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config + self._config = transformers.AutoConfig.from_pretrained( + File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict + config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict + resolved_config_file = cached_file( + File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file + raise EnvironmentError( +OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step24000 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step24000/tree/main' for available files. 
\ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/requirements.txt b/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..f675c3016b5332c1acf28f436e0b60adeead9c12 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/requirements.txt @@ -0,0 +1,155 @@ +DataProperty==1.0.1 +GitPython==3.1.43 +Jinja2==3.1.4 +Markdown==3.6 +MarkupSafe==2.1.5 +Pillow-SIMD==7.0.0.post3 +PyYAML==6.0 +Werkzeug==3.0.3 +absl-py==2.1.0 +accelerate==0.30.1 +aiohttp==3.9.5 +aiosignal==1.3.1 +async-timeout==4.0.3 +attrs==23.2.0 +av==9.2.0 +cachetools==5.3.3 +certifi==2024.2.2 +cffi==1.15.1 +cfgv==3.4.0 +chardet==5.2.0 +charset-normalizer==3.3.2 +click==8.1.7 +cmake==3.29.2 +colorama==0.4.6 +datasets==2.19.1 +deepspeed==0.12.4+hpu.synapse.v1.15.1 +dill==0.3.8 +distlib==0.3.8 +docker-pycreds==0.4.0 +einops==0.8.0 +evaluate==0.4.2 +exceptiongroup==1.2.1 +expecttest==0.2.1 +filelock==3.14.0 +frozenlist==1.4.1 +fsspec==2024.3.1 +gitdb==4.0.11 +google-auth-oauthlib==0.4.6 +google-auth==2.29.0 +grpcio==1.63.0 +habana-media-loader==1.15.1.15 +habana-pyhlml==1.15.1.15 +habana-torch-dataloader==1.15.1.15 +habana-torch-plugin==1.15.1.15 +habana_gpu_migration==1.15.1.15 +habana_quantization_toolkit==1.15.1.15 +hjson==3.1.0 +huggingface-hub==0.23.1 +identify==2.5.36 +idna==3.7 +iniconfig==2.0.0 +joblib==1.4.2 +jsonlines==4.0.0 +lightning-habana==1.4.0 +lightning-utilities==0.11.2 +lightning==2.2.0.post0 +lm_eval==0.4.2 +lm_eval==0.4.2 +lm_eval==0.4.2 +lxml==5.2.2 +mbstrdecoder==1.1.3 +more-itertools==10.2.0 +mpi4py==3.1.4 +mpmath==1.3.0 +multidict==6.0.5 +multiprocess==0.70.16 +networkx==3.3 +ninja==1.11.1.1 +nltk==3.8.1 +nodeenv==1.8.0 +numexpr==2.10.0 +numpy==1.23.5 +oauthlib==3.2.2 +packaging==24.0 +pandas==2.0.1 +pathspec==0.12.1 +pathvalidate==3.2.0 +peft==0.11.1 +perfetto==0.7.0 +pillow==10.3.0 +pip==22.0.2 +pip==23.3.1 +platformdirs==4.2.1 +pluggy==1.5.0 +portalocker==2.8.2 +pre-commit==3.3.3 +pretty-errors==1.2.25 +protobuf==3.20.3 +psutil==5.9.8 +py-cpuinfo==9.0.0 +pyarrow-hotfix==0.6 +pyarrow==16.1.0 +pyasn1==0.6.0 +pyasn1_modules==0.4.0 +pybind11==2.10.4 +pycparser==2.22 +pydantic==1.10.13 +pynvml==8.0.4 +pytablewriter==1.2.0 +pytest==8.2.0 +python-dateutil==2.9.0.post0 +pytorch-lightning==2.2.4 +pytz==2024.1 +regex==2023.5.5 +requests-oauthlib==2.0.0 +requests==2.31.0 +rouge_score==0.1.2 +rsa==4.9 +sacrebleu==2.4.2 +safetensors==0.4.3 +scikit-learn==1.5.0 +scipy==1.13.1 +sentencepiece==0.2.0 +sentry-sdk==2.3.0 +setproctitle==1.3.3 +setuptools==59.6.0 +setuptools==69.5.1 +six==1.16.0 +smmap==5.0.1 +sqlitedict==2.1.0 +symengine==0.11.0 +sympy==1.12 +tabledata==1.3.3 +tabulate==0.9.0 +tcolorpy==0.1.6 +tdqm==0.0.1 +tensorboard-data-server==0.6.1 +tensorboard-plugin-wit==1.8.1 +tensorboard==2.11.2 +threadpoolctl==3.5.0 +tokenizers==0.19.1 +tomli==2.0.1 +torch==2.2.0a0+git8964477 +torch_tb_profiler==0.4.0 +torchaudio==2.2.0+08901ad +torchdata==0.7.1+5e6f7b7 +torchmetrics==1.4.0 +torchtext==0.17.0+400da5c +torchvision==0.17.0+b2383d4 +tqdm-multiprocess==0.0.11 +tqdm==4.66.4 +transformers==4.41.1 +typepy==1.3.2 +typing_extensions==4.11.0 +tzdata==2024.1 +urllib3==1.26.18 +virtualenv==20.26.1 +wandb==0.17.0 +wheel==0.37.1 +wheel==0.43.0 +word2number==1.1 +xxhash==3.4.1 +yamllint==1.35.1 +yarl==1.9.4 +zstandard==0.22.0 \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/wandb-metadata.json 
b/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..c1d77d47b2a6e051d564b3d64b58381e29eeeebb --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/wandb-metadata.json @@ -0,0 +1,850 @@ +{ + "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35", + "python": "3.10.12", + "heartbeatAt": "2024-05-23T13:08:54.117031", + "startedAt": "2024-05-23T13:08:53.634590", + "docker": null, + "cuda": null, + "args": [ + "--model", + "hf", + "--model_args", + "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step24000", + "--tasks", + "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc", + "--batch_size", + "auto", + "--wandb_args", + "project=bharatgpt,group=trial_expt_2" + ], + "state": "running", + "program": "-m lm_eval.__main__", + "codePathLocal": null, + "git": { + "remote": "https://github.com/EleutherAI/lm-evaluation-harness", + "commit": null + }, + "email": null, + "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness", + "host": "peacock-evaluation-worker-0", + "username": "root", + "executable": "/usr/bin/python3", + "cpu_count": 80, + "cpu_count_logical": 160, + "cpu_freq": { + "current": 2334.010675, + "min": 800.0, + "max": 3400.0 + }, + "cpu_freq_per_core": [ + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + 
"min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 3400.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + 
"max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + 
{ + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + }, + { + "current": 2300.0, + "min": 800.0, + "max": 3400.0 + } + ], + "disk": { + "/": { + "total": 877.6341285705566, + "used": 211.62891006469727 + } + }, + "memory": { + "total": 1007.4379539489746 + } +} diff --git a/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/wandb-summary.json b/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/wandb-summary.json new file mode 100644 index 0000000000000000000000000000000000000000..8bf99d152ad35c3699ec8600ecb8b169d4e35875 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/wandb-summary.json @@ -0,0 +1 @@ +{"_wandb": {"runtime": 11}} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/logs/debug-internal.log b/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..26c2b80934dc18f4827988a80c2e867179084228 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/logs/debug-internal.log @@ -0,0 +1,183 @@ +2024-05-23 13:08:53,654 INFO StreamThr :3128 [internal.py:wandb_internal():85] W&B internal server running at pid: 3128, started at: 2024-05-23 13:08:53.653043 +2024-05-23 13:08:53,659 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: status +2024-05-23 13:08:53,661 INFO WriterThread:3128 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/run-oqhkxkfg.wandb +2024-05-23 13:08:53,662 DEBUG SenderThread:3128 [sender.py:send():378] send: header +2024-05-23 13:08:53,665 DEBUG SenderThread:3128 [sender.py:send():378] send: run +2024-05-23 13:08:53,914 INFO SenderThread:3128 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files +2024-05-23 13:08:53,914 INFO SenderThread:3128 [sender.py:_start_run_threads():1123] run started: oqhkxkfg with start time 1716469733.65371 +2024-05-23 13:08:53,923 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: check_version +2024-05-23 13:08:53,923 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: check_version +2024-05-23 13:08:54,041 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: run_start +2024-05-23 13:08:54,043 DEBUG HandlerThread:3128 [system_info.py:__init__():26] System info init +2024-05-23 13:08:54,043 DEBUG HandlerThread:3128 [system_info.py:__init__():41] System info init done +2024-05-23 13:08:54,043 INFO HandlerThread:3128 [system_monitor.py:start():194] Starting system monitor +2024-05-23 13:08:54,044 INFO SystemMonitor:3128 [system_monitor.py:_start():158] Starting system asset monitoring threads +2024-05-23 13:08:54,044 INFO HandlerThread:3128 [system_monitor.py:probe():214] Collecting system info +2024-05-23 13:08:54,050 INFO SystemMonitor:3128 [interfaces.py:start():188] Started cpu 
monitoring +2024-05-23 13:08:54,051 INFO SystemMonitor:3128 [interfaces.py:start():188] Started disk monitoring +2024-05-23 13:08:54,052 INFO SystemMonitor:3128 [interfaces.py:start():188] Started memory monitoring +2024-05-23 13:08:54,053 INFO SystemMonitor:3128 [interfaces.py:start():188] Started network monitoring +2024-05-23 13:08:54,116 DEBUG HandlerThread:3128 [system_info.py:probe():150] Probing system +2024-05-23 13:08:54,120 DEBUG HandlerThread:3128 [system_info.py:_probe_git():135] Probing git +2024-05-23 13:08:54,130 ERROR HandlerThread:3128 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128) + cmdline: git rev-parse --show-toplevel + stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +To add an exception for this directory, call: + + git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness' +2024-05-23 13:08:54,130 DEBUG HandlerThread:3128 [system_info.py:_probe_git():143] Probing git done +2024-05-23 13:08:54,130 DEBUG HandlerThread:3128 [system_info.py:probe():198] Probing system done +2024-05-23 13:08:54,130 DEBUG HandlerThread:3128 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T13:08:54.117031', 'startedAt': '2024-05-23T13:08:53.634590', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step24000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2334.010675, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 
'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 211.62891006469727}}, 'memory': {'total': 1007.4379539489746}} +2024-05-23 13:08:54,130 INFO HandlerThread:3128 [system_monitor.py:probe():224] Finished collecting system info +2024-05-23 13:08:54,130 INFO HandlerThread:3128 [system_monitor.py:probe():227] Publishing system info +2024-05-23 13:08:54,133 INFO HandlerThread:3128 [system_monitor.py:probe():229] Finished publishing system info +2024-05-23 13:08:54,138 
DEBUG SenderThread:3128 [sender.py:send():378] send: files +2024-05-23 13:08:54,138 INFO SenderThread:3128 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now +2024-05-23 13:08:54,318 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: python_packages +2024-05-23 13:08:54,318 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: python_packages +2024-05-23 13:08:54,320 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: stop_status +2024-05-23 13:08:54,320 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: stop_status +2024-05-23 13:08:54,435 DEBUG SenderThread:3128 [sender.py:send():378] send: telemetry +2024-05-23 13:08:54,713 INFO wandb-upload_0:3128 [upload_job.py:push():130] Uploaded file /tmp/tmp9i3pjivqwandb/a7dtr6g3-wandb-metadata.json +2024-05-23 13:08:54,916 INFO Thread-12 :3128 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/requirements.txt +2024-05-23 13:08:54,916 INFO Thread-12 :3128 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/wandb-metadata.json +2024-05-23 13:08:54,916 INFO Thread-12 :3128 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/output.log +2024-05-23 13:08:56,916 INFO Thread-12 :3128 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/output.log +2024-05-23 13:08:59,437 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:09:04,778 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:09:04,923 INFO Thread-12 :3128 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/output.log +2024-05-23 13:09:05,078 DEBUG SenderThread:3128 [sender.py:send():378] send: exit +2024-05-23 13:09:05,079 INFO SenderThread:3128 [sender.py:send_exit():585] handling exit code: 1 +2024-05-23 13:09:05,079 INFO SenderThread:3128 [sender.py:send_exit():587] handling runtime: 11 +2024-05-23 13:09:05,080 INFO SenderThread:3128 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 13:09:05,080 INFO SenderThread:3128 [sender.py:send_exit():593] send defer +2024-05-23 13:09:05,081 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:09:05,081 INFO HandlerThread:3128 [handler.py:handle_request_defer():184] handle defer: 0 +2024-05-23 13:09:05,081 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: defer +2024-05-23 13:09:05,081 INFO SenderThread:3128 [sender.py:send_request_defer():609] handle sender defer: 0 +2024-05-23 13:09:05,081 INFO SenderThread:3128 [sender.py:transition_state():613] send defer: 1 +2024-05-23 13:09:05,081 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:09:05,081 INFO HandlerThread:3128 [handler.py:handle_request_defer():184] handle defer: 1 +2024-05-23 13:09:05,081 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: defer +2024-05-23 13:09:05,081 INFO SenderThread:3128 
[sender.py:send_request_defer():609] handle sender defer: 1 +2024-05-23 13:09:05,081 INFO SenderThread:3128 [sender.py:transition_state():613] send defer: 2 +2024-05-23 13:09:05,081 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:09:05,081 INFO HandlerThread:3128 [handler.py:handle_request_defer():184] handle defer: 2 +2024-05-23 13:09:05,081 INFO HandlerThread:3128 [system_monitor.py:finish():203] Stopping system monitor +2024-05-23 13:09:05,081 DEBUG SystemMonitor:3128 [system_monitor.py:_start():172] Starting system metrics aggregation loop +2024-05-23 13:09:05,081 DEBUG SystemMonitor:3128 [system_monitor.py:_start():179] Finished system metrics aggregation loop +2024-05-23 13:09:05,082 DEBUG SystemMonitor:3128 [system_monitor.py:_start():183] Publishing last batch of metrics +2024-05-23 13:09:05,082 INFO HandlerThread:3128 [interfaces.py:finish():200] Joined cpu monitor +2024-05-23 13:09:05,083 INFO HandlerThread:3128 [interfaces.py:finish():200] Joined disk monitor +2024-05-23 13:09:05,083 INFO HandlerThread:3128 [interfaces.py:finish():200] Joined memory monitor +2024-05-23 13:09:05,084 INFO HandlerThread:3128 [interfaces.py:finish():200] Joined network monitor +2024-05-23 13:09:05,084 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: defer +2024-05-23 13:09:05,084 INFO SenderThread:3128 [sender.py:send_request_defer():609] handle sender defer: 2 +2024-05-23 13:09:05,084 INFO SenderThread:3128 [sender.py:transition_state():613] send defer: 3 +2024-05-23 13:09:05,084 DEBUG SenderThread:3128 [sender.py:send():378] send: stats +2024-05-23 13:09:05,085 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:09:05,085 INFO HandlerThread:3128 [handler.py:handle_request_defer():184] handle defer: 3 +2024-05-23 13:09:05,085 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: defer +2024-05-23 13:09:05,086 INFO SenderThread:3128 [sender.py:send_request_defer():609] handle sender defer: 3 +2024-05-23 13:09:05,086 INFO SenderThread:3128 [sender.py:transition_state():613] send defer: 4 +2024-05-23 13:09:05,086 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:09:05,086 INFO HandlerThread:3128 [handler.py:handle_request_defer():184] handle defer: 4 +2024-05-23 13:09:05,086 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: defer +2024-05-23 13:09:05,086 INFO SenderThread:3128 [sender.py:send_request_defer():609] handle sender defer: 4 +2024-05-23 13:09:05,086 INFO SenderThread:3128 [sender.py:transition_state():613] send defer: 5 +2024-05-23 13:09:05,086 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:09:05,086 INFO HandlerThread:3128 [handler.py:handle_request_defer():184] handle defer: 5 +2024-05-23 13:09:05,086 DEBUG SenderThread:3128 [sender.py:send():378] send: summary +2024-05-23 13:09:05,087 INFO SenderThread:3128 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end +2024-05-23 13:09:05,087 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: defer +2024-05-23 13:09:05,087 INFO SenderThread:3128 [sender.py:send_request_defer():609] handle sender defer: 5 +2024-05-23 13:09:05,087 INFO SenderThread:3128 [sender.py:transition_state():613] send defer: 6 +2024-05-23 13:09:05,087 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:09:05,087 INFO HandlerThread:3128 
[handler.py:handle_request_defer():184] handle defer: 6 +2024-05-23 13:09:05,087 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: defer +2024-05-23 13:09:05,087 INFO SenderThread:3128 [sender.py:send_request_defer():609] handle sender defer: 6 +2024-05-23 13:09:05,092 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: status_report +2024-05-23 13:09:05,181 INFO SenderThread:3128 [sender.py:transition_state():613] send defer: 7 +2024-05-23 13:09:05,181 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:09:05,181 INFO HandlerThread:3128 [handler.py:handle_request_defer():184] handle defer: 7 +2024-05-23 13:09:05,182 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: defer +2024-05-23 13:09:05,182 INFO SenderThread:3128 [sender.py:send_request_defer():609] handle sender defer: 7 +2024-05-23 13:09:05,924 INFO Thread-12 :3128 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/config.yaml +2024-05-23 13:09:05,925 INFO Thread-12 :3128 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/wandb-summary.json +2024-05-23 13:09:06,079 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:09:06,458 INFO SenderThread:3128 [sender.py:transition_state():613] send defer: 8 +2024-05-23 13:09:06,458 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:09:06,458 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:09:06,458 INFO HandlerThread:3128 [handler.py:handle_request_defer():184] handle defer: 8 +2024-05-23 13:09:06,458 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: defer +2024-05-23 13:09:06,458 INFO SenderThread:3128 [sender.py:send_request_defer():609] handle sender defer: 8 +2024-05-23 13:09:06,458 INFO SenderThread:3128 [job_builder.py:build():432] Attempting to build job artifact +2024-05-23 13:09:06,459 INFO SenderThread:3128 [job_builder.py:_get_source_type():576] no source found +2024-05-23 13:09:06,459 INFO SenderThread:3128 [sender.py:transition_state():613] send defer: 9 +2024-05-23 13:09:06,459 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:09:06,459 INFO HandlerThread:3128 [handler.py:handle_request_defer():184] handle defer: 9 +2024-05-23 13:09:06,459 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: defer +2024-05-23 13:09:06,459 INFO SenderThread:3128 [sender.py:send_request_defer():609] handle sender defer: 9 +2024-05-23 13:09:06,459 INFO SenderThread:3128 [dir_watcher.py:finish():358] shutting down directory watcher +2024-05-23 13:09:06,926 INFO SenderThread:3128 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/output.log +2024-05-23 13:09:06,926 INFO SenderThread:3128 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files +2024-05-23 13:09:06,926 INFO SenderThread:3128 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/output.log output.log +2024-05-23 13:09:06,927 INFO SenderThread:3128 
[dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/wandb-summary.json wandb-summary.json +2024-05-23 13:09:06,929 INFO SenderThread:3128 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/config.yaml config.yaml +2024-05-23 13:09:06,929 INFO SenderThread:3128 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/wandb-metadata.json wandb-metadata.json +2024-05-23 13:09:06,929 INFO SenderThread:3128 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/requirements.txt requirements.txt +2024-05-23 13:09:06,929 INFO SenderThread:3128 [sender.py:transition_state():613] send defer: 10 +2024-05-23 13:09:06,929 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:09:06,930 INFO HandlerThread:3128 [handler.py:handle_request_defer():184] handle defer: 10 +2024-05-23 13:09:06,930 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: defer +2024-05-23 13:09:06,930 INFO SenderThread:3128 [sender.py:send_request_defer():609] handle sender defer: 10 +2024-05-23 13:09:06,930 INFO SenderThread:3128 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 13:09:07,079 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:09:07,079 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:09:07,174 INFO wandb-upload_0:3128 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/output.log +2024-05-23 13:09:07,534 INFO wandb-upload_1:3128 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/wandb-summary.json +2024-05-23 13:09:07,579 INFO wandb-upload_2:3128 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/config.yaml +2024-05-23 13:09:07,582 INFO wandb-upload_3:3128 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/files/requirements.txt +2024-05-23 13:09:07,782 INFO Thread-11 (_thread_body):3128 [sender.py:transition_state():613] send defer: 11 +2024-05-23 13:09:07,782 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:09:07,782 INFO HandlerThread:3128 [handler.py:handle_request_defer():184] handle defer: 11 +2024-05-23 13:09:07,782 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: defer +2024-05-23 13:09:07,782 INFO SenderThread:3128 [sender.py:send_request_defer():609] handle sender defer: 11 +2024-05-23 13:09:07,782 INFO SenderThread:3128 [file_pusher.py:join():175] waiting for file pusher +2024-05-23 13:09:07,782 INFO SenderThread:3128 [sender.py:transition_state():613] send defer: 12 +2024-05-23 13:09:07,783 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:09:07,783 INFO HandlerThread:3128 [handler.py:handle_request_defer():184] handle defer: 12 +2024-05-23 13:09:07,783 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: defer +2024-05-23 13:09:07,783 INFO 
SenderThread:3128 [sender.py:send_request_defer():609] handle sender defer: 12 +2024-05-23 13:09:07,783 INFO SenderThread:3128 [file_stream.py:finish():601] file stream finish called +2024-05-23 13:09:07,867 INFO SenderThread:3128 [file_stream.py:finish():605] file stream finish is done +2024-05-23 13:09:07,867 INFO SenderThread:3128 [sender.py:transition_state():613] send defer: 13 +2024-05-23 13:09:07,867 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:09:07,867 INFO HandlerThread:3128 [handler.py:handle_request_defer():184] handle defer: 13 +2024-05-23 13:09:07,867 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: defer +2024-05-23 13:09:07,867 INFO SenderThread:3128 [sender.py:send_request_defer():609] handle sender defer: 13 +2024-05-23 13:09:07,867 INFO SenderThread:3128 [sender.py:transition_state():613] send defer: 14 +2024-05-23 13:09:07,867 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: defer +2024-05-23 13:09:07,867 INFO HandlerThread:3128 [handler.py:handle_request_defer():184] handle defer: 14 +2024-05-23 13:09:07,867 DEBUG SenderThread:3128 [sender.py:send():378] send: final +2024-05-23 13:09:07,868 DEBUG SenderThread:3128 [sender.py:send():378] send: footer +2024-05-23 13:09:07,868 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: defer +2024-05-23 13:09:07,868 INFO SenderThread:3128 [sender.py:send_request_defer():609] handle sender defer: 14 +2024-05-23 13:09:07,868 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:09:07,868 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:09:07,869 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: poll_exit +2024-05-23 13:09:07,869 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: server_info +2024-05-23 13:09:07,869 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: get_summary +2024-05-23 13:09:07,869 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: sampled_history +2024-05-23 13:09:07,869 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: internal_messages +2024-05-23 13:09:07,869 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: poll_exit +2024-05-23 13:09:07,869 DEBUG SenderThread:3128 [sender.py:send_request():405] send_request: server_info +2024-05-23 13:09:07,924 INFO MainThread:3128 [wandb_run.py:_footer_history_summary_info():3994] rendering history +2024-05-23 13:09:07,924 INFO MainThread:3128 [wandb_run.py:_footer_history_summary_info():4026] rendering summary +2024-05-23 13:09:07,924 INFO MainThread:3128 [wandb_run.py:_footer_sync_info():3953] logging synced files +2024-05-23 13:09:07,925 DEBUG HandlerThread:3128 [handler.py:handle_request():158] handle_request: shutdown +2024-05-23 13:09:07,925 INFO HandlerThread:3128 [handler.py:finish():882] shutting down handler +2024-05-23 13:09:08,869 INFO WriterThread:3128 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/run-oqhkxkfg.wandb +2024-05-23 13:09:08,924 INFO SenderThread:3128 [sender.py:finish():1545] shutting down sender +2024-05-23 13:09:08,924 INFO SenderThread:3128 [file_pusher.py:finish():169] shutting down file pusher +2024-05-23 13:09:08,924 INFO SenderThread:3128 [file_pusher.py:join():175] waiting for file pusher diff --git 
a/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/logs/debug.log b/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..09ebf1476e4ae000adfe5e8004af1cfd4bc4a6a3 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/logs/debug.log @@ -0,0 +1,29 @@ +2024-05-23 13:08:53,648 INFO MainThread:2973 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0 +2024-05-23 13:08:53,648 INFO MainThread:2973 [wandb_setup.py:_flush():76] Configure stats pid to 2973 +2024-05-23 13:08:53,648 INFO MainThread:2973 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings +2024-05-23 13:08:53,648 INFO MainThread:2973 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings +2024-05-23 13:08:53,648 INFO MainThread:2973 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2024-05-23 13:08:53,648 INFO MainThread:2973 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False} +2024-05-23 13:08:53,648 WARNING MainThread:2973 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__ +2024-05-23 13:08:53,648 INFO MainThread:2973 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'} +2024-05-23 13:08:53,648 INFO MainThread:2973 [wandb_setup.py:_flush():76] Applying login settings: {} +2024-05-23 13:08:53,648 INFO MainThread:2973 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/logs/debug.log +2024-05-23 13:08:53,649 INFO MainThread:2973 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/logs/debug-internal.log +2024-05-23 13:08:53,649 INFO MainThread:2973 [wandb_init.py:init():560] calling init triggers +2024-05-23 13:08:53,649 INFO MainThread:2973 [wandb_init.py:init():567] wandb.init called with sweep_config: {} +config: {} +2024-05-23 13:08:53,649 INFO MainThread:2973 [wandb_init.py:init():610] starting backend +2024-05-23 13:08:53,649 INFO MainThread:2973 [wandb_init.py:init():614] setting up manager +2024-05-23 13:08:53,652 INFO MainThread:2973 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn +2024-05-23 13:08:53,653 INFO MainThread:2973 [wandb_init.py:init():622] backend started and connected +2024-05-23 13:08:53,656 INFO MainThread:2973 [wandb_init.py:init():711] updated telemetry +2024-05-23 13:08:53,664 INFO MainThread:2973 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout +2024-05-23 13:08:53,922 INFO MainThread:2973 [wandb_run.py:_on_init():2396] communicating current version +2024-05-23 13:08:54,035 INFO MainThread:2973 [wandb_run.py:_on_init():2405] got version response +2024-05-23 13:08:54,035 INFO MainThread:2973 [wandb_init.py:init():795] starting run threads in backend +2024-05-23 13:08:54,319 INFO MainThread:2973 [wandb_run.py:_console_start():2374] atexit reg +2024-05-23 13:08:54,319 INFO MainThread:2973 [wandb_run.py:_redirect():2229] redirect: wrap_raw +2024-05-23 13:08:54,319 INFO MainThread:2973 [wandb_run.py:_redirect():2294] Wrapping output streams. +2024-05-23 13:08:54,319 INFO MainThread:2973 [wandb_run.py:_redirect():2319] Redirects installed. 
+2024-05-23 13:08:54,321 INFO MainThread:2973 [wandb_init.py:init():838] run started, returning control to user process +2024-05-23 13:09:08,925 WARNING MsgRouterThr:2973 [router.py:message_loop():77] message_loop has been closed diff --git a/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/run-oqhkxkfg.wandb b/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/run-oqhkxkfg.wandb new file mode 100644 index 0000000000000000000000000000000000000000..5b30830e94ef611bbb5c948b420403cf42c2d80a Binary files /dev/null and b/lm-evaluation-harness/wandb/run-20240523_130853-oqhkxkfg/run-oqhkxkfg.wandb differ diff --git a/lm-evaluation-harness/wandb/run-20240529_130547-x16dn3vu/files/config.yaml b/lm-evaluation-harness/wandb/run-20240529_130547-x16dn3vu/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..264fe1a159f7ca369229a5508b6f55fdbd0fb957 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240529_130547-x16dn3vu/files/config.yaml @@ -0,0 +1,283 @@ +wandb_version: 1 + +_wandb: + desc: null + value: + python_version: 3.10.12 + cli_version: 0.17.0 + framework: huggingface + huggingface_version: 4.36.2 + is_jupyter_run: false + is_kaggle_kernel: false + start_time: 1716987947 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + - 71 + - 98 + - 100 + 3: + - 2 + - 23 + - 62 + 4: 3.10.12 + 5: 0.17.0 + 6: 4.36.2 + 8: + - 5 + 13: linux-x86_64 +task_configs: + desc: null + value: + arc_easy: + task: arc_easy + group: + - ai2_arc + dataset_path: allenai/ai2_arc + dataset_name: ARC-Easy + training_split: train + validation_split: validation + test_split: test + doc_to_text: 'Question: {{question}} + + Answer:' + doc_to_target: '{{choices.label.index(answerKey)}}' + doc_to_choice: '{{choices.text}}' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: 'Question: {{question}} + + Answer:' + metadata: + version: 1.0 + boolq: + task: boolq + group: + - super-glue-lm-eval-v1 + dataset_path: super_glue + dataset_name: boolq + training_split: train + validation_split: validation + doc_to_text: '{{passage}} + + Question: {{question}}? 
+ + Answer:' + doc_to_target: label + doc_to_choice: + - 'no' + - 'yes' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: passage + metadata: + version: 2.0 + copa: + task: copa + group: + - super-glue-lm-eval-v1 + dataset_path: super_glue + dataset_name: copa + training_split: train + validation_split: validation + doc_to_text: "def doc_to_text(doc):\n # Drop the period\n connector =\ + \ {\n \"cause\": \"because\",\n \"effect\": \"therefore\",\n\ + \ }[doc[\"question\"]]\n return doc[\"premise\"].strip()[:-1] + f\"\ + \ {connector}\"\n" + doc_to_target: "def doc_to_target(doc):\n correct_choice = doc[\"choice1\"\ + ] if doc[\"label\"] == 0 else doc[\"choice2\"]\n # Connect the sentences\n\ + \ return \" \" + convert_choice(correct_choice)\n" + doc_to_choice: "def doc_to_choice(doc):\n return [\" \" + convert_choice(doc[\"\ + choice1\"]), \" \" + convert_choice(doc[\"choice2\"])]\n" + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + mrpc: + task: mrpc + group: glue + dataset_path: glue + dataset_name: mrpc + training_split: train + validation_split: validation + doc_to_text: 'Sentence 1: {{sentence1}} + + Sentence 2: {{sentence2}} + + Question: Do both sentences mean the same thing? + + Answer:' + doc_to_target: label + doc_to_choice: + - 'no' + - 'yes' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + - metric: f1 + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + piqa: + task: piqa + dataset_path: piqa + training_split: train + validation_split: validation + doc_to_text: 'Question: {{goal}} + + Answer:' + doc_to_target: label + doc_to_choice: '{{[sol1, sol2]}}' + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + - metric: acc_norm + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: goal + metadata: + version: 1.0 + sst2: + task: sst2 + group: glue + dataset_path: glue + dataset_name: sst2 + training_split: train + validation_split: validation + doc_to_text: '{{sentence}} + + Question: Is this sentence positive or negative? 
+ + Answer:' + doc_to_target: label + doc_to_choice: + - negative + - positive + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + output_type: multiple_choice + repeats: 1 + should_decontaminate: false + metadata: + version: 1.0 + winogrande: + task: winogrande + dataset_path: winogrande + dataset_name: winogrande_xl + training_split: train + validation_split: validation + doc_to_text: "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n\ + \ return answer_to_num[doc[\"answer\"]]\n" + doc_to_target: "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"\ + _\") + 1\n return doc[\"sentence\"][idx:].strip()\n" + doc_to_choice: "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"\ + _\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"\ + sentence\"][:idx] + opt for opt in options]\n" + description: '' + target_delimiter: ' ' + fewshot_delimiter: ' + + + ' + num_fewshot: 0 + metric_list: + - metric: acc + aggregation: mean + higher_is_better: true + output_type: multiple_choice + repeats: 1 + should_decontaminate: true + doc_to_decontamination_query: sentence + metadata: + version: 1.0 +cli_configs: + desc: null + value: + model: hf + model_args: pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint/llamav2-3b/hf/global_step40000,tokenizer=huggyllama/llama-13b + batch_size: auto + batch_sizes: + - 64 + device: null + use_cache: null + limit: null + bootstrap_iters: 100000 + gen_kwargs: null diff --git a/lm-evaluation-harness/wandb/run-20240529_130547-x16dn3vu/files/media/table/evaluation/eval_results_1_f9e62f0af2531c926df9.table.json b/lm-evaluation-harness/wandb/run-20240529_130547-x16dn3vu/files/media/table/evaluation/eval_results_1_f9e62f0af2531c926df9.table.json new file mode 100644 index 0000000000000000000000000000000000000000..057b89a7caee98fb196860e26566266be303b76b --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240529_130547-x16dn3vu/files/media/table/evaluation/eval_results_1_f9e62f0af2531c926df9.table.json @@ -0,0 +1 @@ +{"columns": ["Tasks", "Version", "Filter", "num_fewshot", "Metric", "Value", "Stderr"], "data": [["winogrande", 1.0, "none", 0, "acc", "0.505130228887135", "0.0141"], ["sst2", 1.0, "none", 0, "acc", "0.47706422018348627", "0.0169"], ["piqa", 1.0, "none", 0, "acc", "0.5244831338411317", "0.0117"], ["piqa", 1.0, "none", 0, "acc_norm", "0.49510337323177367", "0.0117"], ["mrpc", 1.0, "none", 0, "acc", "0.5122549019607843", "0.0248"], ["mrpc", 1.0, "none", 0, "f1", "0.6105675146771037", "0.0255"], ["copa", 1.0, "none", 0, "acc", "0.65", "0.0479"], ["boolq", 2.0, "none", 0, "acc", "0.5082568807339449", "0.0087"], ["arc_easy", 1.0, "none", 0, "acc", "0.257996632996633", "0.0090"], ["arc_easy", 1.0, "none", 0, "acc_norm", "0.2537878787878788", "0.0089"]]} \ No newline at end of file diff --git a/lm-evaluation-harness/wandb/run-20240529_130547-x16dn3vu/files/output.log b/lm-evaluation-harness/wandb/run-20240529_130547-x16dn3vu/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..3f69c5930c19a7c3a5d69dc35973ef8f24301d30 --- /dev/null +++ b/lm-evaluation-harness/wandb/run-20240529_130547-x16dn3vu/files/output.log @@ -0,0 +1,481 @@ + +2024-05-29:13:05:48,442 INFO [__main__.py:251] Verbosity set to INFO +2024-05-29:13:05:57,979 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'boolq', 'copa', 'mrpc', 'piqa', 'sst2', 'winogrande'] +2024-05-29:13:05:57,981 INFO [evaluator.py:131] Setting random seed to 0 | Setting 
numpy seed to 1234 | Setting torch manual seed to 1234 +2024-05-29:13:05:57,981 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/eval/checkpoint/llamav2-3b/hf/global_step40000', 'tokenizer': 'huggyllama/llama-13b'} +2024-05-29:13:06:00,409 INFO [huggingface.py:164] Using device 'cuda' +/usr/local/lib/python3.10/dist-packages/habana_frameworks/torch/gpu_migration/torch/cuda/memory.py:36: UserWarning: No need to call empty_cache on HPU. It manages the memory internally in an effcient way. + warnings.warn( +/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Downloading readme: 100%|██████████| 9.00k/9.00k [00:00<00:00, 16.1MB/s] +Downloading data: 100%|██████████| 331k/331k [00:00<00:00, 3.98MB/s] +Downloading data: 100%|██████████| 346k/346k [00:00<00:00, 4.47MB/s] +Downloading data: 100%|██████████| 86.1k/86.1k [00:00<00:00, 1.07MB/s] +Generating train split: 100%|██████████| 2251/2251 [00:00<00:00, 94036.70 examples/s] +Generating test split: 100%|██████████| 2376/2376 [00:00<00:00, 318646.40 examples/s] +Generating validation split: 100%|██████████| 570/570 [00:00<00:00, 189471.65 examples/s] +2024-05-29:13:06:31,309 WARNING [task.py:763] [Task: boolq] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-29:13:06:31,309 WARNING [task.py:775] [Task: boolq] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for super_glue contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/super_glue +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +Downloading builder script: 100%|██████████| 30.7k/30.7k [00:00<00:00, 41.2MB/s] +Downloading readme: 100%|██████████| 18.2k/18.2k [00:00<00:00, 29.5MB/s] +Downloading data: 100%|██████████| 4.12M/4.12M [00:00<00:00, 36.5MB/s] +Generating train split: 100%|██████████| 9427/9427 [00:00<00:00, 21934.83 examples/s] +Generating validation split: 100%|██████████| 3270/3270 [00:00<00:00, 21563.32 examples/s] +Generating test split: 100%|██████████| 3245/3245 [00:00<00:00, 23042.93 examples/s] +2024-05-29:13:06:35,208 WARNING [task.py:763] [Task: copa] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-29:13:06:35,208 WARNING [task.py:775] [Task: copa] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +Downloading data: 100%|██████████| 44.0k/44.0k [00:00<00:00, 60.3MB/s] +Generating train split: 100%|██████████| 400/400 [00:00<00:00, 8149.86 examples/s] +Generating validation split: 100%|██████████| 100/100 [00:00<00:00, 12525.17 examples/s] +Generating test split: 100%|██████████| 500/500 [00:00<00:00, 15116.24 examples/s] +2024-05-29:13:06:37,359 WARNING [task.py:763] [Task: mrpc] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-29:13:06:37,359 WARNING [task.py:775] [Task: mrpc] metric acc is defined, but higher_is_better is not. 
using default higher_is_better=True +2024-05-29:13:06:37,360 WARNING [task.py:763] [Task: mrpc] metric f1 is defined, but aggregation is not. using default aggregation=f1 +2024-05-29:13:06:37,360 WARNING [task.py:775] [Task: mrpc] metric f1 is defined, but higher_is_better is not. using default higher_is_better=True +Downloading readme: 100%|██████████| 35.3k/35.3k [00:00<00:00, 43.6MB/s] +Downloading data: 100%|██████████| 649k/649k [00:00<00:00, 4.16MB/s] +Downloading data: 100%|██████████| 75.7k/75.7k [00:00<00:00, 523kB/s] +Downloading data: 100%|██████████| 308k/308k [00:00<00:00, 2.11MB/s] +Generating train split: 100%|██████████| 3668/3668 [00:00<00:00, 416670.02 examples/s] +Generating validation split: 100%|██████████| 408/408 [00:00<00:00, 175731.78 examples/s] +Generating test split: 100%|██████████| 1725/1725 [00:00<00:00, 367155.91 examples/s] +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for piqa contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/piqa +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. + warnings.warn( +Downloading builder script: 100%|██████████| 5.36k/5.36k [00:00<00:00, 11.9MB/s] +Downloading readme: 100%|██████████| 8.41k/8.41k [00:00<00:00, 17.7MB/s] +Downloading data: 100%|██████████| 1.82M/1.82M [00:00<00:00, 4.23MB/s] +Downloading data: 100%|██████████| 815k/815k [00:00<00:00, 25.2MB/s] +Generating train split: 100%|██████████| 16113/16113 [00:00<00:00, 23572.71 examples/s] +Generating test split: 100%|██████████| 3084/3084 [00:00<00:00, 24639.24 examples/s] +Generating validation split: 100%|██████████| 1838/1838 [00:00<00:00, 23687.17 examples/s] +2024-05-29:13:06:48,144 WARNING [task.py:763] [Task: sst2] metric acc is defined, but aggregation is not. using default aggregation=mean +2024-05-29:13:06:48,145 WARNING [task.py:775] [Task: sst2] metric acc is defined, but higher_is_better is not. using default higher_is_better=True +Downloading data: 100%|██████████| 3.11M/3.11M [00:00<00:00, 20.4MB/s] +Downloading data: 100%|██████████| 72.8k/72.8k [00:00<00:00, 498kB/s] +Downloading data: 100%|██████████| 148k/148k [00:00<00:00, 1.03MB/s] +Generating train split: 100%|██████████| 67349/67349 [00:00<00:00, 1421809.06 examples/s] +Generating validation split: 100%|██████████| 872/872 [00:00<00:00, 372561.18 examples/s] +Generating test split: 100%|██████████| 1821/1821 [00:00<00:00, 550910.82 examples/s] +/usr/local/lib/python3.10/dist-packages/datasets/load.py:1486: FutureWarning: The repository for winogrande contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/winogrande +You can avoid this message in future by passing the argument `trust_remote_code=True`. +Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`. 
+ warnings.warn( +Downloading builder script: 100%|██████████| 5.65k/5.65k [00:00<00:00, 11.3MB/s] +Downloading readme: 100%|██████████| 9.97k/9.97k [00:00<00:00, 20.5MB/s] +Downloading data: 100%|██████████| 3.40M/3.40M [00:00<00:00, 7.19MB/s] +Generating train split: 100%|██████████| 40398/40398 [00:01<00:00, 24190.59 examples/s] +Generating test split: 100%|██████████| 1767/1767 [00:00<00:00, 24297.63 examples/s] +Generating validation split: 100%|██████████| 1267/1267 [00:00<00:00, 23882.36 examples/s] +2024-05-29:13:07:01,178 INFO [task.py:395] Building contexts for winogrande on rank 0... +100%|██████████| 1267/1267 [00:00<00:00, 68124.44it/s] +2024-05-29:13:07:01,263 INFO [task.py:395] Building contexts for sst2 on rank 0... +100%|██████████| 872/872 [00:00<00:00, 2555.88it/s] +2024-05-29:13:07:01,634 INFO [task.py:395] Building contexts for piqa on rank 0... +100%|██████████| 1838/1838 [00:01<00:00, 1077.65it/s] +2024-05-29:13:07:03,415 INFO [task.py:395] Building contexts for mrpc on rank 0... +100%|██████████| 408/408 [00:00<00:00, 1818.70it/s] +2024-05-29:13:07:03,658 INFO [task.py:395] Building contexts for copa on rank 0... +100%|██████████| 100/100 [00:00<00:00, 70504.35it/s] +2024-05-29:13:07:03,669 INFO [task.py:395] Building contexts for boolq on rank 0... +100%|██████████| 3270/3270 [00:01<00:00, 1984.82it/s] +2024-05-29:13:07:05,448 INFO [task.py:395] Building contexts for arc_easy on rank 0... +100%|██████████| 2376/2376 [00:02<00:00, 1071.72it/s] +2024-05-29:13:07:07,813 INFO [evaluator.py:379] Running loglikelihood requests +Running loglikelihood requests: 0%| | 0/25011 [00:00 + + Passing `token=True` is required when you want to use a private model. + + + + Returns: + `Dict`: The configuration of the image processor. + + Examples: + + ```python + # Download configuration from huggingface.co and cache. + image_processor_config = get_image_processor_config("google-bert/bert-base-uncased") + # This model does not have a image processor config so the result will be an empty dict. + image_processor_config = get_image_processor_config("FacebookAI/xlm-roberta-base") + + # Save a pretrained image processor locally and you can reload its config + from transformers import AutoTokenizer + + image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") + image_processor.save_pretrained("image-processor-test") + image_processor_config = get_image_processor_config("image-processor-test") + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") + token = use_auth_token + + resolved_config_file = get_file_from_repo( + pretrained_model_name_or_path, + IMAGE_PROCESSOR_NAME, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + ) + if resolved_config_file is None: + logger.info( + "Could not locate the image processor configuration file, will try to use the model config instead." 
+ ) + return {} + + with open(resolved_config_file, encoding="utf-8") as reader: + return json.load(reader) + + +class AutoImageProcessor: + r""" + This is a generic image processor class that will be instantiated as one of the image processor classes of the + library when created with the [`AutoImageProcessor.from_pretrained`] class method. + + This class cannot be instantiated directly using `__init__()` (throws an error). + """ + + def __init__(self): + raise EnvironmentError( + "AutoImageProcessor is designed to be instantiated " + "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." + ) + + @classmethod + @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES) + def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): + r""" + Instantiate one of the image processor classes of the library from a pretrained model vocabulary. + + The image processor class to instantiate is selected based on the `model_type` property of the config object + (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's + missing, by falling back to using pattern matching on `pretrained_model_name_or_path`: + + List options + + Params: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained image_processor hosted inside a model repo on + huggingface.co. + - a path to a *directory* containing a image processor file saved using the + [`~image_processing_utils.ImageProcessingMixin.save_pretrained`] method, e.g., + `./my_model_directory/`. + - a path or url to a saved image processor JSON *file*, e.g., + `./my_model_directory/preprocessor_config.json`. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model image processor should be cached if the + standard cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the image processor files and override the cached versions if + they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file + exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + If `False`, then this function returns just the final image processor object. If `True`, then this + functions returns a `Tuple(image_processor, unused_kwargs)` where *unused_kwargs* is a dictionary + consisting of the key/value pairs whose keys are not image processor attributes: i.e., the part of + `kwargs` which has not been used to update `image_processor` and is otherwise ignored. 
+ trust_remote_code (`bool`, *optional*, defaults to `False`): + Whether or not to allow for custom models defined on the Hub in their own modeling files. This option + should only be set to `True` for repositories you trust and in which you have read the code, as it will + execute code present on the Hub on your local machine. + kwargs (`Dict[str, Any]`, *optional*): + The values in kwargs of any keys which are image processor attributes will be used to override the + loaded values. Behavior concerning key/value pairs whose keys are *not* image processor attributes is + controlled by the `return_unused_kwargs` keyword parameter. + + + + Passing `token=True` is required when you want to use a private model. + + + + Examples: + + ```python + >>> from transformers import AutoImageProcessor + + >>> # Download image processor from huggingface.co and cache. + >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") + + >>> # If image processor files are in a directory (e.g. image processor was saved using *save_pretrained('./test/saved_model/')*) + >>> # image_processor = AutoImageProcessor.from_pretrained("./test/saved_model/") + ```""" + use_auth_token = kwargs.pop("use_auth_token", None) + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if kwargs.get("token", None) is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + kwargs["token"] = use_auth_token + + config = kwargs.pop("config", None) + trust_remote_code = kwargs.pop("trust_remote_code", None) + kwargs["_from_auto"] = True + + config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs) + image_processor_class = config_dict.get("image_processor_type", None) + image_processor_auto_map = None + if "AutoImageProcessor" in config_dict.get("auto_map", {}): + image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"] + + # If we still don't have the image processor class, check if we're loading from a previous feature extractor config + # and if so, infer the image processor class from there. + if image_processor_class is None and image_processor_auto_map is None: + feature_extractor_class = config_dict.pop("feature_extractor_type", None) + if feature_extractor_class is not None: + logger.warning( + "Could not find image processor class in the image processor config or the model config. Loading " + "based on pattern matching with the model's feature extractor configuration. Please open a " + "PR/issue to update `preprocessor_config.json` to use `image_processor_type` instead of " + "`feature_extractor_type`. This warning will be removed in v4.40." + ) + image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor") + if "AutoFeatureExtractor" in config_dict.get("auto_map", {}): + feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"] + image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor") + logger.warning( + "Could not find image processor auto map in the image processor config or the model config. " + "Loading based on pattern matching with the model's feature extractor configuration. Please open a " + "PR/issue to update `preprocessor_config.json` to use `AutoImageProcessor` instead of " + "`AutoFeatureExtractor`. 
This warning will be removed in v4.40." + ) + + # If we don't find the image processor class in the image processor config, let's try the model config. + if image_processor_class is None and image_processor_auto_map is None: + if not isinstance(config, PretrainedConfig): + config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) + # It could be in `config.image_processor_type`` + image_processor_class = getattr(config, "image_processor_type", None) + if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map: + image_processor_auto_map = config.auto_map["AutoImageProcessor"] + + if image_processor_class is not None: + image_processor_class = image_processor_class_from_name(image_processor_class) + + has_remote_code = image_processor_auto_map is not None + has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING + trust_remote_code = resolve_trust_remote_code( + trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code + ) + + if has_remote_code and trust_remote_code: + image_processor_class = get_class_from_dynamic_module( + image_processor_auto_map, pretrained_model_name_or_path, **kwargs + ) + _ = kwargs.pop("code_revision", None) + if os.path.isdir(pretrained_model_name_or_path): + image_processor_class.register_for_auto_class() + return image_processor_class.from_dict(config_dict, **kwargs) + elif image_processor_class is not None: + return image_processor_class.from_dict(config_dict, **kwargs) + # Last try: we use the IMAGE_PROCESSOR_MAPPING. + elif type(config) in IMAGE_PROCESSOR_MAPPING: + image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)] + return image_processor_class.from_dict(config_dict, **kwargs) + + raise ValueError( + f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a " + f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following " + f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}" + ) + + @staticmethod + def register(config_class, image_processor_class, exist_ok=False): + """ + Register a new image processor for this class. + + Args: + config_class ([`PretrainedConfig`]): + The configuration corresponding to the model to register. + image_processor_class ([`ImageProcessingMixin`]): The image processor to register. + """ + IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class, exist_ok=exist_ok) diff --git a/venv/lib/python3.10/site-packages/transformers/models/byt5/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/byt5/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..662a427383ff693bde17e96b0f74264442a1cc0f --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/byt5/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
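The resolution order implemented in `AutoImageProcessor.from_pretrained` above (trusted remote `auto_map` class first, then an explicit `image_processor_type`, then the `IMAGE_PROCESSOR_MAPPING` keyed by config type) can be exercised with a small sketch. The `MyVisionConfig` / `MyImageProcessor` names below are purely illustrative, not part of the library, and the snippet assumes `transformers` is importable:

```python
from transformers import AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor


class MyVisionConfig(PretrainedConfig):
    model_type = "my-vision-model"  # hypothetical model_type, not a real architecture


class MyImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]


# Register the pair so that a config of this type can be resolved through the
# IMAGE_PROCESSOR_MAPPING fallback ("last try" branch) shown above.
AutoImageProcessor.register(MyVisionConfig, MyImageProcessor)

# Stock checkpoints keep working through the same entry point (downloads from the Hub):
image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
```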
+ +from typing import TYPE_CHECKING + +from ...utils import _LazyModule + + +_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]} + + +if TYPE_CHECKING: + from .tokenization_byt5 import ByT5Tokenizer +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f72eff7cab10e5ef6ea9ab7ec29609cede151a0d Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/convert_byt5_original_tf_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/convert_byt5_original_tf_checkpoint_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f642d4b43aaee504d9f75eb008ff522e46aa6af8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/convert_byt5_original_tf_checkpoint_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/tokenization_byt5.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/tokenization_byt5.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e446016a3f6e446f472a2924ef747569c41389d Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/byt5/__pycache__/tokenization_byt5.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..7d9a20f3b0b395ffd31a2e8445d94aedb6036a6e --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# Copyright 2018 The T5 authors and HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
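The `_LazyModule` indirection in the `__init__.py` above keeps the top-level import cheap: the submodule defining `ByT5Tokenizer` is only imported when the name is first accessed. A minimal, self-contained sketch of that idea (a toy stand-in, not the library's actual implementation) might look like:

```python
import importlib
import types


class LazySubmoduleProxy(types.ModuleType):
    """Toy stand-in for the lazy-import pattern: resolve exported names on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        mapping = self.__dict__.get("_attr_to_module", {})
        if name not in mapping:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module(f"{self.__name__}.{mapping[name]}")
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache so later lookups bypass __getattr__
        return value


# A package __init__ would then replace itself in sys.modules, e.g.:
#   sys.modules[__name__] = LazySubmoduleProxy(__name__, {"tokenization_byt5": ["ByT5Tokenizer"]})
```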
+"""Convert T5 checkpoint.""" + + +import argparse + +from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5 +from transformers.utils import logging + + +logging.set_verbosity_info() + + +def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path): + # Initialise PyTorch model + config = T5Config.from_json_file(config_file) + print(f"Building PyTorch model from configuration: {config}") + model = T5ForConditionalGeneration(config) + + # Load weights from tf checkpoint + load_tf_weights_in_t5(model, config, tf_checkpoint_path) + + # Save pytorch-model + print(f"Save PyTorch model to {pytorch_dump_path}") + model.save_pretrained(pytorch_dump_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." + ) + parser.add_argument( + "--config_file", + default=None, + type=str, + required=True, + help=( + "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture." + ), + ) + parser.add_argument( + "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." + ) + args = parser.parse_args() + convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) diff --git a/venv/lib/python3.10/site-packages/transformers/models/byt5/tokenization_byt5.py b/venv/lib/python3.10/site-packages/transformers/models/byt5/tokenization_byt5.py new file mode 100644 index 0000000000000000000000000000000000000000..68c70db0d18d65e25bf60a672615f833bd5e504b --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/byt5/tokenization_byt5.py @@ -0,0 +1,234 @@ +# coding=utf-8 +# Copyright 2021 T5 Authors and HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Tokenization class for model ByT5.""" + + +import warnings +from typing import List, Optional, Tuple + +from ...tokenization_utils import AddedToken, PreTrainedTokenizer +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +class ByT5Tokenizer(PreTrainedTokenizer): + """ + Construct a ByT5 tokenizer. ByT5 simply uses raw bytes utf-8 encoding. + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + this superclass for more information regarding those methods. + + Args: + eos_token (`str`, *optional*, defaults to `""`): + The end of sequence token. + + + + When building a sequence using special tokens, this is not the token that is used for the end of sequence. + The token used is the `sep_token`. + + + + unk_token (`str`, *optional*, defaults to `""`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. 
+ pad_token (`str`, *optional*, defaults to `""`): + The token used for padding, for example when batching sequences of different lengths. + extra_ids (`int`, *optional*, defaults to 125): + Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are + accessible as "" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are + indexed from the end of the vocabulary up to beginning ("" is the last token in the vocabulary + like in ByT5 preprocessing see + [here](https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117)). + additional_special_tokens (`List[str]`, *optional*): + Additional special tokens used by the tokenizer. + """ + + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + eos_token="", + unk_token="", + pad_token="", + extra_ids=125, + additional_special_tokens=None, + **kwargs, + ) -> None: + # Add extra_ids to the special token list + if extra_ids > 0 and additional_special_tokens is None: + additional_special_tokens = [f"" for i in range(extra_ids)] + elif extra_ids > 0 and additional_special_tokens is not None and len(additional_special_tokens) > 0: + # Check that we have the right number of extra_id special tokens + extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens))) + if extra_tokens != extra_ids: + raise ValueError( + f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" + " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the" + " extra_ids tokens" + ) + + pad_token = AddedToken(pad_token, lstrip=True, rstrip=True) if isinstance(pad_token, str) else pad_token + # we force left and right stripping for backward compatibility. The byt5tests depend on this. + eos_token = AddedToken(eos_token, lstrip=True, rstrip=True) if isinstance(eos_token, str) else eos_token + unk_token = AddedToken(unk_token, lstrip=True, rstrip=True) if isinstance(unk_token, str) else unk_token + # unk token needs to be in the vocab with correct index + self._added_tokens_decoder = {0: pad_token, 1: eos_token, 2: unk_token} + self.offset = len(self._added_tokens_decoder) + self._utf_vocab_size = 2**8 # utf is 8 bits + super().__init__( + eos_token=eos_token, + unk_token=unk_token, + pad_token=pad_token, + extra_ids=0, + additional_special_tokens=additional_special_tokens, # TODO extra ids are not used :sweatywmile: + **kwargs, + ) + + @property + def vocab_size(self): + return self._utf_vocab_size + + def get_vocab(self): + vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.offset)} + vocab.update(self.added_tokens_encoder) + return vocab + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. 
+ + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + # normal case: some special tokens + if token_ids_1 is None: + return ([0] * len(token_ids_0)) + [1] + return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] + + def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]: + """Do not add eos again if user already added it.""" + if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id: + warnings.warn( + f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated" + " eos tokens being added." + ) + return token_ids + else: + return token_ids + [self.eos_token_id] + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. ByT5 does not + make use of token type ids, therefore a list of zeros is returned. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of zeros. + """ + eos = [self.eos_token_id] + + if token_ids_1 is None: + return len(token_ids_0 + eos) * [0] + return len(token_ids_0 + eos + token_ids_1 + eos) * [0] + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A sequence has the following format: + + - single sequence: `X ` + - pair of sequences: `A B ` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
+ """ + token_ids_0 = self._add_eos_if_not_present(token_ids_0) + if token_ids_1 is None: + return token_ids_0 + else: + token_ids_1 = self._add_eos_if_not_present(token_ids_1) + return token_ids_0 + token_ids_1 + + def _tokenize(self, text: str) -> List[str]: + """Take as input a string and return a list of strings (tokens) for words/sub-words""" + tokens = [chr(i) for i in text.encode("utf-8")] + return tokens + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + + if len(token) != 1: + token_id = None + else: + token_id = ord(token) + self.offset + + return token_id + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + token = chr(index - self.offset) + return token + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + bstring = b"" + for token in tokens: + if token in self.added_tokens_decoder: + tok_string = self.added_tokens_decoder[token].encode("utf-8") + elif token in self.added_tokens_encoder: + tok_string = token.encode("utf-8") + else: + tok_string = bytes([ord(token)]) + bstring += tok_string + string = bstring.decode("utf-8", errors="ignore") + return string + + # ByT5Tokenizer has no vocab file + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + return () diff --git a/venv/lib/python3.10/site-packages/transformers/models/jukebox/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/jukebox/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d96fba4d47b5e755ea40dd00df466b09b4e98ad5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/jukebox/__init__.py @@ -0,0 +1,70 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available + + +_import_structure = { + "configuration_jukebox": [ + "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP", + "JukeboxConfig", + "JukeboxPriorConfig", + "JukeboxVQVAEConfig", + ], + "tokenization_jukebox": ["JukeboxTokenizer"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_jukebox"] = [ + "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST", + "JukeboxModel", + "JukeboxPreTrainedModel", + "JukeboxVQVAE", + "JukeboxPrior", + ] + +if TYPE_CHECKING: + from .configuration_jukebox import ( + JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, + JukeboxConfig, + JukeboxPriorConfig, + JukeboxVQVAEConfig, + ) + from .tokenization_jukebox import JukeboxTokenizer + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_jukebox import ( + JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, + JukeboxModel, + JukeboxPreTrainedModel, + JukeboxPrior, + JukeboxVQVAE, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d08378c7b85f2ed72eef7893bb59a21f053d4c2f Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/configuration_jukebox.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/configuration_jukebox.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63fd64ddcf40d543eff2699c4b77990dbe5376af Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/configuration_jukebox.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/convert_jukebox.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/convert_jukebox.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3437548b465c72dbcdf16759a4a22c73175482c Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/convert_jukebox.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/modeling_jukebox.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/modeling_jukebox.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3beeb352aa56c20fb2bde9ec767fe32da7175f00 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/modeling_jukebox.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/tokenization_jukebox.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/tokenization_jukebox.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f9c6154a7bc2aa0f59bbc6fd050b981db7d24ac9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/jukebox/__pycache__/tokenization_jukebox.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/jukebox/configuration_jukebox.py b/venv/lib/python3.10/site-packages/transformers/models/jukebox/configuration_jukebox.py new file mode 100644 index 0000000000000000000000000000000000000000..4c68051310248811b64f32c31cdd763f45984eeb --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/jukebox/configuration_jukebox.py @@ -0,0 +1,613 @@ +# coding=utf-8 +# Copyright 2022 The OpenAI Team Authors and HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Jukebox configuration""" + +import os +from typing import List, Union + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +_LARGE_ATTENTION = [ + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "cross_attention", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "cross_attention", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "cross_attention", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "cross_attention", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "cross_attention", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "cross_attention", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "block_attn", + "transpose_block_attn", + "prev_block_attn", + "cross_attention", +] +_RawColumnPreviousRowAttention = ["block_attn", "transpose_block_attn", "prev_block_attn"] +_FullDenseAttention = ["dense_attention"] +_PrimePrimeDenseAttention = ["prime_attn", "prime_attn", "dense_attn"] + + +def full_dense_attention(layer): + return 
_FullDenseAttention[0] + + +def raw_column_previous_row_attention(layer): + return _RawColumnPreviousRowAttention[layer % 3] + + +def large_separated_enc_dec_w_lyrics(layer): + return _LARGE_ATTENTION[layer % 79] + + +def enc_dec_with_lyrics(layer): + if layer % 16 == 15: + return _PrimePrimeDenseAttention[layer % 3] + return _RawColumnPreviousRowAttention[layer % 3] + + +ATTENTION_PATTERNS = { + "full_dense_attention": full_dense_attention, + "raw_column_previous_row_attention": raw_column_previous_row_attention, # Alternate row, column and previous row attn + "large_separated_enc_dec_w_lyrics": large_separated_enc_dec_w_lyrics, # Used by large separated_enc_dec model with lyrics + "enc_dec_with_lyrics": enc_dec_with_lyrics, # Used by encoder_decoder model with lyrics +} + + +class JukeboxPriorConfig(PretrainedConfig): + """ + This is the configuration class to store the configuration of a [`JukeboxPrior`]. It is used to instantiate a + `JukeboxPrior` according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the top level prior from the + [openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox + -1b-lyrics) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + + Args: + act_fn (`str`, *optional*, defaults to `"quick_gelu"`): + Activation function. + alignment_head (`int`, *optional*, defaults to 2): + Head that is responsible of the alignment between lyrics and music. Only used to compute the lyric to audio + alignment + alignment_layer (`int`, *optional*, defaults to 68): + Index of the layer that is responsible of the alignment between lyrics and music. Only used to compute the + lyric to audio alignment + attention_multiplier (`float`, *optional*, defaults to 0.25): + Multiplier coefficient used to define the hidden dimension of the attention layers. 0.25 means that + 0.25*width of the model will be used. + attention_pattern (`str`, *optional*, defaults to `"enc_dec_with_lyrics"`): + Which attention pattern to use for the decoder/ + attn_dropout (`int`, *optional*, defaults to 0): + Dropout probability for the post-attention layer dropout in the decoder. + attn_res_scale (`bool`, *optional*, defaults to `False`): + Whether or not to scale the residuals in the attention conditioner block. + blocks (`int`, *optional*, defaults to 64): + Number of blocks used in the `block_attn`. A sequence of length seq_len is factored as `[blocks, seq_len // + blocks]` in the `JukeboxAttention` layer. + conv_res_scale (`int`, *optional*): + Whether or not to scale the residuals in the conditioner block. Since the top level prior does not have a + conditioner, the default value is to None and should not be modified. + num_layers (`int`, *optional*, defaults to 72): + Number of layers of the transformer architecture. + emb_dropout (`int`, *optional*, defaults to 0): + Embedding dropout used in the lyric decoder. + encoder_config (`JukeboxPriorConfig`, *optional*) : + Configuration of the encoder which models the prior on the lyrics. + encoder_loss_fraction (`float`, *optional*, defaults to 0.4): + Multiplication factor used in front of the lyric encoder loss. + hidden_size (`int`, *optional*, defaults to 2048): + Hidden dimension of the attention layers. 
+ init_scale (`float`, *optional*, defaults to 0.2): + Initialization scales for the prior modules. + is_encoder_decoder (`bool`, *optional*, defaults to `True`): + Whether or not the prior is an encoder-decoder model. In case it is not, and `nb_relevant_lyric_tokens` is + greater than 0, the `encoder` args should be specified for the lyric encoding. + mask (`bool`, *optional*, defaults to `False`): + Whether or not to mask the previous positions in the attention. + max_duration (`int`, *optional*, defaults to 600): + Maximum supported duration of the generated song in seconds. + max_nb_genres (`int`, *optional*, defaults to 1): + Maximum number of genres that can be used to condition the model. + merged_decoder (`bool`, *optional*, defaults to `True`): + Whether or not the decoder and the encoder inputs are merged. This is used for the separated + encoder-decoder architecture + metadata_conditioning (`bool`, *optional*, defaults to `True)`: + Whether or not to condition on the artist and genre metadata. + metadata_dims (`List[int]`, *optional*, defaults to `[604, 7898]`): + Number of genres and the number of artists that were used to train the embedding layers of the prior + models. + min_duration (`int`, *optional*, defaults to 0): + Minimum duration of the generated audio on which the model was trained. + mlp_multiplier (`float`, *optional*, defaults to 1.0): + Multiplier coefficient used to define the hidden dimension of the MLP layers. 0.25 means that 0.25*width of + the model will be used. + music_vocab_size (`int`, *optional*, defaults to 2048): + Number of different music tokens. Should be similar to the `JukeboxVQVAEConfig.nb_discrete_codes`. + n_ctx (`int`, *optional*, defaults to 6144): + Number of context tokens for each prior. The context tokens are the music tokens that are attended to when + generating music tokens. + n_heads (`int`, *optional*, defaults to 2): + Number of attention heads. + nb_relevant_lyric_tokens (`int`, *optional*, defaults to 384): + Number of lyric tokens that are used when sampling a single window of length `n_ctx` + res_conv_depth (`int`, *optional*, defaults to 3): + Depth of the `JukeboxDecoderConvBock` used to upsample the previously sampled audio in the + `JukeboxMusicTokenConditioner`. + res_conv_width (`int`, *optional*, defaults to 128): + Width of the `JukeboxDecoderConvBock` used to upsample the previously sampled audio in the + `JukeboxMusicTokenConditioner`. + res_convolution_multiplier (`int`, *optional*, defaults to 1): + Multiplier used to scale the `hidden_dim` of the `JukeboxResConv1DBlock`. + res_dilation_cycle (`int`, *optional*): + Dilation cycle used to define the `JukeboxMusicTokenConditioner`. Usually similar to the ones used in the + corresponding level of the VQVAE. The first prior does not use it as it is not conditioned on upper level + tokens. + res_dilation_growth_rate (`int`, *optional*, defaults to 1): + Dilation grow rate used between each convolutionnal block of the `JukeboxMusicTokenConditioner` + res_downs_t (`List[int]`, *optional*, defaults to `[3, 2, 2]`): + Downsampling rates used in the audio conditioning network + res_strides_t (`List[int]`, *optional*, defaults to `[2, 2, 2]`): + Striding used in the audio conditioning network + resid_dropout (`int`, *optional*, defaults to 0): + Residual dropout used in the attention pattern. + sampling_rate (`int`, *optional*, defaults to 44100): + Sampling rate used for training. 
+ spread (`int`, *optional*): + Spread used in the `summary_spread_attention` pattern + timing_dims (`int`, *optional*, defaults to 64): + Dimension of the timing embedding. + zero_out (`bool`, *optional*, defaults to `False`): + Whether or not to zero out convolution weights when initializing. + """ + + model_type = "jukebox_prior" + attribute_map = { + "max_position_embeddings": "n_positions", + "num_attention_heads": "n_head", + } + + def __init__( + self, + act_fn="quick_gelu", + level=0, + alignment_head=2, + alignment_layer=68, + attention_multiplier=0.25, + attention_pattern="enc_dec_with_lyrics", + attn_dropout=0, + attn_res_scale=False, + blocks=64, + conv_res_scale=None, + num_layers=72, + emb_dropout=0, + encoder_config=None, + encoder_loss_fraction=0.4, + hidden_size=2048, + init_scale=0.2, + is_encoder_decoder=True, + lyric_vocab_size=80, + mask=False, + max_duration=600, + max_nb_genres=1, + merged_decoder=True, + metadata_conditioning=True, + metadata_dims=[604, 7898], + min_duration=0, + mlp_multiplier=1.0, + music_vocab_size=2048, + n_ctx=6144, + n_heads=2, + nb_relevant_lyric_tokens=384, + res_conv_depth=3, + res_conv_width=128, + res_convolution_multiplier=1, + res_dilation_cycle=None, + res_dilation_growth_rate=1, + res_downs_t=[3, 2, 2], + res_strides_t=[2, 2, 2], + resid_dropout=0, + sampling_rate=44100, + spread=None, + timing_dims=64, + zero_out=False, + **kwargs, + ): + self.act_fn = act_fn + self.alignment_head = alignment_head + self.alignment_layer = alignment_layer + self.attention_multiplier = attention_multiplier + self.attention_pattern = attention_pattern + self.attn_dropout = attn_dropout + self.attn_res_scale = attn_res_scale + self.blocks = blocks + self.conv_res_scale = conv_res_scale + self.num_layers = num_layers + self.emb_dropout = emb_dropout + self.music_vocab_size = music_vocab_size + if encoder_config is not None: + self.encoder_config = JukeboxPriorConfig(**encoder_config) + else: + self.encoder_config = None + self.encoder_loss_fraction = encoder_loss_fraction + self.init_scale = init_scale + self.is_encoder_decoder = is_encoder_decoder + self.lyric_vocab_size = lyric_vocab_size + self.level = level + self.mask = mask + self.max_duration = max_duration + self.max_nb_genres = max_nb_genres + self.merged_decoder = merged_decoder + self.metadata_conditioning = metadata_conditioning + self.metadata_dims = metadata_dims + self.min_duration = min_duration + self.mlp_multiplier = mlp_multiplier + self.n_ctx = n_ctx + self.n_heads = n_heads + self.nb_relevant_lyric_tokens = nb_relevant_lyric_tokens + self.res_conv_depth = res_conv_depth + self.res_conv_width = res_conv_width + self.res_convolution_multiplier = res_convolution_multiplier + self.res_dilation_cycle = res_dilation_cycle + self.res_dilation_growth_rate = res_dilation_growth_rate + self.res_downs_t = res_downs_t + self.res_strides_t = res_strides_t + self.resid_dropout = resid_dropout + self.sampling_rate = sampling_rate + self.spread = spread + self.timing_dims = timing_dims + self.hidden_size = hidden_size + self.zero_out = zero_out + + @classmethod + def from_pretrained( + cls, pretrained_model_name_or_path: Union[str, os.PathLike], level=0, **kwargs + ) -> "PretrainedConfig": + cls._set_token_in_kwargs(kwargs) + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # get the prior config dict if we are loading from JukeboxConfig + if config_dict.get("model_type") == "jukebox": + config_dict = config_dict[f"prior_{level}"] + + if "model_type" in 
config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." + ) + + return cls.from_dict(config_dict, **kwargs) + + +class JukeboxVQVAEConfig(PretrainedConfig): + """ + This is the configuration class to store the configuration of a [`JukeboxVQVAE`]. It is used to instantiate a + `JukeboxVQVAE` according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the VQVAE from + [openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox-1b-lyrics) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + act_fn (`str`, *optional*, defaults to `"relu"`): + Activation function of the model. + nb_discrete_codes (`int`, *optional*, defaults to 2048): + Number of codes of the VQVAE. + commit (`float`, *optional*, defaults to 0.02): + Commit loss multiplier. + conv_input_shape (`int`, *optional*, defaults to 1): + Number of audio channels. + conv_res_scale (`bool`, *optional*, defaults to `False`): + Whether or not to scale the residuals of the `JukeboxResConv1DBlock`. + embed_dim (`int`, *optional*, defaults to 64): + Embedding dimension of the codebook vectors. + hop_fraction (`List[int]`, *optional*, defaults to `[0.125, 0.5, 0.5]`): + Fraction of non-intersecting window used when continuing the sampling process. + levels (`int`, *optional*, defaults to 3): + Number of hierarchical levels that used in the VQVAE. + lmu (`float`, *optional*, defaults to 0.99): + Used in the codebook update, exponential moving average coefficient. For more detail refer to Appendix A.1 + of the original [VQVAE paper](https://arxiv.org/pdf/1711.00937v2.pdf) + multipliers (`List[int]`, *optional*, defaults to `[2, 1, 1]`): + Depth and width multipliers used for each level. Used on the `res_conv_width` and `res_conv_depth` + res_conv_depth (`int`, *optional*, defaults to 4): + Depth of the encoder and decoder block. If no `multipliers` are used, this is the same for each level. + res_conv_width (`int`, *optional*, defaults to 32): + Width of the encoder and decoder block. If no `multipliers` are used, this is the same for each level. + res_convolution_multiplier (`int`, *optional*, defaults to 1): + Scaling factor of the hidden dimension used in the `JukeboxResConv1DBlock`. + res_dilation_cycle (`int`, *optional*): + Dilation cycle value used in the `JukeboxResnet`. If an int is used, each new Conv1 block will have a depth + reduced by a power of `res_dilation_cycle`. + res_dilation_growth_rate (`int`, *optional*, defaults to 3): + Resnet dilation growth rate used in the VQVAE (dilation_growth_rate ** depth) + res_downs_t (`List[int]`, *optional*, defaults to `[3, 2, 2]`): + Downsampling rate for each level of the hierarchical VQ-VAE. + res_strides_t (`List[int]`, *optional*, defaults to `[2, 2, 2]`): + Stride used for each level of the hierarchical VQ-VAE. + sample_length (`int`, *optional*, defaults to 1058304): + Provides the max input shape of the VQVAE. Is used to compute the input shape of each level. + init_scale (`float`, *optional*, defaults to 0.2): + Initialization scale. 
+ zero_out (`bool`, *optional*, defaults to `False`): + Whether or not to zero out convolution weights when initializing. + """ + + model_type = "jukebox_vqvae" + + def __init__( + self, + act_fn="relu", + nb_discrete_codes=2048, + commit=0.02, + conv_input_shape=1, + conv_res_scale=False, + embed_dim=64, + hop_fraction=[0.125, 0.5, 0.5], + levels=3, + lmu=0.99, + multipliers=[2, 1, 1], + res_conv_depth=4, + res_conv_width=32, + res_convolution_multiplier=1, + res_dilation_cycle=None, + res_dilation_growth_rate=3, + res_downs_t=[3, 2, 2], + res_strides_t=[2, 2, 2], + sample_length=1058304, + init_scale=0.2, + zero_out=False, + **kwargs, + ): + self.hop_fraction = hop_fraction + self.conv_input_shape = conv_input_shape + self.sample_length = sample_length + + # VQVAE parameters (all used) + self.levels = levels + self.embed_dim = embed_dim + self.nb_discrete_codes = nb_discrete_codes + self.res_conv_width = res_conv_width + self.res_conv_depth = res_conv_depth + self.res_convolution_multiplier = res_convolution_multiplier + self.res_dilation_growth_rate = res_dilation_growth_rate + self.res_dilation_cycle = res_dilation_cycle + self.multipliers = multipliers + self.res_downs_t = res_downs_t + self.res_strides_t = res_strides_t + self.lmu = lmu + self.commit = commit + self.conv_res_scale = conv_res_scale + self.act_fn = act_fn + self.init_scale = init_scale + self.zero_out = zero_out + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": + cls._set_token_in_kwargs(kwargs) + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # get the text config dict if we are loading from CLIPConfig + if config_dict.get("model_type") == "jukebox": + config_dict = config_dict["vqvae_config"] + + if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." + ) + + return cls.from_dict(config_dict, **kwargs) + + +class JukeboxConfig(PretrainedConfig): + """ + This is the configuration class to store the configuration of a [`JukeboxModel`]. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. Instantiating a configuration with the defaults will + yield a similar configuration to that of + [openai/jukebox-1b-lyrics](https://huggingface.co/openai/jukebox-1b-lyrics) architecture. + + + The downsampling and stride are used to determine downsampling of the input sequence. For example, downsampling = + (5,3), and strides = (2, 2) will downsample the audio by 2^5 = 32 to get the first level of codes, and 2**8 = 256 + to get the second level codes. This is mostly true for training the top level prior and the upsamplers. + + Args: + vqvae_config (`JukeboxVQVAEConfig`, *optional*): + Configuration for the `JukeboxVQVAE` model. + prior_config_list (`List[JukeboxPriorConfig]`, *optional*): + List of the configs for each of the `JukeboxPrior` of the model. The original architecture uses 3 priors. + nb_priors (`int`, *optional*, defaults to 3): + Number of prior models that will sequentially sample tokens. Each prior is conditional auto regressive + (decoder) model, apart from the top prior, which can include a lyric encoder. 
The available models were + trained using a top prior and 2 upsampler priors. + sampling_rate (`int`, *optional*, defaults to 44100): + Sampling rate of the raw audio. + timing_dims (`int`, *optional*, defaults to 64): + Dimensions of the JukeboxRangeEmbedding layer which is equivalent to traditional positional embedding + layer. The timing embedding layer converts the absolute and relative position in the currently sampled + audio to a tensor of length `timing_dims` that will be added to the music tokens. + min_duration (`int`, *optional*, defaults to 0): + Minimum duration of the audios to generate + max_duration (`float`, *optional*, defaults to 600.0): + Maximum duration of the audios to generate + max_nb_genres (`int`, *optional*, defaults to 5): + Maximum number of genres that can be used to condition a single sample. + metadata_conditioning (`bool`, *optional*, defaults to `True`): + Whether or not to use metadata conditioning, corresponding to the artist, the genre and the min/maximum + duration. + + Example: + + ```python + >>> from transformers import JukeboxModel, JukeboxConfig + + >>> # Initializing a Jukebox configuration + >>> configuration = JukeboxConfig() + + >>> # Initializing a model from the configuration + >>> model = JukeboxModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ``` + """ + + model_type = "jukebox" + + def __init__( + self, + vqvae_config=None, + prior_config_list=None, + nb_priors=3, + sampling_rate=44100, + timing_dims=64, + min_duration=0, + max_duration=600.0, + max_nb_genres=5, + metadata_conditioning=True, + **kwargs, + ): + if vqvae_config is None: + vqvae_config = {} + logger.info("vqvae_config is None. initializing the JukeboxVQVAE with default values.") + + self.vqvae_config = JukeboxVQVAEConfig(**vqvae_config) + if prior_config_list is not None: + self.prior_configs = [JukeboxPriorConfig(**prior_config) for prior_config in prior_config_list] + else: + self.prior_configs = [] + for prior_idx in range(nb_priors): + prior_config = kwargs.pop(f"prior_{prior_idx}", None) + if prior_config is None: + prior_config = {} + logger.info( + f"prior_{prior_idx}'s config is None. Initializing the JukeboxPriorConfig list with default" + " values." + ) + self.prior_configs.append(JukeboxPriorConfig(**prior_config)) + + self.hop_fraction = self.vqvae_config.hop_fraction + + self.nb_priors = nb_priors + + # Metadata conditioning + self.max_nb_genres = max_nb_genres + self.sampling_rate = sampling_rate + self.timing_dims = timing_dims + self.min_duration = min_duration + self.max_duration = max_duration + self.metadata_conditioning = metadata_conditioning + + super().__init__(**kwargs) + + @classmethod + def from_configs(cls, prior_configs: List[JukeboxPriorConfig], vqvae_config: JukeboxVQVAEConfig, **kwargs): + r""" + Instantiate a [`JukeboxConfig`] (or a derived class) from clip text model configuration and clip vision model + configuration. + + Returns: + [`JukeboxConfig`]: An instance of a configuration object + """ + prior_config_list = [config.to_dict() for config in prior_configs] + return cls(prior_config_list=prior_config_list, vqvae_config_dict=vqvae_config.to_dict(), **kwargs) + + def to_dict(self): + # Override the default to_dict to apply to_dict to the list of prior configs. 
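+        # `prior_configs` holds JukeboxPriorConfig objects, so they are re-serialized below as plain
+        # dicts under the `prior_config_list` key, which is the form that `__init__` accepts.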
+ result = super().to_dict() + result["prior_config_list"] = [config.to_dict() for config in result.pop("prior_configs")] + return result diff --git a/venv/lib/python3.10/site-packages/transformers/models/jukebox/convert_jukebox.py b/venv/lib/python3.10/site-packages/transformers/models/jukebox/convert_jukebox.py new file mode 100644 index 0000000000000000000000000000000000000000..b56a25c57c70d113bfa12003fa92a86e272f8e86 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/jukebox/convert_jukebox.py @@ -0,0 +1,279 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert Jukebox checkpoints""" + +import argparse +import json +import os +from pathlib import Path + +import requests +import torch + +from transformers import JukeboxConfig, JukeboxModel +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + + +PREFIX = "https://openaipublic.azureedge.net/jukebox/models/" +MODEL_MAPPING = { + "jukebox-1b-lyrics": [ + "5b/vqvae.pth.tar", + "5b/prior_level_0.pth.tar", + "5b/prior_level_1.pth.tar", + "1b_lyrics/prior_level_2.pth.tar", + ], + "jukebox-5b-lyrics": [ + "5b/vqvae.pth.tar", + "5b/prior_level_0.pth.tar", + "5b/prior_level_1.pth.tar", + "5b_lyrics/prior_level_2.pth.tar", + ], +} + + +def replace_key(key): + if key.endswith(".model.1.bias") and len(key.split(".")) > 10: + key = key.replace(".model.1.bias", ".conv1d_1.bias") + elif key.endswith(".model.1.weight") and len(key.split(".")) > 10: + key = key.replace(".model.1.weight", ".conv1d_1.weight") + elif key.endswith(".model.3.bias") and len(key.split(".")) > 10: + key = key.replace(".model.3.bias", ".conv1d_2.bias") + elif key.endswith(".model.3.weight") and len(key.split(".")) > 10: + key = key.replace(".model.3.weight", ".conv1d_2.weight") + + if "conditioner_blocks.0." in key: + key = key.replace("conditioner_blocks.0", "conditioner_blocks") + + if "prime_prior" in key: + key = key.replace("prime_prior", "encoder") + + if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: + key = key.replace(".emb.", ".") + + if key.endswith("k"): # replace vqvae.X.k with vqvae.X.codebook + return key.replace(".k", ".codebook") + if "y_emb." in key: + return key.replace("y_emb.", "metadata_embedding.") + + if "x_emb.emb." 
in key: + key = key.replace("0.x_emb.emb", "embed_tokens") + + if "prime_state_ln" in key: + return key.replace("prime_state_ln", "encoder.final_layer_norm") + if ".ln" in key: + return key.replace(".ln", ".layer_norm") + if "_ln" in key: + return key.replace("_ln", "_layer_norm") + + if "prime_state_proj" in key: + return key.replace("prime_state_proj", "encoder.proj_in") + if "prime_x_out" in key: + return key.replace("prime_x_out", "encoder.lm_head") + if "prior.x_out" in key: + return key.replace("x_out", "fc_proj_out") + if "x_emb" in key: + return key.replace("x_emb", "embed_tokens") + + return key + + +def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping): + new_dict = {} + import re + + re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)") + re_encoder_block_resnet = re.compile( + r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" + ) + re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)") + + re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)") + re_decoder_block_resnet = re.compile( + r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" + ) + re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)") + + re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)") + re_prior_cond_resnet = re.compile( + r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" + ) + re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)") + + for original_key, value in state_dict.items(): + # rename vqvae.encoder keys + if re_encoder_block_conv_in.fullmatch(original_key): + regex_match = re_encoder_block_conv_in.match(original_key) + groups = regex_match.groups() + block_index = int(groups[2]) * 2 + int(groups[3]) + re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}" + key = re_encoder_block_conv_in.sub(re_new_key, original_key) + + elif re_encoder_block_resnet.fullmatch(original_key): + regex_match = re_encoder_block_resnet.match(original_key) + groups = regex_match.groups() + block_index = int(groups[2]) * 2 + int(groups[3]) + conv_index = {"1": 1, "3": 2}[groups[-2]] + prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}." 
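+            # Illustrative example of this rename (hypothetical key, shown for reference only):
+            #   "encoders.0.level_blocks.0.model.2.1.model.3.model.1.weight"
+            #   -> "encoders.0.level_blocks.0.downsample_block.5.resnet_block.3.conv1d_1.weight"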
+ resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" + re_new_key = prefix + resnet_block + key = re_encoder_block_resnet.sub(re_new_key, original_key) + + elif re_encoder_block_proj_out.fullmatch(original_key): + regex_match = re_encoder_block_proj_out.match(original_key) + groups = regex_match.groups() + re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}" + key = re_encoder_block_proj_out.sub(re_new_key, original_key) + + # rename vqvae.decoder keys + elif re_decoder_block_conv_out.fullmatch(original_key): + regex_match = re_decoder_block_conv_out.match(original_key) + groups = regex_match.groups() + block_index = int(groups[2]) * 2 + int(groups[3]) - 2 + re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}" + key = re_decoder_block_conv_out.sub(re_new_key, original_key) + + elif re_decoder_block_resnet.fullmatch(original_key): + regex_match = re_decoder_block_resnet.match(original_key) + groups = regex_match.groups() + block_index = int(groups[2]) * 2 + int(groups[3]) - 2 + conv_index = {"1": 1, "3": 2}[groups[-2]] + prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}." + resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" + re_new_key = prefix + resnet_block + key = re_decoder_block_resnet.sub(re_new_key, original_key) + + elif re_decoder_block_proj_in.fullmatch(original_key): + regex_match = re_decoder_block_proj_in.match(original_key) + groups = regex_match.groups() + re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}" + key = re_decoder_block_proj_in.sub(re_new_key, original_key) + + # rename prior cond.model to upsampler.upsample_block and resnet + elif re_prior_cond_conv_out.fullmatch(original_key): + regex_match = re_prior_cond_conv_out.match(original_key) + groups = regex_match.groups() + block_index = int(groups[1]) * 2 + int(groups[2]) - 2 + re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}" + key = re_prior_cond_conv_out.sub(re_new_key, original_key) + + elif re_prior_cond_resnet.fullmatch(original_key): + regex_match = re_prior_cond_resnet.match(original_key) + groups = regex_match.groups() + block_index = int(groups[1]) * 2 + int(groups[2]) - 2 + conv_index = {"1": 1, "3": 2}[groups[-2]] + prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}." 
+ resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" + re_new_key = prefix + resnet_block + key = re_prior_cond_resnet.sub(re_new_key, original_key) + + elif re_prior_cond_proj_in.fullmatch(original_key): + regex_match = re_prior_cond_proj_in.match(original_key) + groups = regex_match.groups() + re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}" + key = re_prior_cond_proj_in.sub(re_new_key, original_key) + + # keep original key + else: + key = original_key + + key = replace_key(key) + + if f"{key_prefix}.{key}" not in model_state_dict or key is None: + print(f"failed converting {original_key} to {key}, does not match") + + # handle missmatched shape + elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape: + val = model_state_dict[f"{key_prefix}.{key}"] + print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match") + key = original_key + + mapping[key] = original_key + new_dict[key] = value + + return new_dict + + +@torch.no_grad() +def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None): + """ + Copy/paste/tweak model's weights to our Jukebox structure. + """ + for file in MODEL_MAPPING[model_name]: + if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"): + r = requests.get(f"{PREFIX}{file}", allow_redirects=True) + os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True) + open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content) + + model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]] + + config = JukeboxConfig.from_pretrained(model_name) + model = JukeboxModel(config) + + weight_dict = [] + mapping = {} + for i, dict_name in enumerate(model_to_convert): + old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"] + + new_dic = {} + for k in old_dic.keys(): + if k.endswith(".b"): + new_dic[k.replace("b", "bias")] = old_dic[k] + elif k.endswith(".w"): + new_dic[k.replace("w", "weight")] = old_dic[k] + elif "level_2" not in dict_name and "cond.model." 
in k: + new_dic[k.replace(".blocks.", ".model.")] = old_dic[k] + else: + new_dic[k] = old_dic[k] + + key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}" + new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping) + weight_dict.append(new_dic) + + vqvae_state_dict = weight_dict.pop(0) + model.vqvae.load_state_dict(vqvae_state_dict) + for i in range(len(weight_dict)): + model.priors[i].load_state_dict(weight_dict[2 - i]) + + Path(pytorch_dump_folder_path).mkdir(exist_ok=True) + with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile: + json.dump(mapping, txtfile) + + print(f"Saving model {model_name} to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + + return weight_dict + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--model_name", + default="jukebox-5b-lyrics", + type=str, + help="Name of the model you'd like to convert.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", + default="jukebox-5b-lyrics-converted", + type=str, + help="Path to the output PyTorch model directory.", + ) + args = parser.parse_args() + convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path) diff --git a/venv/lib/python3.10/site-packages/transformers/models/jukebox/modeling_jukebox.py b/venv/lib/python3.10/site-packages/transformers/models/jukebox/modeling_jukebox.py new file mode 100644 index 0000000000000000000000000000000000000000..282cfdc5b4439bad3151f745db48c931e6655619 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/jukebox/modeling_jukebox.py @@ -0,0 +1,2666 @@ +# coding=utf-8 +# Copyright 2022 The OpenAI Team Authors and HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch Jukebox model.""" + +import math +import os +from typing import List, Optional, Tuple + +import numpy as np +import torch +import torch.nn.functional as F +from torch import nn +from torch.nn import LayerNorm as FusedLayerNorm + +from ...activations import ACT2FN +from ...modeling_utils import PreTrainedModel +from ...utils import add_start_docstrings, logging +from ...utils.logging import tqdm +from .configuration_jukebox import ATTENTION_PATTERNS, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +def filter_logits(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")): + """ + Filter a distribution of logits using top-k and/or nucleus (top-p) filtering + + Args: + logits (`torch.Tensor`): + logits distribution shape (vocabulary size) + top_k (`int`, *optional*, defaults to 0): + When `top_k >0` keep only top key tokens with highest probability (top-k filtering). + top_p (`int`, *optional*, defaults to 0): + When `top_p>0.0` keep the top tokens with cumulative probability >= `top_p` (nucleus filtering). 
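+
+    Example (a toy sketch with arbitrary logit values, only to illustrate the filtering behaviour):
+
+    ```python
+    >>> import torch
+    >>> logits = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
+    >>> filter_logits(logits, top_k=2)  # everything below the 2nd-highest logit is masked out
+    tensor([[-inf, -inf, 3., 4.]])
+    ```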
+ """ + logits = logits.clone() + top_k = min(top_k, logits.size(-1)) # Safety check + + if top_k > 0: + # Remove all tokens with a probability less than the last token of the top-k + indices_to_remove = logits < torch.topk(logits, top_k, dim=-1)[0][..., -1:] + logits[indices_to_remove] = filter_value + + if top_p > 0.0: + sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1) + cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) + + # Remove tokens with cumulative probability above the threshold + sorted_indices_to_remove = cumulative_probs > top_p + # Shift the indices to the right to keep also the first token above the threshold + sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() + sorted_indices_to_remove[..., 0] = 0 + + # indices_to_remove = sorted_indices[sorted_indices_to_remove] + indices_to_remove = torch.zeros_like(logits, dtype=torch.bool).scatter_( + dim=-1, index=sorted_indices, src=sorted_indices_to_remove + ) + logits[indices_to_remove] = filter_value + return logits + + +def get_relevant_lyric_tokens(full_tokens, max_n_lyric_tokens, total_length, offset, duration): + """ + Extract only the relevant tokens based on the character position. A total of `max_n_lyric_tokens` tokens will be + returned. If the provided token sequence is smaller, it will be padded, otherwise, only characters ranging from the + midpoint - `max_n_lyric_tokens//2` to the midpoint + `max_n_lyric_tokens//2` will be returned. This *focuses* on + the most relevant tokens (in time) for the sequence. + + Args: + full_tokens (`List[int]`): + List containing the token ids of the entire lyrics. + total_length (`int`): + Total expected length of the music (not all of it is generated, see duration), in samples. + offset (`int`): + Starting sample in the music. If the offset is greater than 0, the lyrics will be shifted take that into + account + duration (`int`): + Expected duration of the generated music, in samples. 
The duration has to be smaller than the total length, + which represent the overall length of the signal, + """ + full_tokens = full_tokens[0] + if len(full_tokens) < max_n_lyric_tokens: + tokens = torch.cat( + [torch.zeros(max_n_lyric_tokens - len(full_tokens), dtype=torch.long).to(full_tokens.device), full_tokens] + ) + indices = [-1] * (max_n_lyric_tokens - len(full_tokens)) + list(range(0, len(full_tokens))) + else: + midpoint = int(len(full_tokens) * (offset + duration / 2.0) / total_length) + midpoint = min(max(midpoint, max_n_lyric_tokens // 2), len(full_tokens) - max_n_lyric_tokens // 2) + tokens = full_tokens[midpoint - max_n_lyric_tokens // 2 : midpoint + max_n_lyric_tokens // 2] + indices = list(range(midpoint - max_n_lyric_tokens // 2, midpoint + max_n_lyric_tokens // 2)) + return tokens.unsqueeze(dim=0), indices + + +# Break total_length into hops/windows of size n_ctx separated by hop_length +def get_starts(total_length, n_ctx, hop_length): + starts = [] + for start in range(0, total_length - n_ctx + hop_length, hop_length): + if start + n_ctx >= total_length: + # Last hop could be smaller, we make it n_ctx to maximise context + start = total_length - n_ctx + starts.append(start) + return starts + + +def get_alignment(music_tokens, labels, prior, config): + level = prior.levels - 1 # Top level used + n_ctx = prior.n_ctx + tokens = music_tokens[level] + batch_size, total_length = tokens.shape[0], tokens.shape[1] + if total_length < n_ctx: + padding_length = n_ctx - total_length + tokens = torch.cat( + [tokens, torch.zeros(batch_size, n_ctx - total_length, dtype=tokens.dtype, device=tokens.device)], dim=1 + ) + total_length = tokens.shape[1] + else: + padding_length = 0 + + hop_length = int(config.hop_fraction[-level - 1] * prior.n_ctx) + alignment_head, alignment_layer = config.prior_alignment_head[0], config.prior_alignment_layer[0] + attn_layers = {alignment_layer} + alignment_hops = {} + indices_hops = {} + for start in tqdm(get_starts(total_length, n_ctx, hop_length), desc="Computing lyric to music alignment "): + end = start + n_ctx + # set metadata offset, sample_length and lyrics tokens + metadata, indices_hop = prior.get_metadata(labels, start, config.sample_length, get_indices=True, offset=0) + tokens_bs = torch.chunk(tokens, batch_size, dim=0) + metadata_bs = torch.chunk(metadata, batch_size, dim=0) + w_hops = [] + for tokens_i, metadata_i in zip(tokens_bs, metadata_bs): + w_hop = prior.forward_tokens(tokens_i[:, start:end], [], metadata_i, get_attn_weights=attn_layers) + w_hops.append(w_hop[0][:, alignment_head]) + del w_hop + weights = torch.cat(w_hops, dim=0) + del w_hops + alignment_hop = weights.float().cpu().numpy() + del weights + + # alignment_hop has shape (bs, n_ctx, nb_relevant_lyric_tokens) + # indices_hop is a list of len=bs, each entry of len hps.nb_relevant_lyric_tokens + indices_hops[start] = indices_hop + alignment_hops[start] = alignment_hop + + # Combine attn for each hop into attn for full range + # Use indices to place them into correct place for corresponding source tokens + alignments = [] + for item in range(batch_size): + # Note each item has different length lyrics + full_tokens = labels[0, 3:] + alignment = np.zeros((total_length, len(full_tokens) + 1)) + for start in reversed(get_starts(total_length, n_ctx, hop_length)): + end = start + n_ctx + alignment_hop = alignment_hops[start][item] + indices = indices_hops[start][item] + alignment[start:end, indices] = alignment_hop + alignment = alignment[: total_length - padding_length, :-1] # 
remove token padding, and last lyric index + alignments.append(alignment) + return alignments + + +def save_temp_audio(fname, lvl, metas, aud): + aud = torch.clamp(aud, -1, 1).cpu().numpy() + for i in list(range(aud.shape[0])): + if metas is not None: + artists, genres, lyrics = list(metas)[i].values() + path = f"{fname}/lvl_{lvl}-{artists}-{genres}-{lyrics[:5]}-{i}" + np.save(path, aud[i]) + else: + np.save(f"{fname}/lvl_{lvl}-sample-{i}", aud[i]) + + +def get_mask(mask, query_length, key_value_length, blocks, spread, device, sample, sample_t): + # returns a mask of shape 1 x 1 x query_length x key_value_length or None if masking is not needed. + if mask is None or query_length == 1: + return None + offset = sample_t - query_length if sample else max(key_value_length - query_length, 0) + if mask == "autoregressive": + # Masked dense + mask = torch.ones(query_length, key_value_length, device=device).tril(offset) + elif mask == "summary": + # Masked summary + mask = torch.ones(query_length, query_length, device=device).tril() + mask = torch.ones(query_length, query_length, device=device).tril() + mask = mask.view(query_length, blocks, query_length // blocks)[:, :-1, -key_value_length // blocks :] + mask = ( + torch.nn.functional.pad( + mask, + (0, 0, 1, 0), + value=1, + ) + .contiguous() + .view(query_length, key_value_length) + ) + elif mask == "prime": + mask = torch.ones(query_length, key_value_length, device=device).tril(offset) + return mask.view(1, 1, query_length, key_value_length) + + +class JukeboxConv1D(nn.Module): + def __init__(self, input_width, output_width): + super().__init__() + self.input_width = input_width + self.output_width = output_width + weight = torch.empty(input_width, output_width) + bias = torch.zeros(output_width) + self.weight = nn.Parameter(weight) + self.bias = nn.Parameter(bias) + + def forward(self, hidden_states): + size_out = (*hidden_states.size()[:-1], self.output_width) + hidden_states = torch.addmm( + self.bias.type_as(hidden_states), + hidden_states.view(-1, hidden_states.size(-1)), + self.weight.type_as(hidden_states), + ) + hidden_states = hidden_states.view(*size_out) + return hidden_states + + +class JukeboxResConv1DBlock(nn.Module): + def __init__(self, config, conv_width, depth=1, res_scale=1.0): + super().__init__() + hidden_dim = config.res_convolution_multiplier * conv_width + dilation = config.res_dilation_growth_rate**depth + padding = dilation + + self.res_scale = res_scale + self.activation = nn.ReLU() + self.conv1d_1 = nn.Conv1d(conv_width, hidden_dim, 3, 1, padding, dilation) + self.conv1d_2 = nn.Conv1d(hidden_dim, conv_width, 1, 1, 0) + + def forward(self, hidden_states): + residuals = hidden_states + hidden_states = self.activation(hidden_states) + hidden_states = self.conv1d_1(hidden_states) + hidden_states = self.activation(hidden_states) + hidden_states = self.conv1d_2(hidden_states) + return residuals + self.res_scale * hidden_states + + +class JukeboxResnet1D(nn.Module): + def __init__(self, config, conv_width, n_depth, reverse_dilation=False): + super().__init__() + self.dilation_cycle = config.res_dilation_cycle + res_scale = 1.0 if not config.conv_res_scale else 1.0 / math.sqrt(n_depth) + + blocks = [] + for depth in range(n_depth): + block_depth = depth if self.dilation_cycle is None else depth % self.dilation_cycle + blocks.append(JukeboxResConv1DBlock(config, conv_width, block_depth, res_scale)) + + if reverse_dilation: + blocks = blocks[::-1] + self.resnet_block = nn.ModuleList(blocks) + + def forward(self, 
hidden_states): + for block in self.resnet_block: + hidden_states = block(hidden_states) + return hidden_states + + +class JukeboxEncoderConvBlock(nn.Module): + def __init__(self, config, embed_dim, hidden_dim, depth, down_t, stride_t): + super().__init__() + blocks = [] + filter_t = stride_t * 2 + pad_t = stride_t // 2 + if down_t > 0: + for i in range(down_t): + blocks.append(nn.Conv1d(embed_dim if i == 0 else hidden_dim, hidden_dim, filter_t, stride_t, pad_t)) + blocks.append(JukeboxResnet1D(config, hidden_dim, depth)) + self.proj_out = nn.Conv1d(hidden_dim, config.embed_dim, 3, 1, 1) + self.downsample_block = nn.ModuleList(blocks) + + def forward(self, hidden_states): + for block in self.downsample_block: + hidden_states = block(hidden_states) + hidden_states = self.proj_out(hidden_states) + return hidden_states + + +class JukeboxEncoder(nn.Module): + def __init__(self, config, width, depth, levels, downs_t, strides_t): + super().__init__() + self.levels = levels + self.level_blocks = nn.ModuleList() + + iterator = zip(list(range(self.levels)), downs_t, strides_t) + for i, down_t, stride_t in iterator: + self.level_blocks.append( + JukeboxEncoderConvBlock( + config, config.conv_input_shape if i == 0 else config.embed_dim, width, depth, down_t, stride_t + ) + ) + + def forward(self, hidden_states): + all_hidden_states = [] + + # 64, 32, ... + for level in range(self.levels): + level_block = self.level_blocks[level] + hidden_states = level_block(hidden_states) + all_hidden_states.append(hidden_states) + + return all_hidden_states + + +class JukeboxDecoderConvBock(nn.Module): + def __init__(self, config, embed_dim, hidden_dim, depth, down_t, stride_t, reverse_dilation=True): + self.embed_dim = embed_dim + self.hidden_dim = hidden_dim + super().__init__() + blocks = [] + if down_t > 0: + filter_t = stride_t * 2 + pad_t = stride_t // 2 + self.proj_in = nn.Conv1d(embed_dim, hidden_dim, 3, 1, 1) + for i in range(down_t): + blocks.append(JukeboxResnet1D(config, hidden_dim, depth, reverse_dilation)) + blocks.append( + nn.ConvTranspose1d( + hidden_dim, hidden_dim if i < down_t - 1 else embed_dim, filter_t, stride_t, pad_t + ) + ) + self.upsample_block = nn.ModuleList(blocks) + + def forward(self, hidden_states): + hidden_states = self.proj_in(hidden_states) + for block in self.upsample_block: + hidden_states = block(hidden_states) + return hidden_states + + +class JukeboxDecoder(nn.Module): + def __init__(self, config, hidden_dim, depth, levels, downs_t, strides_t): + super().__init__() + self.levels = levels + self.level_blocks = nn.ModuleList() + for level, down_t, stride_t in zip(list(range(self.levels)), downs_t, strides_t): + self.level_blocks.append( + JukeboxDecoderConvBock(config, config.embed_dim, hidden_dim, depth, down_t, stride_t) + ) + + self.out = nn.Conv1d(config.embed_dim, config.conv_input_shape, 3, 1, 1) + + def forward(self, hidden_states, all_levels=True): + hidden_state = hidden_states[-1] + + # 32, 64 ... 
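+        # Decode from the coarsest (top) level down to level 0. When `all_levels` is True,
+        # each intermediate output is summed with the latent state of the level below before
+        # the final projection back to the raw-audio channels.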
+ for level in reversed(range(self.levels)): + level_block = self.level_blocks[level] + hidden_state = level_block(hidden_state) + + if level != 0 and all_levels: + hidden_state = hidden_state + hidden_states[level - 1] + + hidden_state = self.out(hidden_state) + return hidden_state + + +class JukeboxBottleneckBlock(nn.Module): + def __init__(self, config: JukeboxVQVAEConfig): + super().__init__() + self.nb_discrete_codes = config.nb_discrete_codes + self.codebook_width = config.embed_dim + self.mu = config.lmu + self.threshold = 1.0 + self.init = False + self.codebook_sum = None + self.codebook_elem = None + self.register_buffer("codebook", torch.zeros(self.nb_discrete_codes, self.codebook_width)) + + def _tile(self, hidden_states): + dim, embed_width = hidden_states.shape + if dim < self.nb_discrete_codes: + n_repeats = (self.nb_discrete_codes + dim - 1) // dim + std = 0.01 / np.sqrt(embed_width) + hidden_states = hidden_states.repeat(n_repeats, 1) + hidden_states = hidden_states + torch.randn_like(hidden_states) * std + return hidden_states + + def init_codebook(self, hidden_states): + nb_discrete_codes = self.nb_discrete_codes + self.init = True + codes = self._tile(hidden_states) + self.codebook = codes[torch.randperm(codes.shape[0])][:nb_discrete_codes] + self.codebook_sum = self.codebook + self.codebook_elem = torch.ones(nb_discrete_codes, device=self.codebook.device) + + def update_codebook(self, hidden_states, latent_states): + mu, codebook_width, nb_discrete_codes = self.mu, self.codebook_width, self.nb_discrete_codes + with torch.no_grad(): + # Calculate new centres + # nb_discrete_codes, batch_size * seq_length + latent_states_onehot = torch.zeros(nb_discrete_codes, hidden_states.shape[0], device=hidden_states.device) + latent_states_onehot.scatter_(0, latent_states.view(1, hidden_states.shape[0]), 1) + + _codebook_sum = torch.matmul(latent_states_onehot, hidden_states) + _codebook_elem = latent_states_onehot.sum(dim=-1) # nb_discrete_codes + codes = self._tile(hidden_states) + _random_codebook = codes[torch.randperm(codes.shape[0])][:nb_discrete_codes] + + # Update centres + old_codebook = self.codebook + self.codebook_sum = mu * self.codebook_sum + (1.0 - mu) * _codebook_sum + self.codebook_elem = mu * self.codebook_elem + (1.0 - mu) * _codebook_elem # nb_discrete_codes + usage = (self.codebook_elem.view(nb_discrete_codes, 1) >= self.threshold).float() + + norm_code = self.codebook_sum.view(nb_discrete_codes, codebook_width) / self.codebook_elem.view( + nb_discrete_codes, 1 + ) + self.codebook = usage * (norm_code) + (1 - usage) * _random_codebook + _codebook_prob = _codebook_elem / torch.sum(_codebook_elem) # prob of each bin + entropy = -torch.sum(_codebook_prob * torch.log(_codebook_prob + 1e-8)) # entropy ie how diverse + used_curr = (_codebook_elem >= self.threshold).sum() + usage = torch.sum(usage) + dk = torch.norm(self.codebook - old_codebook) / np.sqrt(np.prod(old_codebook.shape)) + return {"entropy": entropy, "used_curr": used_curr, "usage": usage, "dk": dk} + + def preprocess(self, hidden_states): + hidden_states = hidden_states.permute(0, 2, 1).contiguous() + hidden_states = hidden_states.view(-1, hidden_states.shape[-1]) + + if hidden_states.shape[-1] == self.codebook_width: + prenorm = torch.norm(hidden_states - torch.mean(hidden_states)) / np.sqrt(np.prod(hidden_states.shape)) + elif hidden_states.shape[-1] == 2 * self.codebook_width: + x1, x2 = hidden_states[..., : self.codebook_width], hidden_states[..., self.codebook_width :] + prenorm = (torch.norm(x1 - 
torch.mean(x1)) / np.sqrt(np.prod(x1.shape))) + ( + torch.norm(x2 - torch.mean(x2)) / np.sqrt(np.prod(x2.shape)) + ) + + # Normalise + hidden_states = x1 + x2 + + return hidden_states, prenorm + + def postprocess(self, latent_states, dequantised_states, x_shape): + batch_size, time = x_shape + dequantised_states = dequantised_states.view(batch_size, time, -1).permute(0, 2, 1).contiguous() + latent_states = latent_states.view(batch_size, time) + return latent_states, dequantised_states + + def quantise(self, latent_states): + # Calculate latent code latent_states + codebook_weights = self.codebook.t() + distance = ( + torch.sum(latent_states**2, dim=-1, keepdim=True) + - 2 * torch.matmul(latent_states, codebook_weights) + + torch.sum(codebook_weights**2, dim=0, keepdim=True) + ) # (batch_size * latent_states , codebook_weights) + min_distance, music_tokens = torch.min(distance, dim=-1) + fit = torch.mean(min_distance) + return music_tokens, fit + + def dequantise(self, music_tokens): + dequantised_states = F.embedding(music_tokens, self.codebook) + return dequantised_states + + def encode(self, latent_states): + samples, _, seq_len = latent_states.shape + + # Preprocess. + latent_states, _ = self.preprocess(latent_states) + + # Quantise + music_tokens, _ = self.quantise(latent_states) + + # Postprocess. + music_tokens = music_tokens.view(samples, seq_len) + return music_tokens + + def decode(self, music_tokens): + samples, seq_len = music_tokens.shape + + # Dequantise + dequantised_states = self.dequantise(music_tokens) + + # Postprocess + dequantised_states = ( + dequantised_states.view(samples, seq_len, self.codebook_width).permute(0, 2, 1).contiguous() + ) + return dequantised_states + + def forward(self, hidden_states, update_codebook=True): + samples, _, seq_len = hidden_states.shape + + # Preprocess + hidden_states, prenorm = self.preprocess(hidden_states) + + # Init codebook if not inited + if update_codebook and not self.init: + self.init_codebook(hidden_states) + + # Quantise and dequantise through bottleneck + music_tokens, fit = self.quantise(hidden_states) + dequantised_states = self.dequantise(music_tokens) + + # Update embeddings + if update_codebook: + update_metrics = self.update_codebook(hidden_states, music_tokens) + else: + update_metrics = {} + + # Loss + commit_loss = torch.norm(dequantised_states.detach() - hidden_states) ** 2 / np.prod(hidden_states.shape) + + # Passthrough + dequantised_states = hidden_states + (dequantised_states - hidden_states).detach() + + # Postprocess + music_tokens, dequantised_states = self.postprocess(music_tokens, dequantised_states, (samples, seq_len)) + return music_tokens, dequantised_states, commit_loss, dict(fit=fit, pn=prenorm, **update_metrics) + + +class JukeboxBottleneck(nn.Module): + def __init__(self, config, levels): + super().__init__() + self.levels = levels + self.level_blocks = nn.ModuleList() + for level in range(self.levels): + self.level_blocks.append(JukeboxBottleneckBlock(config)) + + def encode(self, raw_audio): + music_tokens = [ + level_block.encode(hidden_states) for (level_block, hidden_states) in zip(self.level_blocks, raw_audio) + ] + return music_tokens + + def decode(self, music_tokens, start_level=0, end_level=None): + if end_level is None: + end_level = self.levels + quantised_audio = [ + level_block.decode(z) for (level_block, z) in zip(self.level_blocks[start_level:end_level], music_tokens) + ] + return quantised_audio + + def forward(self, input_audio): + music_tokens, quantised_states, commit_losses, 
metrics = [], [], [], [] + for level in range(self.levels): + level_block = self.level_blocks[-level - 1] + hidden_states = input_audio[level] + sampled_tokens, quantised_state, commit_loss, metric = level_block( + hidden_states, update_codebook=self.training + ) + music_tokens.append(sampled_tokens) + if not self.training: + # Be extra paranoid and make sure the encoder weights can't + # change from straight-through estimator + quantised_state = quantised_state.detach() + quantised_states.append(quantised_state) + commit_losses.append(commit_loss) + if self.training: + metrics.append(metric) + return music_tokens, quantised_states, commit_losses, metrics + + +JUKEBOX_START_DOCSTRING = r""" + + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config (`JukeboxConfig`): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@add_start_docstrings( + """The Hierarchical VQ-VAE model used in Jukebox. This model follows the Hierarchical VQVAE paper from [Will Williams, Sam +Ringer, Tom Ash, John Hughes, David MacLeod, Jamie Dougherty](https://arxiv.org/abs/2002.08111). + + """, + JUKEBOX_START_DOCSTRING, +) +class JukeboxVQVAE(PreTrainedModel): + config_class = JukeboxVQVAEConfig + base_model_prefix = "vqvae" + + def _init_weights(self, module): + if isinstance(module, nn.Embedding): # embed_tokens + module.weight.data.normal_(mean=0.0, std=0.02 * self.config.init_scale) + elif isinstance(module, JukeboxConv1D): + if self.config.zero_out: + module.weight.data.zero_() + else: + module.weight.data.normal_(mean=0.0, std=0.02 * self.config.init_scale) + elif isinstance(module, JukeboxResConv1DBlock) and self.config.zero_out: + module.conv1d_2.weight.data.zero_() + module.conv1d_2.bias.data.zero_() + if isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + def __init__(self, config: JukeboxVQVAEConfig): + super().__init__(config) + downs_t = config.res_downs_t + strides_t = config.res_strides_t + if not config.sample_length: + downsamples = [stride**down for stride, down in zip(strides_t, downs_t)] + top_raw_to_tokens = np.prod(downsamples) + config.sample_length = ( + config.sample_length_in_seconds * config.sampling_rate // top_raw_to_tokens + ) * top_raw_to_tokens + config.sample_length = config.sample_length.astype(int) + + self.nb_discrete_codes = config.nb_discrete_codes + self.commit = config.commit + self.sample_length = config.sample_length + + self.downsamples = [stride**down for stride, down in zip(strides_t, downs_t)] + self.hop_lengths = np.cumprod(self.downsamples) + self.levels = levels = config.levels + self.music_tokens_shapes = [ + (int(self.sample_length // self.hop_lengths[-level - 1])) for level in range(levels) + ] + + self.multipliers = config.multipliers if config.multipliers is not None 
else [1] * levels + + self.encoders = nn.ModuleList() + self.decoders = nn.ModuleList() + for level in range(levels): + width = config.res_conv_width * self.multipliers[level] + depth = config.res_conv_depth * self.multipliers[level] + self.encoders.append( + JukeboxEncoder(config, width, depth, level + 1, downs_t[: level + 1], strides_t[: level + 1]) + ) + self.decoders.append( + JukeboxDecoder(config, width, depth, level + 1, downs_t[: level + 1], strides_t[: level + 1]) + ) + + self.bottleneck = JukeboxBottleneck(config, levels) + + def _decode(self, music_tokens, start_level=0, end_level=None): + # Decode + if end_level is None: + end_level = self.levels + latent_states = self.bottleneck.decode(music_tokens, start_level=start_level, end_level=end_level) + # Use only lowest level + decoder, dequantised_state = self.decoders[start_level], latent_states[0:1] + dequantised_state = decoder(dequantised_state, all_levels=False) + dequantised_state = dequantised_state.permute(0, 2, 1) + return dequantised_state + + def decode(self, music_tokens, start_level=0, end_level=None, bs_chunks=1) -> torch.Tensor: + """ + Transforms the input `music_tokens` to their `raw_audio` representation. + + Args: + music_tokens (`torch.LongTensor`): + Tensor of music tokens which will be decoded to raw audio by using the codebook. Each music token + should be an index to a corresponding `code` vector in the codebook. + start_level (`int`, *optional*): + Level at which the decoding process will start. Default to 0. + end_level (`int`, *optional*): + Level at which the decoding process will start. Default to None. + bs_chunks (int, *optional*): + Number of chunks to process at the same time. + """ + token_chunks = [torch.chunk(token, bs_chunks, dim=0) for token in music_tokens] + dequantised_states = [] + for i in range(bs_chunks): + music_tokens_i = [chunks[i] for chunks in token_chunks] + dequantised_state = self._decode(music_tokens_i, start_level=start_level, end_level=end_level) + dequantised_states.append(dequantised_state) + return torch.cat(dequantised_states, dim=0) + + def _encode(self, raw_audio, start_level=0, end_level=None): + # Encode + if end_level is None: + end_level = self.levels + input_audio = raw_audio.permute(0, 2, 1).float() + latent_states = [] + for level in range(self.levels): + encoder = self.encoders[level] + latent_state = encoder(input_audio) + latent_states.append(latent_state[-1]) + music_tokens = self.bottleneck.encode(latent_states) + return music_tokens[start_level:end_level] + + def encode(self, input_audio, start_level=0, end_level=None, bs_chunks=1): + """ + Transforms the `input_audio` to a discrete representation made out of `music_tokens`. + + Args: + input_audio (`torch.Tensor`): + Raw audio which will be encoded to its discrete representation using the codebook. The closest `code` + form the codebook will be computed for each sequence of samples. + start_level (`int`, *optional*, defaults to 0): + Level at which the encoding process will start. Default to 0. + end_level (`int`, *optional*): + Level at which the encoding process will start. Default to None. + bs_chunks (int, *optional*, defaults to 1): + Number of chunks of raw audio to process at the same time. 
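+
+        Example (a minimal sketch; the checkpoint name mirrors the one used in the `forward` example and the dummy
+        audio length is an arbitrary multiple of the hop lengths):
+
+        ```python
+        >>> from transformers import JukeboxVQVAE
+        >>> import torch
+
+        >>> model = JukeboxVQVAE.from_pretrained("openai/jukebox-1b-lyrics").eval()
+        >>> raw_audio = torch.zeros(1, 2**14, 1)  # (batch_size, n_samples, channels)
+        >>> music_tokens = model.encode(raw_audio)  # list with one LongTensor of codebook indices per level
+        ```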
+ """ + audio_chunks = torch.chunk(input_audio, bs_chunks, dim=0) + music_tokens_list = [] + for chunk_i in audio_chunks: + music_tokens_i = self._encode(chunk_i, start_level=start_level, end_level=end_level) + music_tokens_list.append(music_tokens_i) + music_tokens = [torch.cat(music_tokens_level, dim=0) for music_tokens_level in zip(*music_tokens_list)] + return music_tokens + + def sample(self, n_samples): + music_tokens = [ + torch.randint(0, self.nb_discrete_codes, size=(n_samples, *music_tokens_shape), device="cpu") + for music_tokens_shape in self.music_tokens_shapes + ] + return self.decode(music_tokens) + + def forward(self, raw_audio: torch.FloatTensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Forward pass of the VQ-VAE, encodes the `raw_audio` to latent states, which are then decoded for each level. + The commit loss, which ensure that the encoder's computed embeddings are close to the codebook vectors, is + computed. + + Args: + raw_audio (`torch.FloatTensor`): + Audio input which will be encoded and decoded. + + Returns: + `Tuple[torch.Tensor, torch.Tensor]` + + + Example: + ```python + >>> from transformers import JukeboxVQVAE, set_seed + >>> import torch + + >>> model = JukeboxVQVAE.from_pretrained("openai/jukebox-1b-lyrics").eval() + >>> set_seed(0) + >>> zs = [torch.randint(100, (4, 1))] + >>> model.decode(zs).shape + torch.Size([4, 8, 1]) + ``` + """ + + # Encode/Decode + input_audio = raw_audio.permute(0, 2, 1).float() + latent_states = [] + for level in range(self.levels): + encoder = self.encoders[level] + latent_state = encoder(input_audio) + latent_states.append(latent_state[-1]) + + _, music_tokens, commit_losses, _ = self.bottleneck(latent_states) + dequantised_states = [] + for level in range(self.levels): + decoder = self.decoders[level] + dequantised_state = decoder(music_tokens[level : level + 1], all_levels=False) + dequantised_states.append(dequantised_state.permute(0, 2, 1)) + + commit_loss = sum(commit_losses) + loss = self.commit * commit_loss + + return dequantised_states, loss + + +class JukeboxMLP(nn.Module): + def __init__(self, config): + # a single channel is always used in original code + super().__init__() + embed_dim = config.hidden_size + hidden_dim = int(config.mlp_multiplier * embed_dim) + + self.c_fc = JukeboxConv1D(embed_dim, hidden_dim) + self.c_proj = JukeboxConv1D(hidden_dim, embed_dim) + self.act = ACT2FN[config.act_fn] + self.dropout = nn.Dropout(config.resid_dropout) + + def forward(self, hidden_states): + hidden_states = self.c_fc(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.c_proj(hidden_states) + hidden_states = self.dropout(hidden_states) + return hidden_states + + +class JukeboxLayerNorm(FusedLayerNorm): + def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True): + super().__init__(normalized_shape, eps=eps, elementwise_affine=elementwise_affine) + self.width = np.prod(normalized_shape) + self.max_numel = 65535 * self.width + + def forward(self, input): + if input.numel() > self.max_numel: + return F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps).type_as(input) + else: + return super().forward(input).type_as(input) + + +class JukeboxAttention(nn.Module): + def __init__(self, config, n_ctx, attn_func="dense_attn"): + super().__init__() + self.embed_dim = config.hidden_size + self.n_heads = config.n_heads + self.dropout = config.attn_dropout + hidden_dim = int(config.attention_multiplier * self.embed_dim) + + self.head_dim = hidden_dim // 
config.n_heads + self.n_ctx = n_ctx + self.hidden_dim = hidden_dim + self.scale = self.head_dim**-0.25 + self.mask = config.mask + + if attn_func == "cross_attention": + self.c_attn = JukeboxConv1D(self.embed_dim, hidden_dim) + self.c_enc_kv = JukeboxConv1D(self.embed_dim, hidden_dim * 2) + else: + self.c_attn = JukeboxConv1D(self.embed_dim, hidden_dim * 3) + + self.c_proj = JukeboxConv1D(hidden_dim, self.embed_dim) + self.attn_dropout = nn.Dropout(config.attn_dropout) + self.resid_dropout = nn.Dropout(config.resid_dropout) + + # Sequence of length seq_len is factored as [blocks, seq_len // blocks] + self.attn_func = attn_func + if attn_func == "cross_attention": + self.qkv = self.decode_qkv + elif attn_func == "prime_attn": + self.qkv = self.prime_qkv + else: + self.qkv = self.factored_qkv + + ATTENTION_MAP = { + "dense_attn": (self.dense_attn, "autoregressive"), + "block_attn": (self.block_attn, "autoregressive"), + "transpose_block_attn": (self.transpose_block_attn, "autoregressive"), + "prev_block_attn": (self.prev_block_attn, None), + "summary_attn": (self.summary_attn, "summary"), + "summary_spread_attn": (self.summary_spread_attn, "summary"), + "cross_attention": (self.dense_attn, None), + "prime_attn": (self.prime_attn, "prime"), + } + self.attn, self.attn_mask = ATTENTION_MAP[attn_func] + + self.blocks = config.blocks + self.spread = config.spread + if self.blocks is not None: + self.block_ctx = self.n_ctx // self.blocks + + self.sample_t = 0 + self.cache = {} + self.encoder_len = config.nb_relevant_lyric_tokens # length of the encoder input ids + self.record_attn = False + + def _attn(self, query_states, key_states, value_states, sample): + scale = self.scale + if self.training: + attention_weight = torch.matmul(query_states * scale, key_states * scale) + else: + attention_weight = torch.matmul(query_states, key_states) + attention_weight.mul_(scale * scale) + attn_weight_type = attention_weight.dtype + attention_weight = attention_weight.float() + if self.mask: + # Generate appropriate mask to mask out all positions before current + # Might take up lot of memory for dense, so can cache it + mask = get_mask( + self.attn_mask, + query_states.size(-2), + key_states.size(-1), + self.blocks, + self.spread, + attention_weight.device, + sample, + self.sample_t, + ) + if mask is not None: + attention_weight = attention_weight * mask + -1e9 * (1 - mask) + attention_prob = F.softmax(attention_weight, dim=-1).type(attn_weight_type) + if self.record_attn: + self.attention_prob = attention_prob + if self.attn_func == "prime_attn": + # only keep music queries and lyrics keys/values + self.attention_prob = self.attention_prob[:, :, self.encoder_len :, : self.encoder_len] + attention_prob = self.attn_dropout(attention_prob) + context_states = torch.matmul(attention_prob, value_states) + return context_states + + def merge_heads(self, hidden_states): + hidden_states = hidden_states.permute(0, 2, 1, 3).contiguous() + new_hidden_states_shape = (*hidden_states.size()[:-2], hidden_states.size(-2) * hidden_states.size(-1)) + return hidden_states.view(*new_hidden_states_shape) # in Tensorflow implem: fct merge_states + + def split_heads(self, hidden_states, is_key=False): + new_hidden_states_shape = ( + *hidden_states.size()[:-1], + self.n_heads, + hidden_states.size(-1) // self.n_heads, + ) + hidden_states = hidden_states.view(*new_hidden_states_shape) # in Tensorflow implem: fct split_states + if is_key: + return hidden_states.permute(0, 2, 3, 1) + else: + return hidden_states.permute(0, 2, 1, 3) + 
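+    # Note on scaling: `self.scale` is `head_dim ** -0.25`, the square root of the usual
+    # 1 / sqrt(head_dim) factor. During training it is applied to both the query and the key
+    # before the matmul (which keeps intermediate values smaller); at sampling time the
+    # product `scale * scale` is applied once, after the matmul.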
+ def dense_attn(self, query, key, value, sample): + query = self.split_heads(query) + key = self.split_heads(key, is_key=True) + value = self.split_heads(value) + context_states = self._attn(query, key, value, sample) + context_states = self.merge_heads(context_states) + return context_states + + def block_attn(self, query, key, value, sample): + block_ctx = self.block_ctx + batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t + if sample: + return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim) + else: + query_length = query.shape[1] + query = query.view(batch_size * query_length // block_ctx, block_ctx, embed_dim) + if query_length < seq_len: + seq_len = query_length + key = key[:, -seq_len:].contiguous() + value = value[:, -seq_len:].contiguous() + key = key.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim) + value = value.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim) + return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim) + + def transpose_block_attn(self, query, key, value, sample): + block_ctx = self.block_ctx + batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t + if sample: + block_len = (seq_len - 1) % block_ctx + key = key[:, block_len::block_ctx, :] + value = value[:, block_len::block_ctx, :] + return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim) + else: + query_length = query.shape[1] + query = query.view(batch_size, query_length // block_ctx, block_ctx, embed_dim) + query = query.transpose(1, 2).contiguous() + query = query.view(batch_size * block_ctx, query_length // block_ctx, embed_dim) + + key = key.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim) + key = key.transpose(1, 2).contiguous() + key = key.view(batch_size * block_ctx, seq_len // block_ctx, embed_dim) + + value = value.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim) + value = value.transpose(1, 2).contiguous() + value = value.view(batch_size * block_ctx, seq_len // block_ctx, embed_dim) + + block_attn = self.dense_attn(query, key, value, sample) + block_attn = block_attn.view(batch_size, block_ctx, query_length // block_ctx, embed_dim) + block_attn = block_attn.transpose(1, 2).contiguous() + block_attn = block_attn.view(batch_size, query_length, embed_dim) + + return block_attn + + def prev_block_attn(self, query, key, value, sample): + block_ctx = self.block_ctx + batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t + if sample: + block = (seq_len - 1) // block_ctx + prev_l = (block - 1) * block_ctx + if block > 0: + key = key[:, prev_l : prev_l + block_ctx, :] + value = value[:, prev_l : prev_l + block_ctx, :] + else: + key = torch.zeros(batch_size, block_ctx, embed_dim, device=query.device, dtype=query.dtype) + value = torch.zeros(batch_size, block_ctx, embed_dim, device=query.device, dtype=query.dtype) + return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim) + else: + query_length = query.shape[1] + query = query.view(batch_size * query_length // block_ctx, block_ctx, embed_dim) + + key = key.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim)[:, :-1, :, :] + key = torch.nn.functional.pad(key, (0, 0, 0, 0, 1, 0)) + key = key.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim) + + value = value.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim)[:, :-1, :, :] + value = 
torch.nn.functional.pad(value, (0, 0, 0, 0, 1, 0)) + value = value.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim) + + if query_length < seq_len: + nb_query_blocks = query_length // block_ctx + nb_key_blocks = seq_len // block_ctx + seq_len = query_length + key = key.view(batch_size, nb_key_blocks, block_ctx, embed_dim)[:, -nb_query_blocks:] + key = key.contiguous().view(batch_size * nb_query_blocks, block_ctx, embed_dim) + + value = value.view(batch_size, nb_key_blocks, block_ctx, embed_dim)[:, -nb_query_blocks:] + value = value.contiguous().view(batch_size * nb_query_blocks, block_ctx, embed_dim) + + return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim) + + def summary_attn(self, query, key, value, sample): + blocks = self.blocks + block_ctx = self.block_ctx + batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t + if sample: + key = key[:, block_ctx - 1 : blocks * block_ctx - 1 : block_ctx, :] + key = torch.nn.functional.pad(key, (0, 0, 1, 0)) + + value = value[:, block_ctx - 1 : blocks * block_ctx - 1 : block_ctx, :] + value = torch.nn.functional.pad(value, (0, 0, 1, 0)) + return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim) + else: + key = key.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -1, :] + key = torch.nn.functional.pad(key, (0, 0, 1, 0)) # batch_size, blocks, embed_dim + + value = value.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -1, :] + value = torch.nn.functional.pad(value, (0, 0, 1, 0)) # batch_size, blocks, embed_dim + return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim) + + def summary_spread_attn(self, query, key, value, sample): + blocks = self.blocks + spread = self.spread + + batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t + if sample: + raise NotImplementedError + else: + key = key.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -spread:, :] + key = torch.nn.functional.pad(key, (0, 0, 0, 0, 1, 0)).contiguous() + key = key.view(batch_size, blocks * spread, embed_dim) + + value = value.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -spread:, :] + value = torch.nn.functional.pad(value, (0, 0, 0, 0, 1, 0)).contiguous() + value = value.view(batch_size, blocks * spread, embed_dim) + + return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim) + + def prime_attn(self, query, key, value, sample): + encoder_len = self._encoder_len + key = key[:, :encoder_len] + value = value[:, :encoder_len] + return self.dense_attn(query, key, value, sample) + + def factored_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False): + curr_ctx = hidden_states.shape[1] + if last_encoder_hidden_states is not None: + raise TypeError("last_encoder_hidden_states should be None") + + query, key, value = hidden_states.chunk(3, dim=2) + if sample: + self.sample_t += curr_ctx + key, value = self._append_cache(key, value) + l_cache = self._suff_cache_len() + if self._cache_len() > l_cache: + self._slice_cache(-l_cache) + if curr_ctx > 1: + if self.attn_func != "dense_attn": + query = self._pad_to_block_ctx(query, query=True) + key = self._pad_to_block_ctx(key) + value = self._pad_to_block_ctx(value) + sample = False + else: + key = self.cache["key"] + value = self.cache["value"] + return query, key, value, sample + + def prime_qkv(self, hidden_states, 
last_encoder_hidden_states=None, sample=False): + curr_ctx = hidden_states.shape[1] + if last_encoder_hidden_states is not None: + raise TypeError("last_encoder_hidden_states should be None") + query, key, value = hidden_states.chunk(3, dim=2) + if sample: + if self._cache_len() < self._encoder_len: + self._append_cache(key, value) + if self._cache_len() > self._encoder_len: + self._slice_cache(0, self._encoder_len) + key, value = self.cache["key"], self.cache["value"] + self.sample_t += curr_ctx + return query, key, value, sample + + def decode_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False): + curr_ctx = hidden_states.shape[1] + query = hidden_states + if sample: + if self.sample_t == 0: + self.cache["key"], self.cache["value"] = self.c_enc_kv( + last_encoder_hidden_states.type_as(hidden_states) + ).chunk(2, dim=2) + key, value = self.cache["key"], self.cache["value"] + self.sample_t += curr_ctx + else: + key, value = self.c_enc_kv(last_encoder_hidden_states.type_as(hidden_states)).chunk(2, dim=2) + return query, key, value, sample + + def forward(self, hidden_states, last_encoder_hidden_states=None, sample=False): + curr_ctx = hidden_states.shape[1] + hidden_states = self.c_attn(hidden_states) + query, key, value, sample = self.qkv( + hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=sample + ) + attention_scores = self.attn(query, key, value, sample) + if attention_scores.shape[1] != curr_ctx: + offset = self._offset(curr_ctx) + attention_scores = attention_scores[:, offset : offset + curr_ctx, :].contiguous() + attention_scores = self.c_proj(attention_scores) + return self.resid_dropout(attention_scores) + + @property + def _encoder_len(self): + encoder_len = self.encoder_len + encoder_blocks = (encoder_len // self.blocks) + 1 + return encoder_blocks * self.blocks + + def _offset(self, curr_ctx): + if self.attn_func == "dense_attn": + return 0 + return (self.sample_t - curr_ctx) % self.block_ctx + + def _pad_to_block_ctx(self, hidden_states, query=False): + seq_len = hidden_states.shape[1] + offset = self._offset(seq_len) if query else 0 + n_blocks = (seq_len + offset + self.block_ctx - 1) // self.block_ctx + pad = n_blocks * self.block_ctx - seq_len - offset + if pad == 0 and offset == 0: + return hidden_states + else: + return F.pad(hidden_states, (0, 0, offset, pad)) + + def _cache_len(self): + return 0 if "key" not in self.cache else self.cache["key"].shape[1] + + def _suff_cache_len(self): + """ + Precondition: + key and value are appended with the current context and self.sample_t reflects the 1-indexed sample + location in the context. 
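+
+        For example (illustrative numbers), with `block_ctx = 8` and `sample_t = 20`, `"block_attn"` only needs the
+        last `(20 - 1) % 8 + 1 = 4` cached positions, while `"prev_block_attn"` needs `4 + 8 = 12` so that the
+        previous block is still available.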
+ """ + previous_block_length = (self.sample_t - 1) % self.block_ctx + 1 + self.block_ctx + REQUIRED_CACHE_LEN = { + "dense_attn": self.sample_t, + "block_attn": (self.sample_t - 1) % self.block_ctx + 1, + "transpose_block_attn": self.sample_t, + "prev_block_attn": self.sample_t if self.sample_t <= self.block_ctx else previous_block_length, + "cross_attn": self.encoder_len, + "prime_attn": min(self.sample_t, self._encoder_len), + } + + return REQUIRED_CACHE_LEN[self.attn_func] + + def _slice_cache(self, start, end=None): + self.cache["key"] = self.cache["key"][:, start:end] + self.cache["value"] = self.cache["value"][:, start:end] + + def _append_cache(self, key, value): + if "key" not in self.cache: + self.cache["key"] = key + self.cache["value"] = value + else: + old_key, old_value = key, value + key = torch.cat([self.cache["key"], old_key], dim=1) + value = torch.cat([self.cache["value"], old_value], dim=1) + del self.cache["key"] + del self.cache["value"] + del old_key + del old_value + self.cache["key"] = key + self.cache["value"] = value + return self.cache["key"], self.cache["value"] + + def del_cache(self): + self.sample_t = 0 + if "key" in self.cache: + del self.cache["key"] + if "value" in self.cache: + del self.cache["value"] + self.cache = {} + + +class JukeboxBlock(nn.Module): + def __init__(self, config, n_ctx, attn_func="dense_attn"): + super().__init__() + self.width = config.hidden_size + self.attn = JukeboxAttention(config, n_ctx, attn_func=attn_func) + + self.layer_norm_0 = JukeboxLayerNorm(config.hidden_size) + self.mlp = JukeboxMLP(config) + self.layer_norm_1 = JukeboxLayerNorm(config.hidden_size) + self.res_scale = 1.0 / config.num_layers if config.attn_res_scale else 1.0 + self.attn_func = attn_func + + def forward(self, hidden_states, last_encoder_hidden_states, sample=False): + residuals = hidden_states + hidden_states = self.layer_norm_0(hidden_states) + hidden_states = self.attn(hidden_states, last_encoder_hidden_states, sample) + + output_states = self.layer_norm_1(residuals + hidden_states) + output_states = self.mlp(output_states) + if self.res_scale == 1.0: + output = residuals + hidden_states + output_states + else: + output = residuals + self.res_scale * (hidden_states + output_states) + return output + + +class JukeboxLayerStack(nn.Module): + def __init__(self, config, n_ctx): + super().__init__() + self.n_ctx = n_ctx + self.width = config.hidden_size + self.num_layers = config.num_layers + self.blocks = config.blocks + self.attention_pattern = config.attention_pattern + if self.blocks is not None: + self.block_ctx = n_ctx // self.blocks + self.encoder_len = config.nb_relevant_lyric_tokens + self.n_heads = config.n_heads + + # Orders of attn_func + attention_pattern = ATTENTION_PATTERNS[self.attention_pattern] + self._attn_mods = nn.ModuleList() + for depth in range(self.num_layers): + self._attn_mods.append(JukeboxBlock(config, n_ctx, attn_func=attention_pattern(depth))) + + self.saved_attn_weights = [] + + def set_record_attn(self, record_attn): + """ + Makes forward prop dump self-attention softmaxes to self.saved_attn_weights. + + Args: + record_attn (`Union[bool,set]`): + Either a set of layer indices indicating which layers to store, or a boolean value indicating Whether + to dump all. 
+ """ + + def _should_record_attn(layer_idx): + if isinstance(record_attn, bool): + return record_attn + return layer_idx in record_attn + + for i, layer in enumerate(self._attn_mods): + layer.attn.record_attn = _should_record_attn(i) + + if not record_attn: + self.saved_attn_weights = [] + + def forward(self, hidden_states, last_encoder_hidden_states=None, sample=False): + # Blocks + for i, attn_layer in enumerate(self._attn_mods): + if attn_layer.attn_func == "cross_attention": # attend to the lyrics + hidden_states = attn_layer( + hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=sample + ) + else: + hidden_states = attn_layer(hidden_states, last_encoder_hidden_states=None, sample=sample) + if attn_layer.attn.record_attn: + self.saved_attn_weights.append(attn_layer.attn.c_attn.weight) + return hidden_states + + def del_cache(self): + for attn_layer in self._attn_mods: + attn_layer.attn.del_cache() + + +class JukeboxPositionalEmbedding(nn.Module): + def __init__(self, embed_dim, width): + super().__init__() + self.pos_emb = nn.Parameter(torch.empty((embed_dim, width))) + + def forward(self): + pos_emb = self.pos_emb + return pos_emb + + +class JukeboxConditionalAutoregressive(nn.Module): + def __init__( + self, + config, + n_ctx=None, + embed_dim=None, + audio_conditioning=False, + metadata_conditioning=False, + is_encoder=False, + ): + """ + Autoregressive model on either lyric tokens or music tokens, or both. The attention pattern should be properly + set fro each configuration. + + Args: + config (`JukeboxPriorConfig`): + Model configuration class with all the parameters of the model. Initializing with a config file does + not load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. + n_ctx (`int`, *optional*): + Number of tokens or lyrics tokens provided in a single pass. + embed_dim (`int`, *optional*): + Either equals to the dimension of the codebook, or the sum of n_vocab (lyrics) and codeboook dimension, + if the model combines lyrics and music tokens, or simply n_vocab if the model is a seperate encoder + audio_conditioning (`bool`, *optional*, defaults to `False`): + Whether or not the prior supports conditionning on audio. + metadata_conditioning (`bool`, *optional*, defaults to `False`): + Whether or not the prior supports conditionning on artitst, genres, lyrics and timing. + is_encoder (`bool`, *optional*, defaults to `False`): + Whether the model is an encoder only model. 
+ """ + + super().__init__() + self.width = config.hidden_size + self.num_layers = config.num_layers + self.n_ctx = n_ctx if n_ctx is not None else config.n_ctx + self.embed_dim = embed_dim if embed_dim is not None else config.music_vocab_size + self.embed_tokens = nn.Embedding(self.embed_dim, config.hidden_size) + self.embed_tokens_dropout = nn.Dropout(config.emb_dropout) + self.metadata_conditioning = metadata_conditioning + self.audio_conditioning = audio_conditioning + if not metadata_conditioning: + self.start_token = nn.Parameter(torch.empty((1, config.hidden_size))) + self.pos_emb = JukeboxPositionalEmbedding(self.n_ctx, config.hidden_size) + self.pos_emb_dropout = nn.Dropout(config.emb_dropout) + + self.transformer = JukeboxLayerStack(config, n_ctx=self.n_ctx) + self.is_encoder = is_encoder + self.encoder_len = config.nb_relevant_lyric_tokens + + if config.merged_decoder: + # Merged piped model uses this setup + self.add_cond_after_transformer = False + self.share_embed_tokens_fc_proj_out = False + else: + self.add_cond_after_transformer = True + self.share_embed_tokens_fc_proj_out = True + + if not is_encoder: + self.fc_proj_out = nn.Linear(config.hidden_size, self.embed_dim, bias=False) + if self.share_embed_tokens_fc_proj_out: + self.fc_proj_out.weight = self.embed_tokens.weight + self.loss = torch.nn.CrossEntropyLoss() + + def forward( + self, + tokens, + audio_conditioning=None, + metadata_conditioning=None, + last_encoder_hidden_states=None, + get_preds=False, + get_acts=False, + get_sep_loss=False, + ): + """ + Args: + tokens (`torch.tensor`): + Can represent music tokens, lyrics tokens or both, depending on the configuration. + """ + # Preprocess. + batch_size = tokens.shape[0] + with torch.no_grad(): + tokens = tokens.view(batch_size, -1).long() + + if not self.audio_conditioning: + audio_conditioning = torch.zeros( + (batch_size, 1, self.width), + device=tokens.device, + dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype, + ) + + target = tokens # Target + hidden_states = self.embed_tokens(tokens) + # Shift by 1, and fill in start token + hidden_states = torch.cat((hidden_states[:, -1:], hidden_states[:, :-1]), dim=1) + if self.metadata_conditioning: + hidden_states[:, 0] = metadata_conditioning.view(batch_size, self.width) + else: + hidden_states[:, 0] = self.start_token + + hidden_states = ( + self.embed_tokens_dropout(hidden_states) + self.pos_emb_dropout(self.pos_emb()) + audio_conditioning + ) # Pos emb and dropout + + hidden_states = self.transformer( + hidden_states, last_encoder_hidden_states=last_encoder_hidden_states + ) # Transformer + if self.add_cond_after_transformer: # Piped doesnt add x_cond + hidden_states = hidden_states + audio_conditioning + + activations = hidden_states + if self.is_encoder: + return hidden_states + + hidden_states = self.fc_proj_out(hidden_states) # Predictions + loss_fn = nn.CrossEntropyLoss() + if get_sep_loss: + lyric_hidden_states = hidden_states[:, : self.encoder_len].reshape(-1, self.embed_dim) + token_hidden_states = hidden_states[:, self.encoder_len :].reshape(-1, self.embed_dim) + + lyric_loss = loss_fn(lyric_hidden_states, target[:, : self.encoder_len].reshape(-1)) / np.log(2.0) + music_token_loss = loss_fn(token_hidden_states, target[:, self.encoder_len :].reshape(-1)) / np.log(2.0) + + loss = (lyric_loss, music_token_loss) # Note order! 
Lyric is first + else: + loss = loss_fn(hidden_states.view(-1, self.embed_dim), target.view(-1)) / np.log(2.0) # Loss + + if get_preds: + return loss, hidden_states + elif get_acts: + return loss, activations + else: + return loss, None + + def get_emb(self, sample_t, n_samples, tokens, audio_conditioning, metadata_conditioning): + if sample_t == 0: + hidden_states = torch.empty(n_samples, 1, self.width, dtype=self.embed_tokens.weight.dtype).to( + self.embed_tokens.weight.device + ) + if self.metadata_conditioning: + hidden_states[:, 0] = metadata_conditioning.view(n_samples, self.width) + else: + hidden_states[:, 0] = self.start_token + else: + hidden_states = self.embed_tokens(tokens) + if audio_conditioning.shape == (n_samples, self.n_ctx, self.width): + cond = audio_conditioning[:, sample_t : sample_t + 1, :] + else: + cond = audio_conditioning + # Pos emb, dropout is identity at eval time + hidden_states = hidden_states + self.pos_emb()[sample_t : sample_t + 1] + cond + return hidden_states, cond + + def sample( + self, + n_samples, + audio_conditioning=None, + metadata_conditioning=None, + last_encoder_hidden_states=None, + temp=1.0, + top_k=0, + top_p=0.0, + get_preds=False, + sample_tokens=None, + ): + if sample_tokens is None: + sample_tokens = self.n_ctx + + if not self.audio_conditioning: + audio_conditioning = torch.zeros( + (n_samples, 1, self.width), dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype + ).to(self.fc_proj_out.device) + + with torch.no_grad(): + sampled_tokens = [] + tokens = None + if get_preds: + preds = [] + + iter = tqdm(range(0, sample_tokens), leave=False) + for sample_t in iter: + iter.set_description(f"Ancestral sampling {sample_tokens} music tokens", refresh=True) + hidden_states, cond = self.get_emb( + sample_t, n_samples, tokens, audio_conditioning, metadata_conditioning + ) + + hidden_states = self.transformer( + hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=True + ) + if self.add_cond_after_transformer: + hidden_states = hidden_states + cond + hidden_states = self.fc_proj_out(hidden_states) # Predictions + if get_preds: + preds.append(hidden_states.clone()) + # Adjust logits + hidden_states = hidden_states / temp + hidden_states = filter_logits(hidden_states, top_k=top_k, top_p=top_p) + # Sample and replace hidden_states + tokens = torch.distributions.Categorical(logits=hidden_states).sample() + sampled_tokens.append(tokens.clone()) + + del tokens + self.transformer.del_cache() + + tokens = torch.cat(sampled_tokens, dim=1) + if get_preds: + preds = torch.cat(preds, dim=1) + if get_preds: + return tokens, preds + else: + return tokens + + def split_chunks(self, length, chunk_size): + n_passes = (length + chunk_size - 1) // chunk_size + chunk_sizes = [*[chunk_size] * (n_passes - 1), (length - 1) % chunk_size + 1] + return chunk_sizes + + def primed_sample( + self, + n_samples, + lyric_and_music_tokens, + audio_conditioning=None, + metadata_conditioning=None, + last_encoder_hidden_states=None, + temp=1.0, + top_k=0, + top_p=0.0, + get_preds=False, + chunk_size=None, + sample_tokens=None, + ): + if sample_tokens is None: + sample_tokens = self.n_ctx + # Preprocess. 
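+        # Note on the preprocessing below: the prime is flattened to (batch_size, seq_len) and
+        # split into seq_len tensors of shape (batch_size, 1), so sampled_audio[t] is the prime
+        # token at position t. `split_chunks` above partitions that prime length into
+        # near-equal forward passes used to fill the key/value cache, e.g.
+        # split_chunks(length=10, chunk_size=4) -> [4, 4, 2] (illustrative values).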
+ batch_size = lyric_and_music_tokens.shape[0] + with torch.no_grad(): + lyric_and_music_tokens = lyric_and_music_tokens.view(batch_size, -1).long() + + sampled_audio = torch.split(lyric_and_music_tokens, 1, dim=1) + sampled_audio = list(sampled_audio) + + if not self.audio_conditioning: + audio_conditioning = torch.zeros( + (n_samples, 1, self.width), dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype + ).to(lyric_and_music_tokens.device) + + with torch.no_grad(): + if get_preds: + preds = [] + + # Fill up key/value cache for past context by runing forward pass. + # We do so in chunks instead of doing the whole past in one forward pass to reduce max memory usage. + if chunk_size is None: + chunk_size = len(sampled_audio) + chunk_sizes = self.split_chunks(len(sampled_audio), chunk_size) + x_primes = [] + start = 0 + token = None + + for current_chunk_size in tqdm(chunk_sizes, desc="Preparing past key value", leave=False): + sampled_audio_prime, conds_prime = [], [] + for sample_t in range(start, start + current_chunk_size): + x_prime, cond_prime = self.get_emb( + sample_t, n_samples, token, audio_conditioning, metadata_conditioning + ) + token = sampled_audio[sample_t] + sampled_audio_prime.append(x_prime) + conds_prime.append(cond_prime) + start = start + current_chunk_size + x_prime, cond_prime = torch.cat(sampled_audio_prime, dim=1), torch.cat(conds_prime, dim=1) + del sampled_audio_prime + del conds_prime + if not get_preds: + del cond_prime + x_prime = self.transformer(x_prime, last_encoder_hidden_states=last_encoder_hidden_states, sample=True) + + if get_preds: + if self.add_cond_after_transformer: + x_prime = x_prime + cond_prime + del cond_prime + x_primes.append(x_prime) + else: + del x_prime + + if get_preds: + x_prime = torch.cat(x_primes, dim=1) + x_prime = self.fc_proj_out(x_prime) # Predictions + preds.append(x_prime) + + # the input of the encoder and decoder can be merged into (lyrics, music tokens) + input_tokens = sampled_audio[-1] + + itererator = tqdm( + range(len(sampled_audio), sample_tokens), + desc=f"Sampling {len(range(len(sampled_audio), sample_tokens))} music tokens", + leave=False, + ) + for sample_t in itererator: + hidden_states, cond = self.get_emb( + sample_t, n_samples, input_tokens, audio_conditioning, metadata_conditioning + ) + + hidden_states = self.transformer( + hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=True + ) + if self.add_cond_after_transformer: + hidden_states = hidden_states + cond + hidden_states = self.fc_proj_out(hidden_states) # Predictions + if get_preds: + preds.append(hidden_states) + # Adjust logits + hidden_states = hidden_states / temp + hidden_states = filter_logits(hidden_states, top_k=top_k, top_p=top_p) + # only music tokens are sampled + music_tokens = torch.distributions.Categorical(logits=hidden_states).sample() + sampled_audio.append(music_tokens.clone()) + input_tokens = music_tokens + + del input_tokens, music_tokens + self.transformer.del_cache() + + music_tokens = torch.cat(sampled_audio, dim=1) + if get_preds: + preds = torch.cat(preds, dim=1) + if get_preds: + return music_tokens, preds + else: + return music_tokens + + +class JukeboxMusicTokenConditioner(nn.Module): + """ + The `JukeboxMusicTokenConditioner` takes music tokens as an input (coresponding to the codes of the VQVAE's + codebook) and upsamples it using a single layer of decoder convolution block (the same is used in the VQVAE). 
+ """ + + def __init__(self, config, level): + super().__init__() + self.embed_tokens = nn.Embedding(config.music_vocab_size, config.hidden_size) + config.embed_dim = config.music_vocab_size # setting correct argument for the `JukeboxDecoder` + + self.upsampler = JukeboxDecoderConvBock( + config, + config.hidden_size, + config.res_conv_width, + config.res_conv_depth, + config.res_downs_t[level], + config.res_strides_t[level], + reverse_dilation=False, + ) + self.layer_norm = JukeboxLayerNorm(config.hidden_size) + + def forward(self, music_tokens, raw_audio_conditionning=None): + """ + Args: + music_tokens (`torch.LongTensor`): + Music tokens form the uper level in range(nb_discrete_codes) + raw_audio_conditionning (`torch.LongTensor`, *optional*): + Audio used when primed sampling, raw audio information that conditions the generation + """ + if raw_audio_conditionning is None: + raw_audio_conditionning = 0.0 + # Embed music_tokens + music_tokens = music_tokens.long() + hidden_states = self.embed_tokens(music_tokens) + hidden_states = hidden_states + raw_audio_conditionning + + # Run conditioner + hidden_states = hidden_states.permute(0, 2, 1) + hidden_states = self.upsampler(hidden_states) + hidden_states = hidden_states.permute(0, 2, 1) + hidden_states = self.layer_norm(hidden_states) + return hidden_states + + +class JukeboxRangeEmbedding(nn.Module): + """ + The `JukeboxRangeEmbedding` interpolate the given [pos_start, pos_end] to obtain an equivalent of time positional + embedding of length `n_ctx`. + + Binning process : For each pos in position tensor, find its bin [start,end) mapped to [0,1,...,bins-1] [start,end) + -> [0,1) -> [0, bins) -> floor -> [0,...,bins-1] NOTE: Open ended interval on right, so start <= pos < end, not <= + end + """ + + def __init__(self, n_time, embed_dim, range, out_width, clamp=False): + super().__init__() + self.n_time = n_time + self.embed_dim = embed_dim + self.emb = nn.Embedding(embed_dim, out_width) + self.pos_min, self.pos_max = range + self.clamp = clamp + + def forward(self, pos_start, pos_end=None): + # Check if [pos_start,pos_end] in [pos_min, pos_max) + if not len(pos_start.shape) == 2: + raise TypeError(f"Expected shape with 2 dims, got {pos_start.shape}") + if not (self.pos_min <= pos_start).all() and (pos_start < self.pos_max).all(): + raise TypeError(f"Range is [{self.pos_min},{self.pos_max}), got {pos_start}") + + pos_start = pos_start.float() + if pos_end is not None: + if self.clamp: + pos_end = pos_end.clamp(self.pos_min, self.pos_max) + + pos_end = pos_end.float() + # Interpolate so that [pos_start, ..., pos_end] <-> position tensor of length n_ctx + n_time = self.n_time + if n_time != 1: + interpolation = ( + torch.arange(0, n_time, dtype=torch.float, device=pos_start.device).view(1, n_time) / n_time + ) + position = pos_start + (pos_end - pos_start) * interpolation + else: + position = pos_start + + # Bin each value to bins_ + # [0,1) -> [0,1..,embed_dim) -> [0,1...,embed_dim-1 + normalised_position = (position - self.pos_min) / (self.pos_max - self.pos_min) + bins_ = (self.embed_dim * normalised_position).floor().long().detach() + return self.emb(bins_) + + +class JukeboxLabelConditioner(nn.Module): + def __init__(self, config, include_time_signal): + super().__init__() + + embed_dim = config.hidden_size + timing_dims = config.timing_dims + sampling_rate = config.sampling_rate + nb_genres, nb_artists = config.metadata_dims + music_tokens_shape = config.n_ctx + + self.max_nb_genres = config.max_nb_genres + self.bow_genre_emb = 
nn.Embedding(nb_genres, embed_dim) + self.artist_emb = nn.Embedding(nb_artists, embed_dim) + self.include_time_signal = include_time_signal + if self.include_time_signal: + total_length_range = (config.min_duration * sampling_rate, config.max_duration * sampling_rate) + absolute_pos_range = (0.0, config.max_duration * sampling_rate) + relative_pos_range = (0.0, 1.0) + self.total_length_emb = JukeboxRangeEmbedding(1, timing_dims, total_length_range, embed_dim) + self.absolute_pos_emb = JukeboxRangeEmbedding( + music_tokens_shape, timing_dims, absolute_pos_range, embed_dim + ) + self.relative_pos_emb = JukeboxRangeEmbedding( + music_tokens_shape, timing_dims, relative_pos_range, embed_dim, clamp=True + ) + + def forward(self, metadata): + total_length = metadata[:, 0:1] + offset = metadata[:, 1:2] + length = metadata[:, 2:3] + artist = metadata[:, 3:4] + genre = metadata[:, 4:] + + # Start embedding of length 1 + artist_emb = self.artist_emb(artist) + # Empty genre slots are denoted by -1. We mask these out. + mask = (genre >= 0).float().unsqueeze(2) + genre_emb = (self.bow_genre_emb(genre.clamp(0)) * mask).sum(dim=1, keepdim=True) + start_emb = genre_emb + artist_emb + + # Pos embedding of length n_ctx + if self.include_time_signal: + start, end = offset, offset + length + total_length = total_length.float() + start = start.float() + end = end.float() + pos_emb = ( + self.total_length_emb(total_length) + + self.absolute_pos_emb(start, end) + + self.relative_pos_emb(start / total_length, end / total_length) + ) + else: + pos_emb = None + return start_emb, pos_emb + + +class JukeboxPrior(PreTrainedModel): + """ + The JukeboxPrior class, which is a wrapper around the various conditioning and the transformer. JukeboxPrior can be + seen as language models trained on music. They model the next `music token` prediction task. If a (lyric) `encoderù + is defined, it also models the `next character` prediction on the lyrics. Can be conditionned on timing, artist, + genre, lyrics and codes from lower-levels Priors. + + Args: + config (`JukeboxPriorConfig`): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. + level (`int`, *optional*): + Current level of the Prior. Should be in range `[0,nb_priors]`. + nb_priors (`int`, *optional*, defaults to 3): + Total number of priors. + vqvae_encoder (`Callable`, *optional*): + Encoding method of the VQVAE encoder used in the forward pass of the model. Passing functions instead of + the vqvae module to avoid getting the parameters. + vqvae_decoder (`Callable`, *optional*): + Decoding method of the VQVAE decoder used in the forward pass of the model. Passing functions instead of + the vqvae module to avoid getting the parameters. 
+ """ + + config_class = JukeboxPriorConfig + + def _init_weights(self, module): + init_scale = self.config.init_scale + + if isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=0.02 * init_scale) + elif isinstance(module, JukeboxConv1D): + if self.config.zero_out: + module.weight.data.zero_() + else: + module.weight.data.normal_(mean=0.0, std=0.02 * init_scale) + elif isinstance(module, JukeboxPositionalEmbedding): + module.pos_emb.data.normal_(mean=0.0, std=0.01 * init_scale) + elif isinstance(module, JukeboxRangeEmbedding): + module.emb.weight.data.normal_(mean=0.0, std=0.01 * init_scale) + elif isinstance(module, JukeboxConditionalAutoregressive) and hasattr(module, "lm_head"): + module.lm_head.weight.data.normal_(mean=0.0, std=0.02 * init_scale) + elif isinstance(module, JukeboxConditionalAutoregressive) and hasattr(module, "start_token"): + module.start_token.data.normal_(mean=0.0, std=0.01 * init_scale) + elif isinstance(module, JukeboxResConv1DBlock) and self.config.zero_out: + module.conv1d_2.weigth.data.zero_() + module.conv1d_2.bias.data.zero_() + if isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + def __init__(self, config: JukeboxPriorConfig, level=None, nb_priors=3, vqvae_encoder=None, vqvae_decoder=None): + super().__init__(config) + # Passing functions instead of the vqvae module to avoid getting params, only used in the + # forward loop + self.vqvae_encoder = vqvae_encoder + self.vqvae_decoder = vqvae_decoder + + self.levels = nb_priors + self.level = level if level is not None else config.level + + self.base_model_prefix = f"priors.{self.level}" + + self.n_ctx = config.n_ctx + + self.lyric_conditioning = config.nb_relevant_lyric_tokens > 0 + self.nb_relevant_lyric_tokens = config.nb_relevant_lyric_tokens + self.encoder_loss_fraction = config.encoder_loss_fraction + + # Audio conditioning : conditioning on music tokens (either from audio or from previous levels or both) + self.audio_conditioning = self.level != 0 + self.cond_level = self.level - 1 + if self.audio_conditioning: + self.conditioner_blocks = JukeboxMusicTokenConditioner(config, self.level) + + # metadata conditioning : contioning on timing, genres, and artist + self.metadata_conditioning = config.metadata_conditioning + if self.metadata_conditioning: + self.metadata_embedding = JukeboxLabelConditioner(config, include_time_signal=not self.audio_conditioning) + + # define encoder-decoder or encoder and decoder + self.is_encoder_decoder = config.is_encoder_decoder + if config.is_encoder_decoder: + # encoder-decoder transformer + self.input_shapes = [config.nb_relevant_lyric_tokens, config.n_ctx] + self.embed_dim_shift = [0, config.lyric_vocab_size] + self.width = config.hidden_size + + self.nb_relevant_lyric_tokens = config.nb_relevant_lyric_tokens + + self.prior = JukeboxConditionalAutoregressive( + config, + n_ctx=config.nb_relevant_lyric_tokens + config.n_ctx, + embed_dim=config.lyric_vocab_size + config.music_vocab_size, + audio_conditioning=(self.audio_conditioning or self.metadata_conditioning), + metadata_conditioning=True, + ) + + else: + # Separate encoder-decoder transformer + encoder_config = config.encoder_config + + if self.nb_relevant_lyric_tokens != 0 and self.lyric_conditioning: + self.lyric_acts_width = encoder_config.hidden_size + self.encoder_width = config.hidden_size + self.encoder_dim = config.lyric_vocab_size + self.encoder = 
JukeboxConditionalAutoregressive( + encoder_config, + n_ctx=self.nb_relevant_lyric_tokens, + embed_dim=self.encoder_dim, + audio_conditioning=False, + metadata_conditioning=False, + is_encoder=True, + ) + self.encoder.proj_in = JukeboxConv1D(encoder_config.hidden_size, config.hidden_size) + self.encoder.final_layer_norm = JukeboxLayerNorm(config.hidden_size) + self.encoder.lm_head = nn.Linear(config.hidden_size, config.lyric_vocab_size, bias=False) + else: + self.nb_relevant_lyric_tokens = 0 + + # decoder model on the tokens + self.prior = JukeboxConditionalAutoregressive( + config, + audio_conditioning=(self.audio_conditioning or self.metadata_conditioning), + metadata_conditioning=self.metadata_conditioning, + ) + + self.next_token_prediction_loss_dims = config.n_ctx + self.total_loss_dims = self.nb_relevant_lyric_tokens + self.next_token_prediction_loss_dims + + self.downsamples = [stride**down for stride, down in zip(config.res_strides_t, config.res_downs_t)] + self.cond_downsample = self.downsamples[self.level] if self.level != 0 else None + self.raw_to_tokens = np.prod(self.downsamples[: nb_priors - self.level]) + self.sample_length = self.n_ctx * self.raw_to_tokens + + logger.info( + f"Level:{self.level}, Cond downsample:{self.cond_downsample}, Raw to tokens:{self.raw_to_tokens}, Sample" + f" length:{self.sample_length}" + ) + + def get_metadata(self, labels, start, total_length, offset, get_indices=False): + metadata = labels.clone() + metadata[:, 0] = total_length + # Set sample_length to match this level + metadata[:, 2] = int(self.sample_length) + + # Set offset + metadata[:, 1:2] = int(offset * self.raw_to_tokens) + int(start * self.raw_to_tokens) + # here since metadata has the full token_list, we just need to selected the ones that are relevant + + # Set lyric tokens + metadata, indices = self.set_metadata_lyric_tokens(metadata) + if get_indices: + return metadata, indices + else: + return metadata + + def set_metadata_lyric_tokens(self, labels): + """ + Processes the full labels to only retreive the relevant lyric tokens and keep the metadata conditioning tokens. + """ + if self.nb_relevant_lyric_tokens > 0: + tokens_list = torch.zeros( + (labels.shape[0], self.nb_relevant_lyric_tokens), dtype=torch.long, device=labels.device + ) + indices_list = [] # whats the index of each current character in original array + for idx in range(labels.shape[0]): + full_tokens = labels.clone()[:, 4 + self.metadata_embedding.max_nb_genres :] + total_length, offset, duration = labels[idx, 0], labels[idx, 1], labels[idx, 2] + tokens, indices = get_relevant_lyric_tokens( + full_tokens, self.nb_relevant_lyric_tokens, total_length, offset, duration + ) + tokens_list[idx, :] = tokens + indices_list.append(indices) + + return ( + torch.cat((labels[:, : 4 + self.metadata_embedding.max_nb_genres], tokens_list), dim=-1), + indices_list, + ) + else: + return labels, None + + def get_music_tokens_conds(self, music_tokens, start, end): + """ + Extracts current level's conditioning music tokens. 
+ """ + if self.level != 0: + music_tokens_cond = music_tokens[self.level - 1] + music_tokens = music_tokens_cond[:, start // self.cond_downsample : end // self.cond_downsample] + missing_cond_len = self.n_ctx // self.cond_downsample - music_tokens_cond[-1].shape[-1] + if missing_cond_len > 0: + init_cond = torch.zeros(1, missing_cond_len).to(music_tokens_cond.device) + music_tokens_cond = torch.cat((music_tokens_cond, init_cond), dim=-1).long() + music_tokens_conds = [music_tokens_cond] + else: + music_tokens_conds = None + return music_tokens_conds + + def prior_preprocess(self, tokens, conds): + """ + Shifts the input tokens to account for the dictionary merge. The embed_dim_shift give by how much the music + tokens should be shifted by. It is equal to `lyric_vocab_size`. + """ + batch_size = tokens[0].shape[0] + for i in range(len(tokens)): + tokens[i] = (tokens[i] + int(self.embed_dim_shift[i])).view(batch_size, -1) + + for i in range(len(conds)): + if conds[i] is None: + conds[i] = torch.zeros( + (batch_size, self.input_shapes[i], self.width), dtype=tokens[0].dtype, device=tokens[0].device + ) + + return torch.cat(tokens, dim=1), torch.cat(conds, dim=1) + + def prior_postprocess(self, tokens): + """ + Shifts back the input tokens if the model uses an encoder decoder architecture. As the embedding layer is + shared, `prior_embed_dim_shift` shifts the music token ids by `lyric_vocab_size`. Only returns the music + tokens. + """ + batch_size = tokens.shape[0] + dims = (self.input_shapes[0], tokens.shape[1] - self.input_shapes[0]) + tokens = list(torch.split(tokens, dims, dim=1)) + + # Some of the input tokens might be shifted to take into account the voccabulary fusion + for i in range(len(tokens)): + bins_shift = int(self.embed_dim_shift[i]) + tokens[i] = (tokens[i] - bins_shift).view(batch_size, -1) + tokens[i] = torch.clamp(tokens[i], min=0) + # If not masking loss, model may have generated lyric/midi tokens which are now shifted <0 by bin_shift + return tokens[-1] + + def embed_tokens(self, music_tokens_conds): + """ + Embeds the upper level music tokens and upsamples them to provide as audio conditioning. + """ + music_tokens_conds = music_tokens_conds[: self.cond_level + 1] + audio_conditioning = None + for music_tokens_cond, conditioner_block in reversed(list(zip(music_tokens_conds, [self.conditioner_blocks]))): + audio_conditioning = conditioner_block(music_tokens_cond, audio_conditioning) + return audio_conditioning + + def encode(self, hidden_states, start_level=None, end_level=None, bs_chunks=1): + """ + Encodes the hidden states (raw audio) using the VQVAE's encoder. Returns latent_states. + """ + if start_level is None: + start_level = self.level + if end_level is None: + end_level = self.levels + # Get latents + with torch.no_grad(): + latent_states = self.vqvae_encoder( + hidden_states, start_level=start_level, end_level=end_level, bs_chunks=bs_chunks + ) + return latent_states + + def decode(self, music_tokens, start_level=None, end_level=None, bs_chunks=1): + """ + Usamples the sequence of codebook vectors to a raw audio. + """ + if start_level is None: + start_level = self.level + if end_level is None: + end_level = self.levels + with torch.no_grad(): + output = self.vqvae_decoder( + music_tokens, start_level=start_level, end_level=end_level, bs_chunks=bs_chunks + ) + return output + + def get_cond(self, music_tokens_conds, metadata): + """ + Converts the input tokens to input_embeddings. Splits the lyrics form the rest of the metadata. Lyric tokens + can be None. 
+ """ + if metadata is not None: + n_labels = metadata.shape[1] - self.nb_relevant_lyric_tokens + metadata, lyric_tokens = metadata[:, :n_labels], metadata[:, n_labels:] + else: + metadata, lyric_tokens = None, None + metadata_conditioning, metadata_pos = ( + self.metadata_embedding(metadata) if self.metadata_conditioning else (None, None) + ) + audio_conditioning = self.embed_tokens(music_tokens_conds) if self.audio_conditioning else metadata_pos + return audio_conditioning, metadata_conditioning, lyric_tokens + + def sample( + self, + n_samples, + music_tokens=None, + music_tokens_conds=None, + metadata=None, + temp=1.0, + top_k=0, + top_p=0.0, + chunk_size=None, + sample_tokens=None, + ): + """ + Ancestral/Prime sampling a window of tokens using the provided conditioning and metadatas. + + Args: + n_samples (`int`): + Number of samples to generate. + music_tokens (`List[torch.LongTensor]`, *optional*): + Previously gemerated tokens at the current level. Used as context for the generation. + music_tokens_conds (`List[torch.FloatTensor]`, *optional*): + Upper-level music tokens generated by the previous prior model. Is `None` if the generation is not + conditionned on the upper-level tokens. + metadata (`List[torch.LongTensor]`, *optional*): + List containing the metatdata tensor with the artist, genre and the lyric tokens. + temp (`float`, *optional*, defaults to 1.0): + Sampling temperature. + top_k (`int`, *optional*, defaults to 0): + Top k probabilities used for filtering. + top_p (`float`, *optional*, defaults to 0.0): + Top p probabilities used for filtering. + chunk_size (`int`, *optional*): + Size of the chunks used to prepare the cache of the transformer. + sample_tokens (`int`, *optional*): + Number of tokens to sample. + + """ + no_past_context = music_tokens is None or music_tokens.shape[1] == 0 + name = {True: "Ancestral", False: "Primed"}[no_past_context] + logger.info(f"{name} sampling {n_samples} samples with temp={temp}, top_k={top_k}, top_p={top_p}") + + with torch.no_grad(): + # Currently audio_conditioning only uses immediately above layer + audio_conditioning, metadata_conditioning, lyric_tokens = self.get_cond(music_tokens_conds, metadata) + if self.is_encoder_decoder: + if no_past_context: # the prime_sample function will be used with music_tokens set to None + lyric_and_music_tokens, audio_conditioning = self.prior_preprocess( + [lyric_tokens], [None, audio_conditioning] + ) + else: + lyric_and_music_tokens, audio_conditioning = self.prior_preprocess( + [lyric_tokens, music_tokens], [None, audio_conditioning] + ) + if sample_tokens is not None: + sample_tokens += self.nb_relevant_lyric_tokens + music_tokens = self.prior.primed_sample( + n_samples, + lyric_and_music_tokens, + audio_conditioning, + metadata_conditioning, + temp=temp, + top_k=top_k, + top_p=top_p, + chunk_size=chunk_size, + sample_tokens=sample_tokens, + ) + music_tokens = self.prior_postprocess(music_tokens) + else: + last_encoder_hidden_states = self.get_encoder_states(lyric_tokens, sample=True) + if no_past_context: + music_tokens = self.prior.sample( + n_samples, + audio_conditioning, + metadata_conditioning, + last_encoder_hidden_states, + temp=temp, + top_k=top_k, + top_p=top_p, + sample_tokens=sample_tokens, + ) + else: + music_tokens = self.prior.primed_sample( + n_samples, + music_tokens, + audio_conditioning, + metadata_conditioning, + last_encoder_hidden_states, + temp=temp, + top_k=top_k, + top_p=top_p, + chunk_size=chunk_size, + sample_tokens=sample_tokens, + ) + return music_tokens + + 
def get_encoder_states(self, lyric_tokens, sample=False): + """ + Retreive the last hidden_states of the lyric encoder that will be attended to by the decoder. Forwards through + the lyric encoder. + """ + if self.nb_relevant_lyric_tokens != 0 and self.lyric_conditioning: + if sample: + self.encoder = self.encoder.to(lyric_tokens.device) + lyric_acts = self.encoder(lyric_tokens, None, None, None) + lyric_acts = self.encoder.proj_in(lyric_acts) + last_encoder_hidden_states = self.encoder.final_layer_norm(lyric_acts) + else: + last_encoder_hidden_states = None + return last_encoder_hidden_states + + def get_encoder_loss(self, last_encoder_hidden_states, target_lyrics): + """ + Computes the loss for the lyric encoder: next lyric token prediction. + """ + if self.lyric_conditioning: + last_encoder_hidden_states = self.encoder.lm_head(last_encoder_hidden_states) + encoder_loss = nn.functional.cross_entropy( + last_encoder_hidden_states.view(-1, self.encoder_dim), target_lyrics.view(-1) + ) / np.log(2.0) + else: + encoder_loss = torch.tensor(0.0, device=last_encoder_hidden_states.device) + return encoder_loss + + def forward_tokens( + self, music_tokens, music_tokens_conds=[], metadata=None, get_preds=False, get_attn_weights=False + ): + """ + Applies a forward pass using the conditioning tokens. Different from the classic forward as it does not use the + vqvae's encoding layers. + """ + if get_attn_weights: + self.prior.transformer.set_record_attn(get_attn_weights) + audio_conditioning, metadata_conditioning, lyric_tokens = self.get_cond(music_tokens_conds, metadata) + + if self.is_encoder_decoder: # the preprocess returns the full tokens (Lyrics and Music tokens), shifted + tokens, audio_conditioning = self.prior_preprocess( + [lyric_tokens, music_tokens], [None, audio_conditioning] + ) + (encoder_loss, next_token_prediction_loss), preds = self.prior( + tokens, audio_conditioning, metadata_conditioning, get_sep_loss=True, get_preds=get_preds + ) + else: + last_encoder_hidden_states = self.get_encoder_states(lyric_tokens) + encoder_loss = self.get_encoder_loss(last_encoder_hidden_states, lyric_tokens) + next_token_prediction_loss, preds = self.prior( + music_tokens, + audio_conditioning, + metadata_conditioning, + last_encoder_hidden_states, + get_preds=get_preds, + ) + loss = self.encoder_loss_fraction * encoder_loss * self.nb_relevant_lyric_tokens / self.total_loss_dims + loss += next_token_prediction_loss * self.next_token_prediction_loss_dims / self.total_loss_dims + + metrics = { + "bpd": next_token_prediction_loss.clone().detach(), + "encoder_loss": encoder_loss.clone().detach(), + "next_token_prediction_loss": next_token_prediction_loss.clone().detach(), + } + if get_preds: + metrics["preds"] = preds.clone().detach() + if get_attn_weights: + saved_attn_weights = self.prior.transformer.saved_attn_weights + self.prior.transformer.set_record_attn(False) + return saved_attn_weights + else: + return loss, metrics + + def forward( + self, + hidden_states: torch.Tensor, + metadata: Optional[List[torch.LongTensor]], + decode: Optional[bool] = False, + get_preds: Optional[bool] = False, + ) -> List[torch.Tensor]: + """ + Encode the hidden states using the `vqvae` encoder, and then predicts the next token in the `forward_tokens` + function. The loss is the sum of the `encoder` loss and the `decoder` loss. 
+ + Args: + hidden_states (`torch.Tensor`): + Hidden states which should be raw audio + metadata (`List[torch.LongTensor]`, *optional*): + List containing the metadata conditioning tensorwith the lyric and the metadata tokens. + decode (`bool`, *optional*, defaults to `False`): + Whether or not to decode the encoded to tokens. + get_preds (`bool`, *optional*, defaults to `False`): + Whether or not to return the actual predicitons of the model. + """ + batch_size = hidden_states.shape[0] + music_tokens, *music_tokens_conds = self.encode(hidden_states, bs_chunks=batch_size) + loss, metrics = self.forward_tokens( + music_tokens=music_tokens, + music_tokens_conds=music_tokens_conds, + metadata=metadata, + get_preds=get_preds, + ) + if decode: + dequantised_states = self.decode([music_tokens, *music_tokens_conds]) + else: + dequantised_states = None + return dequantised_states, loss, metrics + + +class JukeboxPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = JukeboxConfig + base_model_prefix = "jukebox" + supports_gradient_checkpointing = False + + def _init_weights(self, module): + if isinstance(module, JukeboxPrior) or isinstance(module, JukeboxVQVAE): + module.apply(module._init_weights) + + def __init__(self, *inputs, **kwargs): + super().__init__(*inputs, **kwargs) + + +JUKEBOX_SAMPLING_INPUT_DOCSTRING = r""" + labels (`List[torch.LongTensor]` of length `n_sample`, and shape `(self.levels, self.config.max_nb_genre + lyric_sequence_length)` : + List of metadata such as `artist_id`, `genre_id` and the full list of lyric tokens which are used to + condition the generation. + sampling_kwargs (`Dict[Any]`): + Various additional sampling arguments that are used by the `_sample` function. A detail list of the + arguments can bee seen in the [`_sample`] function documentation. +""" + + +@add_start_docstrings( + """The bare JUKEBOX Model used for music generation. 4 sampling techniques are supported : `primed_sample`, `upsample`, + `continue_sample` and `ancestral_sample`. It does not have a `forward` method as the training is not end to end. If + you want to fine-tune the model, it is recommended to use the `JukeboxPrior` class and train each prior + individually. + """, + JUKEBOX_START_DOCSTRING, +) +class JukeboxModel(JukeboxPreTrainedModel): + _no_split_modules = ["JukeboxBlock"] + + def __init__(self, config): + super().__init__(config) + vqvae_config = config.vqvae_config + self.vqvae = JukeboxVQVAE(vqvae_config) + self.set_shared_params(config) + self.priors = nn.ModuleList( + [JukeboxPrior(config.prior_configs[level], level) for level in range(config.nb_priors)] + ) + + def set_shared_params(self, model_config): + """ + Initialises the parameters that are shared. 
This has to be done here because the list of `JukeboxPriorConfig` + is nest, and is thus unreachable in the `from_dict` function + """ + for config in model_config.prior_configs: + config.sampling_rate = model_config.sampling_rate + config.timing_dims = model_config.timing_dims + config.min_duration = model_config.min_duration + config.max_duration = model_config.max_duration + config.max_nb_genres = model_config.max_nb_genres + config.metadata_conditioning = model_config.metadata_conditioning + + def decode(self, music_tokens, start_level=0, end_level=None, bs_chunks=1): + return self.vqvae.decode(music_tokens, start_level, end_level, bs_chunks) + + def encode(self, input_audio, start_level=0, end_level=None, bs_chunks=1): + return self.vqvae.encode(input_audio, start_level, end_level, bs_chunks) + + def split_batch(self, obj, n_samples, split_size): + n_passes = (n_samples + split_size - 1) // split_size + if isinstance(obj, torch.Tensor): + return torch.split(obj, split_size, dim=0) + elif isinstance(obj, list): + return list(zip(*[torch.split(item, split_size, dim=0) for item in obj])) + elif obj is None: + return [None] * n_passes + else: + raise TypeError("Unknown input type") + + # Sample a partial window of length= self.priors[level].n_ctx: + iterator = get_starts(total_length, self.priors[level].n_ctx, hop_length) + for start in iterator: + music_tokens = self.sample_single_window( + music_tokens, labels, offset, sampling_kwargs, level, start, max_batch_size + ) + + else: + music_tokens = self.sample_partial_window( + music_tokens, labels, offset, sampling_kwargs, level, total_length, max_batch_size + ) + return music_tokens + + @torch.no_grad() + def _sample( + self, + music_tokens, + labels, + sample_levels, + metas=None, + chunk_size=32, + sampling_temperature=0.98, + lower_batch_size=16, + max_batch_size=16, + sample_length_in_seconds=24, + compute_alignments=False, + sample_tokens=None, + offset=0, + save_results=True, + sample_length=None, + ) -> List[torch.LongTensor]: + """ + Core sampling function used to generate music tokens. Iterates over the provided list of levels, while saving + the generated raw audio at each step. + + Args: + music_tokens (`List[torch.LongTensor]`): + A sequence of music tokens of length `self.levels` which will be used as context to continue the + sampling process. Should have `self.levels` tensors, each corresponding to the generation at a certain + level. + labels (`List[torch.LongTensor]`): + List of length `n_sample`, and shape `(self.levels, 4 + self.config.max_nb_genre + + lyric_sequence_length)` metadata such as `artist_id`, `genre_id` and the full list of lyric tokens + which are used to condition the generation. + sample_levels (`List[int]`): + List of the desired levels at which the sampling will be done. A level is equivalent to the index of + the prior in the list of priors + metas (`List[Any]`, *optional*): + Metadatas used to generate the `labels` + chunk_size (`int`, *optional*, defaults to 32): + Size of a chunk of audio, used to fill up the memory in chuncks to prevent OOM erros. Bigger chunks + means faster memory filling but more consumption. + sampling_temperature (`float`, *optional*, defaults to 0.98): + Temperature used to ajust the randomness of the sampling. 
+ lower_batch_size (`int`, *optional*, defaults to 16): + Maximum batch size for the lower level priors + max_batch_size (`int`, *optional*, defaults to 16): + Maximum batch size for the top level priors + sample_length_in_seconds (`int`, *optional*, defaults to 24): + Desired length of the generation in seconds + compute_alignments (`bool`, *optional*, defaults to `False`): + Whether or not to compute the alignment between the lyrics and the audio using the top_prior + sample_tokens (`int`, *optional*): + Precise number of tokens that should be sampled at each level. This is mostly useful for running dummy + experiments + offset (`int`, *optional*, defaults to 0): + Audio offset used as conditioning, corresponds to the starting sample in the music. If the offset is + greater than 0, the lyrics will be shifted take that intoaccount + save_results (`bool`, *optional*, defaults to `True`): + Whether or not to save the intermediate results. If `True`, will generate a folder named with the start + time. + sample_length (`int`, *optional*): + Desired length of the generation in samples. + + Returns: torch.Tensor + + Example: + + ```python + >>> from transformers import AutoTokenizer, JukeboxModel, set_seed + >>> import torch + + >>> metas = dict(artist="Zac Brown Band", genres="Country", lyrics="I met a traveller from an antique land") + >>> tokenizer = AutoTokenizer.from_pretrained("openai/jukebox-1b-lyrics") + >>> model = JukeboxModel.from_pretrained("openai/jukebox-1b-lyrics", min_duration=0).eval() + + >>> labels = tokenizer(**metas)["input_ids"] + >>> set_seed(0) + >>> zs = [torch.zeros(1, 0, dtype=torch.long) for _ in range(3)] + >>> zs = model._sample(zs, labels, [0], sample_length=40 * model.priors[0].raw_to_tokens, save_results=False) + >>> zs[0] + tensor([[1853, 1369, 1150, 1869, 1379, 1789, 519, 710, 1306, 1100, 1229, 519, + 353, 1306, 1379, 1053, 519, 653, 1631, 1467, 1229, 1229, 10, 1647, + 1254, 1229, 1306, 1528, 1789, 216, 1631, 1434, 653, 475, 1150, 1528, + 1804, 541, 1804, 1434]]) + ``` + """ + + top_prior = self.priors[0] + if sample_length is not None: + total_length = sample_length + else: + total_length = ( + int(sample_length_in_seconds * self.config.sampling_rate) // top_prior.raw_to_tokens + ) * top_prior.raw_to_tokens + + if sample_levels is None: + sample_levels = range(len(self.priors)) + + # total length of the signal, might be bit different from the actual generated length + self.total_length = total_length + for level in sample_levels: + sampling_kwargs = { + "temp": 0.99 if level == len(self.priors) - 1 else sampling_temperature, + "chunk_size": chunk_size, + "sample_tokens": sample_tokens, + } + # Set correct total_length, hop_length, labels and sampling_kwargs for level + + total_token_to_sample = total_length // self.priors[level].raw_to_tokens + hop_length = int(self.config.hop_fraction[level] * self.priors[level].n_ctx) + max_batch_size = lower_batch_size if level != sample_levels else max_batch_size + music_tokens = self.sample_level( + music_tokens, + labels[level], + offset, + sampling_kwargs, + level, + total_token_to_sample, + hop_length, + max_batch_size, + ) + + if save_results: + self.vqvae.to(music_tokens[level].device) + # Decode sample + with torch.no_grad(): + start_level = len(self.priors) - level - 1 # vqvae levels are reversed + raw_audio = self.vqvae.decode( + music_tokens[: level + 1], start_level=start_level, bs_chunks=music_tokens[level].shape[0] + ) + logdir = f"jukebox/level_{level}" + if not os.path.exists(logdir): + os.makedirs(logdir) 
+ save_temp_audio(logdir, level, metas=metas, aud=raw_audio.float()) + if compute_alignments and self.priors[0] is not None and self.priors[0].nb_relevant_lyric_tokens > 0: + with torch.no_grad(): + alignments = get_alignment(music_tokens, labels[0], self.priors[0], self.config) + torch.save({"alignments": alignments}, f"{logdir}/lyric_alignments.pt") + + return music_tokens + + @add_start_docstrings( + """ + Generates music tokens based on the provided `labels. Will start at the desired prior level and automatically + upsample the sequence. If you want to create the audio, you should call `model.decode(tokens)`, which will use + the VQ-VAE decoder to convert the music tokens to raw audio. + + Args: + labels (`List[torch.LongTensor]`) : + List of length `n_sample`, and shape `(self.levels, 4 + self.config.max_nb_genre + + lyric_sequence_length)` metadata such as `artist_id`, `genre_id` and the full list of lyric tokens + which are used to condition the generation. + n_samples (`int`, *optional*, default to 1) : + Number of samples to be generated in parallel. + """, + ) + def ancestral_sample(self, labels, n_samples=1, **sampling_kwargs) -> List[torch.LongTensor]: + """ + Example: + + ```python + >>> from transformers import AutoTokenizer, JukeboxModel, set_seed + + >>> model = JukeboxModel.from_pretrained("openai/jukebox-1b-lyrics", min_duration=0).eval() + >>> tokenizer = AutoTokenizer.from_pretrained("openai/jukebox-1b-lyrics") + + >>> lyrics = "Hey, are you awake? Can you talk to me?" + >>> artist = "Zac Brown Band" + >>> genre = "Country" + >>> metas = tokenizer(artist=artist, genres=genre, lyrics=lyrics) + >>> set_seed(0) + >>> music_tokens = model.ancestral_sample(metas.input_ids, sample_length=400) + + >>> with torch.no_grad(): + ... model.decode(music_tokens)[:, :10].squeeze(-1) + tensor([[-0.0219, -0.0679, -0.1050, -0.1203, -0.1271, -0.0936, -0.0396, -0.0405, + -0.0818, -0.0697]]) + ``` + """ + + sample_levels = sampling_kwargs.pop("sample_levels", list(range(len(self.priors)))) + music_tokens = [ + torch.zeros(n_samples, 0, dtype=torch.long, device=labels[0].device) for _ in range(len(self.priors)) + ] + music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs) + return music_tokens + + @add_start_docstrings( + """Generates a continuation of the previously generated tokens. + + Args: + music_tokens (`List[torch.LongTensor]` of length `self.levels` ) : + A sequence of music tokens which will be used as context to continue the sampling process. Should have + `self.levels` tensors, each corresponding to the generation at a certain level. + """, + JUKEBOX_SAMPLING_INPUT_DOCSTRING, + ) + def continue_sample(self, music_tokens, labels, **sampling_kwargs) -> List[torch.LongTensor]: + sample_levels = sampling_kwargs.pop("sample_levels", list(range(len(self.priors)))) + music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs) + return music_tokens + + @add_start_docstrings( + """Upsamples a sequence of music tokens using the prior at level `level`. + + Args: + music_tokens (`List[torch.LongTensor]` of length `self.levels` ) : + A sequence of music tokens which will be used as context to continue the sampling process. Should have + `self.levels` tensors, each corresponding to the generation at a certain level. 
+ """, + JUKEBOX_SAMPLING_INPUT_DOCSTRING, + ) + def upsample(self, music_tokens, labels, **sampling_kwargs) -> List[torch.LongTensor]: + sample_levels = sampling_kwargs.pop("sample_levels", list(range(len(self.priors) - 1))) + music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs) + return music_tokens + + @add_start_docstrings( + """Generate a raw audio conditioned on the provided `raw_audio` which is used as conditioning at each of the + generation levels. The audio is encoded to music tokens using the 3 levels of the VQ-VAE. These tokens are + used: as conditioning for each level, which means that no ancestral sampling is required. + + Args: + raw_audio (`List[torch.Tensor]` of length `n_samples` ) : + A list of raw audio that will be used as conditioning information for each samples that will be + generated. + """, + JUKEBOX_SAMPLING_INPUT_DOCSTRING, + ) + def primed_sample(self, raw_audio, labels, **sampling_kwargs) -> List[torch.LongTensor]: + sample_levels = sampling_kwargs.pop("sample_levels", list(range(len(self.priors)))) + self.vqvae.to(raw_audio.device).float() + with torch.no_grad(): + music_tokens = self.vqvae.encode( + raw_audio, start_level=0, end_level=len(self.priors), bs_chunks=raw_audio.shape[0] + ) + music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs) + return music_tokens diff --git a/venv/lib/python3.10/site-packages/transformers/models/jukebox/tokenization_jukebox.py b/venv/lib/python3.10/site-packages/transformers/models/jukebox/tokenization_jukebox.py new file mode 100644 index 0000000000000000000000000000000000000000..cd478d6f6bb14027fea853d587cf8ebf4f8ed314 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/jukebox/tokenization_jukebox.py @@ -0,0 +1,405 @@ +# coding=utf-8 +# Copyright 2022 The Open AI Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes for OpenAI Jukebox.""" + + +import json +import os +import re +import unicodedata +from json.encoder import INFINITY +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np +import regex + +from ...tokenization_utils import AddedToken, PreTrainedTokenizer +from ...tokenization_utils_base import BatchEncoding +from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging +from ...utils.generic import _is_jax, _is_numpy + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = { + "artists_file": "artists.json", + "lyrics_file": "lyrics.json", + "genres_file": "genres.json", +} + + +class JukeboxTokenizer(PreTrainedTokenizer): + """ + Constructs a Jukebox tokenizer. Jukebox can be conditioned on 3 different inputs : + - Artists, unique ids are associated to each artist from the provided dictionary. + - Genres, unique ids are associated to each genre from the provided dictionary. + - Lyrics, character based tokenization. Must be initialized with the list of characters that are inside the + vocabulary. 
+ + This tokenizer does not require training. It should be able to process a different number of inputs: + as the conditioning of the model can be done on the three different queries. If None is provided, defaults values will be used.: + + Depending on the number of genres on which the model should be conditioned (`n_genres`). + ```python + >>> from transformers import JukeboxTokenizer + + >>> tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics") + >>> tokenizer("Alan Jackson", "Country Rock", "old town road")["input_ids"] + [tensor([[ 0, 0, 0, 6785, 546, 41, 38, 30, 76, 46, 41, 49, + 40, 76, 44, 41, 27, 30]]), tensor([[ 0, 0, 0, 145, 0]]), tensor([[ 0, 0, 0, 145, 0]])] + ``` + + You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you + call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. + + + + If nothing is provided, the genres and the artist will either be selected randomly or set to None + + + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to: + this superclass for more information regarding those methods. + + However the code does not allow that and only supports composing from various genres. + + Args: + artists_file (`str`): + Path to the vocabulary file which contains a mapping between artists and ids. The default file supports + both "v2" and "v3" + genres_file (`str`): + Path to the vocabulary file which contain a mapping between genres and ids. + lyrics_file (`str`): + Path to the vocabulary file which contains the accepted characters for the lyrics tokenization. + version (`List[str]`, `optional`, default to `["v3", "v2", "v2"]`) : + List of the tokenizer versions. The `5b-lyrics`'s top level prior model was trained using `v3` instead of + `v2`. + n_genres (`int`, `optional`, defaults to 1): + Maximum number of genres to use for composition. + max_n_lyric_tokens (`int`, `optional`, defaults to 512): + Maximum number of lyric tokens to keep. + unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + """ + + vocab_files_names = VOCAB_FILES_NAMES + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + artists_file, + genres_file, + lyrics_file, + version=["v3", "v2", "v2"], + max_n_lyric_tokens=512, + n_genres=5, + unk_token="<|endoftext|>", + **kwargs, + ): + unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token + self.version = version + self.max_n_lyric_tokens = max_n_lyric_tokens + self.n_genres = n_genres + self._added_tokens_decoder = {0: unk_token} + + with open(artists_file, encoding="utf-8") as vocab_handle: + self.artists_encoder = json.load(vocab_handle) + + with open(genres_file, encoding="utf-8") as vocab_handle: + self.genres_encoder = json.load(vocab_handle) + + with open(lyrics_file, encoding="utf-8") as vocab_handle: + self.lyrics_encoder = json.load(vocab_handle) + + oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" + # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
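+        # For illustration (hypothetical input, not from the original code): `out_of_vocab`
+        # matches runs of characters outside the accepted lyric alphabet, which
+        # `prepare_for_tokenization` later strips with `self.out_of_vocab.sub("", lyrics)`, e.g.
+        #     regex.compile(oov).sub("", "I love § music €!")  ->  "I love  music !"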
+ if len(self.lyrics_encoder) == 79: + oov = oov.replace(r"\-'", r"\-+'") + + self.out_of_vocab = regex.compile(oov) + self.artists_decoder = {v: k for k, v in self.artists_encoder.items()} + self.genres_decoder = {v: k for k, v in self.genres_encoder.items()} + self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()} + super().__init__( + unk_token=unk_token, + n_genres=n_genres, + version=version, + max_n_lyric_tokens=max_n_lyric_tokens, + **kwargs, + ) + + @property + def vocab_size(self): + return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder) + + def get_vocab(self): + return { + "artists_encoder": self.artists_encoder, + "genres_encoder": self.genres_encoder, + "lyrics_encoder": self.lyrics_encoder, + } + + def _convert_token_to_id(self, list_artists, list_genres, list_lyrics): + """Converts the artist, genre and lyrics tokens to their index using the vocabulary. + The total_length, offset and duration have to be provided in order to select relevant lyrics and add padding to + the lyrics token sequence. + """ + artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists] + for genres in range(len(list_genres)): + list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]] + list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres])) + + lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []] + return artists_id, list_genres, lyric_ids + + def _tokenize(self, lyrics): + """ + Converts a string into a sequence of tokens (string), using the tokenizer. Split in words for word-based + vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces). + + Do NOT take care of added tokens. Only the lyrics are split into character for the character-based vocabulary. + """ + # only lyrics are not tokenized, but character based is easily handled + return list(lyrics) + + def tokenize(self, artist, genre, lyrics, **kwargs): + """ + Converts three strings in a 3 sequence of tokens using the tokenizer + """ + artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics) + lyrics = self._tokenize(lyrics) + return artist, genre, lyrics + + def prepare_for_tokenization( + self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False + ) -> Tuple[str, str, str, Dict[str, Any]]: + """ + Performs any necessary transformations before tokenization. + + Args: + artist (`str`): + The artist name to prepare. This will mostly lower the string + genres (`str`): + The genre name to prepare. This will mostly lower the string. + lyrics (`str`): + The lyrics to prepare. + is_split_into_words (`bool`, *optional*, defaults to `False`): + Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the + tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) + which it will tokenize. This is useful for NER or token classification. 
+ """ + for idx in range(len(self.version)): + if self.version[idx] == "v3": + artists[idx] = artists[idx].lower() + genres[idx] = [genres[idx].lower()] + else: + artists[idx] = self._normalize(artists[idx]) + ".v2" + genres[idx] = [ + self._normalize(genre) + ".v2" for genre in genres[idx].split("_") + ] # split is for the full dictionary with combined genres + + if self.version[0] == "v2": + self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+") + vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n" + self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))} + self.vocab[""] = 0 + self.n_vocab = len(vocab) + 1 + self.lyrics_encoder = self.vocab + self.lyrics_decoder = {v: k for k, v in self.vocab.items()} + self.lyrics_decoder[0] = "" + else: + self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+") + + lyrics = self._run_strip_accents(lyrics) + lyrics = lyrics.replace("\\", "\n") + lyrics = self.out_of_vocab.sub("", lyrics), [], [] + return artists, genres, lyrics + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _normalize(self, text: str) -> str: + """ + Normalizes the input text. This process is for the genres and the artist + + Args: + text (`str`): + Artist or Genre string to normalize + """ + + accepted = ( + [chr(i) for i in range(ord("a"), ord("z") + 1)] + + [chr(i) for i in range(ord("A"), ord("Z") + 1)] + + [chr(i) for i in range(ord("0"), ord("9") + 1)] + + ["."] + ) + accepted = frozenset(accepted) + pattern = re.compile(r"_+") + text = "".join([c if c in accepted else "_" for c in text.lower()]) + text = pattern.sub("_", text).strip("_") + return text + + def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str: + return " ".join(lyrics) + + def convert_to_tensors( + self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False + ): + """ + Convert the inner content to tensors. + + Args: + tensor_type (`str` or [`~utils.TensorType`], *optional*): + The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If + unset, no modification is done. + prepend_batch_axis (`int`, *optional*, defaults to `False`): + Whether or not to add the batch dimension during the conversion. + """ + # Convert to TensorType + if not isinstance(tensor_type, TensorType): + tensor_type = TensorType(tensor_type) + + # Get a function reference for the correct framework + if tensor_type == TensorType.TENSORFLOW: + if not is_tf_available(): + raise ImportError( + "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." 
+ ) + import tensorflow as tf + + as_tensor = tf.constant + is_tensor = tf.is_tensor + elif tensor_type == TensorType.PYTORCH: + if not is_torch_available(): + raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") + import torch + + as_tensor = torch.tensor + is_tensor = torch.is_tensor + elif tensor_type == TensorType.JAX: + if not is_flax_available(): + raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.") + import jax.numpy as jnp # noqa: F811 + + as_tensor = jnp.array + is_tensor = _is_jax + else: + as_tensor = np.asarray + is_tensor = _is_numpy + + # Do the tensor conversion in batch + + try: + if prepend_batch_axis: + inputs = [inputs] + + if not is_tensor(inputs): + inputs = as_tensor(inputs) + except: # noqa E722 + raise ValueError( + "Unable to create tensor, you should probably activate truncation and/or padding " + "with 'padding=True' 'truncation=True' to have batched tensors with the same length." + ) + + return inputs + + def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding: + """Convert the raw string to a list of token ids + + Args: + artist (`str`): + Name of the artist. + genres (`str`): + List of genres that will be mixed to condition the audio + lyrics (`str`, *optional*, defaults to `""`): + Lyrics used to condition the generation + """ + input_ids = [0, 0, 0] + artist = [artist] * len(self.version) + genres = [genres] * len(self.version) + + artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics) + artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens) + + attention_masks = [-INFINITY] * len(full_tokens[-1]) + input_ids = [ + self.convert_to_tensors( + [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors + ) + for i in range(len(self.version)) + ] + return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks}) + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + """ + Saves the tokenizer's vocabulary dictionary to the provided save_directory. + + Args: + save_directory (`str`): + A path to the directory where to saved. It will be created if it doesn't exist. + + filename_prefix (`Optional[str]`, *optional*): + A prefix to add to the names of the files saved by the tokenizer. + + """ + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + + artists_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"] + ) + with open(artists_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.artists_encoder, ensure_ascii=False)) + + genres_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"] + ) + with open(genres_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.genres_encoder, ensure_ascii=False)) + + lyrics_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"] + ) + with open(lyrics_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False)) + + return (artists_file, genres_file, lyrics_file) + + def _convert_id_to_token(self, artists_index, genres_index, lyric_index): + """ + Converts an index (integer) in a token (str) using the vocab. 
+ + Args: + artists_index (`int`): + Index of the artist in its corresponding dictionary. + genres_index (`Union[List[int], int]`): + Index of the genre in its corresponding dictionary. + lyric_index (`List[int]`): + List of character indices, which each correspond to a character. + """ + artist = self.artists_decoder.get(artists_index) + genres = [self.genres_decoder.get(genre) for genre in genres_index] + lyrics = [self.lyrics_decoder.get(character) for character in lyric_index] + return artist, genres, lyrics diff --git a/venv/lib/python3.10/site-packages/transformers/models/patchtst/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/patchtst/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8c7db64c198406d458aa6451284249745aa9667f --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/patchtst/__init__.py @@ -0,0 +1,66 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +# rely on isort to merge the imports +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available + + +_import_structure = { + "configuration_patchtst": [ + "PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP", + "PatchTSTConfig", + ], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_patchtst"] = [ + "PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST", + "PatchTSTModel", + "PatchTSTPreTrainedModel", + "PatchTSTForPrediction", + "PatchTSTForPretraining", + "PatchTSTForRegression", + "PatchTSTForClassification", + ] + + +if TYPE_CHECKING: + from .configuration_patchtst import PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP, PatchTSTConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_patchtst import ( + PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST, + PatchTSTForClassification, + PatchTSTForPrediction, + PatchTSTForPretraining, + PatchTSTForRegression, + PatchTSTModel, + PatchTSTPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/patchtst/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/patchtst/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..036df4324ceec89d6f9e4313df8f7cbeb49996a5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/patchtst/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/patchtst/__pycache__/configuration_patchtst.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/patchtst/__pycache__/configuration_patchtst.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..157f3d44bf96ae0e6feb8167d08d290253f33c89 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/patchtst/__pycache__/configuration_patchtst.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/patchtst/configuration_patchtst.py b/venv/lib/python3.10/site-packages/transformers/models/patchtst/configuration_patchtst.py new file mode 100644 index 0000000000000000000000000000000000000000..dc95429d90995a386ec121beb897434ea6aa8d00 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/patchtst/configuration_patchtst.py @@ -0,0 +1,260 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PatchTST model configuration""" + +from typing import List, Optional, Union + +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import PATCHTST_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class PatchTSTConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of an [`PatchTSTModel`]. It is used to instantiate an + PatchTST model according to the specified arguments, defining the model architecture. + [ibm/patchtst](https://huggingface.co/ibm/patchtst) architecture. + + Configuration objects inherit from [`PretrainedConfig`] can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + num_input_channels (`int`, *optional*, defaults to 1): + The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of + multivariate targets. + context_length (`int`, *optional*, defaults to 32): + The context length of the input sequence. + distribution_output (`str`, *optional*, defaults to `"student_t"`): + The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or + "negative_binomial". + loss (`str`, *optional*, defaults to `"mse"`): + The loss function for the model corresponding to the `distribution_output` head. For parametric + distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared + error "mse". + patch_length (`int`, *optional*, defaults to 1): + Define the patch length of the patchification process. + patch_stride (`int`, *optional*, defaults to 1): + Define the stride of the patchification process. + num_hidden_layers (`int`, *optional*, defaults to 3): + Number of hidden layers. + d_model (`int`, *optional*, defaults to 128): + Dimensionality of the transformer layers. + num_attention_heads (`int`, *optional*, defaults to 4): + Number of attention heads for each attention layer in the Transformer encoder. + share_embedding (`bool`, *optional*, defaults to `True`): + Sharing the input embedding across all channels. 
+ channel_attention (`bool`, *optional*, defaults to `False`): + Activate channel attention block in the Transformer to allow channels to attend each other. + ffn_dim (`int`, *optional*, defaults to 512): + Dimension of the "intermediate" (often named feed-forward) layer in the Transformer encoder. + norm_type (`str` , *optional*, defaults to `"batchnorm"`): + Normalization at each Transformer layer. Can be `"batchnorm"` or `"layernorm"`. + norm_eps (`float`, *optional*, defaults to 1e-05): + A value added to the denominator for numerical stability of normalization. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout probability for the attention probabilities. + dropout (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the Transformer. + positional_dropout (`float`, *optional*, defaults to 0.0): + The dropout probability in the positional embedding layer. + path_dropout (`float`, *optional*, defaults to 0.0): + The dropout path in the residual block. + ff_dropout (`float`, *optional*, defaults to 0.0): + The dropout probability used between the two layers of the feed-forward networks. + bias (`bool`, *optional*, defaults to `True`): + Whether to add bias in the feed-forward networks. + activation_function (`str`, *optional*, defaults to `"gelu"`): + The non-linear activation function (string) in the Transformer.`"gelu"` and `"relu"` are supported. + pre_norm (`bool`, *optional*, defaults to `True`): + Normalization is applied before self-attention if pre_norm is set to `True`. Otherwise, normalization is + applied after residual block. + positional_encoding_type (`str`, *optional*, defaults to `"sincos"`): + Positional encodings. Options `"random"` and `"sincos"` are supported. + use_cls_token (`bool`, *optional*, defaults to `False`): + Whether cls token is used. + init_std (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated normal weight initialization distribution. + share_projection (`bool`, *optional*, defaults to `True`): + Sharing the projection layer across different channels in the forecast head. + scaling (`Union`, *optional*, defaults to `"std"`): + Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the + scaler is set to "mean". + do_mask_input (`bool`, *optional*): + Apply masking during the pretraining. + mask_type (`str`, *optional*, defaults to `"random"`): + Masking type. Only `"random"` and `"forecast"` are currently supported. + random_mask_ratio (`float`, *optional*, defaults to 0.5): + Masking ratio applied to mask the input data during random pretraining. + num_forecast_mask_patches (`int` or `list`, *optional*, defaults to `[2]`): + Number of patches to be masked at the end of each batch sample. If it is an integer, + all the samples in the batch will have the same number of masked patches. If it is a list, + samples in the batch will be randomly masked by numbers defined in the list. This argument is only used + for forecast pretraining. + channel_consistent_masking (`bool`, *optional*, defaults to `False`): + If channel consistent masking is True, all the channels will have the same masking pattern. + unmasked_channel_indices (`list`, *optional*): + Indices of channels that are not masked during pretraining. Values in the list are number between 1 and + `num_input_channels` + mask_value (`int`, *optional*, defaults to 0): + Values in the masked patches will be filled by `mask_value`. 
+ pooling_type (`str`, *optional*, defaults to `"mean"`): + Pooling of the embedding. `"mean"`, `"max"` and `None` are supported. + head_dropout (`float`, *optional*, defaults to 0.0): + The dropout probability for head. + prediction_length (`int`, *optional*, defaults to 24): + The prediction horizon that the model will output. + num_targets (`int`, *optional*, defaults to 1): + Number of targets for regression and classification tasks. For classification, it is the number of + classes. + output_range (`list`, *optional*): + Output range for regression task. The range of output values can be set to enforce the model to produce + values within a range. + num_parallel_samples (`int`, *optional*, defaults to 100): + The number of samples is generated in parallel for probabilistic prediction. + + + ```python + >>> from transformers import PatchTSTConfig, PatchTSTModel + + >>> # Initializing an PatchTST configuration with 12 time steps for prediction + >>> configuration = PatchTSTConfig(prediction_length=12) + + >>> # Randomly initializing a model (with random weights) from the configuration + >>> model = PatchTSTModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "patchtst" + attribute_map = { + "hidden_size": "d_model", + "num_attention_heads": "num_attention_heads", + "num_hidden_layers": "num_hidden_layers", + } + + def __init__( + self, + # time series specific configuration + num_input_channels: int = 1, + context_length: int = 32, + distribution_output: str = "student_t", + loss: str = "mse", + # PatchTST arguments + patch_length: int = 1, + patch_stride: int = 1, + # Transformer architecture configuration + num_hidden_layers: int = 3, + d_model: int = 128, + num_attention_heads: int = 4, + share_embedding: bool = True, + channel_attention: bool = False, + ffn_dim: int = 512, + norm_type: str = "batchnorm", + norm_eps: float = 1e-05, + attention_dropout: float = 0.0, + dropout: float = 0.0, + positional_dropout: float = 0.0, + path_dropout: float = 0.0, + ff_dropout: float = 0.0, + bias: bool = True, + activation_function: str = "gelu", + pre_norm: bool = True, + positional_encoding_type: str = "sincos", + use_cls_token: bool = False, + init_std: float = 0.02, + share_projection: bool = True, + scaling: Optional[Union[str, bool]] = "std", + # mask pretraining + do_mask_input: Optional[bool] = None, + mask_type: str = "random", + random_mask_ratio: float = 0.5, + num_forecast_mask_patches: Optional[Union[List[int], int]] = [2], + channel_consistent_masking: Optional[bool] = False, + unmasked_channel_indices: Optional[List[int]] = None, + mask_value: int = 0, + # head + pooling_type: str = "mean", + head_dropout: float = 0.0, + prediction_length: int = 24, + num_targets: int = 1, + output_range: Optional[List] = None, + # distribution head + num_parallel_samples: int = 100, + **kwargs, + ): + # time series specific configuration + self.context_length = context_length + self.num_input_channels = num_input_channels # n_vars + self.loss = loss + self.distribution_output = distribution_output + self.num_parallel_samples = num_parallel_samples + + # Transformer architecture configuration + self.d_model = d_model + self.num_attention_heads = num_attention_heads + self.ffn_dim = ffn_dim + self.num_hidden_layers = num_hidden_layers + self.dropout = dropout + self.attention_dropout = attention_dropout + self.share_embedding = share_embedding + self.channel_attention = channel_attention + self.norm_type = norm_type + 
self.norm_eps = norm_eps + self.positional_dropout = positional_dropout + self.path_dropout = path_dropout + self.ff_dropout = ff_dropout + self.bias = bias + self.activation_function = activation_function + self.pre_norm = pre_norm + self.positional_encoding_type = positional_encoding_type + self.use_cls_token = use_cls_token + self.init_std = init_std + self.scaling = scaling + + # PatchTST parameters + self.patch_length = patch_length + self.patch_stride = patch_stride + + # Mask pretraining + self.do_mask_input = do_mask_input + self.mask_type = mask_type + self.random_mask_ratio = random_mask_ratio # for random masking + self.num_forecast_mask_patches = num_forecast_mask_patches # for forecast masking + self.channel_consistent_masking = channel_consistent_masking + self.unmasked_channel_indices = unmasked_channel_indices + self.mask_value = mask_value + + # general head params + self.pooling_type = pooling_type + self.head_dropout = head_dropout + + # For prediction head + self.share_projection = share_projection + self.prediction_length = prediction_length + + # For prediction and regression head + self.num_parallel_samples = num_parallel_samples + + # Regression + self.num_targets = num_targets + self.output_range = output_range + + super().__init__(**kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/patchtst/modeling_patchtst.py b/venv/lib/python3.10/site-packages/transformers/models/patchtst/modeling_patchtst.py new file mode 100644 index 0000000000000000000000000000000000000000..22b206726e16d30a7376fa0d178055a6758ceea8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/patchtst/modeling_patchtst.py @@ -0,0 +1,2035 @@ +# coding=utf-8 +# Copyright 2023 IBM & Hugging Face. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
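As a reading aid (not part of the diff above), here is a minimal sketch of how the configuration class just defined is typically wired together for masked pretraining; the concrete values are illustrative assumptions rather than defaults from the file, and only parameters documented in the `PatchTSTConfig` docstring are used.

```python
# Sketch only: assumes the installed transformers package exposes PatchTSTConfig,
# as the patchtst/__init__.py earlier in this diff declares.
from transformers import PatchTSTConfig

config = PatchTSTConfig(
    num_input_channels=7,   # multivariate input with 7 channels (illustrative)
    context_length=512,     # length of the input window
    patch_length=16,
    patch_stride=16,        # stride == patch length -> non-overlapping patches
    d_model=128,
    num_attention_heads=4,
    num_hidden_layers=3,
    do_mask_input=True,     # enable masking during pretraining
    mask_type="random",
    random_mask_ratio=0.4,  # mask 40% of the patches
)

# The class-level attribute_map aliases `hidden_size` to `d_model`.
assert config.hidden_size == config.d_model
```

The same object would then be handed to one of the model classes exported above (for example `PatchTSTForPretraining(config)`), in the same way the docstring example constructs `PatchTSTModel(configuration)`.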
+""" PyTorch PatchTST model.""" + +import math +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch +from torch import nn + +from ...activations import ACT2CLS +from ...modeling_outputs import BaseModelOutput +from ...modeling_utils import PreTrainedModel +from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput +from ...utils import ModelOutput, add_start_docstrings, logging +from .configuration_patchtst import PatchTSTConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "PatchTSTConfig" + + +from ..deprecated._archive_maps import PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->PatchTST +class PatchTSTAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + is_causal: bool = False, + config: Optional[PatchTSTConfig] = None, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + self.config = config + + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." + ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + self.is_causal = is_causal + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, tgt_len, _ = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + # `past_key_value[0].shape[2] == key_value_states.shape[1]` + # is checking that the `sequence_length` of the `past_key_value` is the same as + # the provided `key_value_states` to support prefix tuning + if ( + is_cross_attention + and past_key_value is not None + and past_key_value[0].shape[2] == key_value_states.shape[1] + ): + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0], 
key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.reshape(*proj_shape) + value_states = value_states.reshape(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if layer_head_mask is not None: + if layer_head_mask.size() != (self.num_heads,): + raise ValueError( + f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to be reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + + # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be + # partitioned across GPUs when using tensor-parallelism. 
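+ # At this point `attn_output` has shape (batch_size, tgt_len, num_heads, head_dim); the reshape below
+ # merges the per-head dimensions back into `embed_dim` before the final output projection.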
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped, past_key_value + + +class PatchTSTBatchNorm(nn.Module): + """ + Compute batch normalization over the sequence length (time) dimension. + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__() + self.batchnorm = nn.BatchNorm1d(config.d_model, eps=config.norm_eps) + + def forward(self, inputs: torch.Tensor): + """ + Parameters: + inputs (`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`): + input for Batch norm calculation + Returns: + `torch.Tensor` of shape `(batch_size, sequence_length, d_model)` + """ + output = inputs.transpose(1, 2) # output: (batch_size, d_model, sequence_length) + output = self.batchnorm(output) + return output.transpose(1, 2) + + +def random_masking( + inputs: torch.Tensor, + mask_ratio: float, + unmasked_channel_indices: list = None, + channel_consistent_masking: bool = False, + mask_value: int = 0, +): + """random_masking: Mask the input considering the control variables. + + Args: + inputs (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, num_features)`): + The input tensor to mask. + mask_ratio (`float`): + Masking ratio applied to mask the input data during random pretraining. It is the number between 0 and 1. + unmasked_channel_indices (list, *optional*): + Indices of channels that will not be masked. + channel_consistent_masking (bool, *optional*, defaults to `False`): + When true, masking will be same across all channels of a timeseries. Otherwise, masking positions will vary + across channels. + mask_value (int, *optional*, defaults to 0): + Define the value of masked patches for pretraining. + + Returns: + `tuple(torch.Tensor)`: inputs_mask, masked input, same shape as input Tensor and mask tensor of shape [bs x c x + n] + """ + if mask_ratio < 0 or mask_ratio >= 1: + raise ValueError(f"Mask ratio {mask_ratio} has to be between 0 and 1.") + + batch_size, num_channels, sequence_length, num_features = inputs.shape + device = inputs.device + + len_keep = int(sequence_length * (1 - mask_ratio)) + + if channel_consistent_masking: + noise = torch.rand(batch_size, 1, sequence_length, device=device) # noise in [0, 1], bs x 1 x L + noise = noise.repeat(1, num_channels, 1) # bs x num_channels x time + else: + # noise in [0, 1], bs x num_channels x L + noise = torch.rand(batch_size, num_channels, sequence_length, device=device) + + # mask: [bs x num_channels x num_patch] + mask = torch.ones(batch_size, num_channels, sequence_length, device=device) + mask[:, :, :len_keep] = 0 + + # sort noise for each sample + ids_shuffle = torch.argsort(noise, dim=-1) # ascend: small is keep, large is remove + ids_restore = torch.argsort(ids_shuffle, dim=-1) # ids_restore: [bs x num_channels x L] + + mask = torch.gather(mask, dim=-1, index=ids_restore) + mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features) # mask: [bs x num_channels x num_patches x patch_length] + if unmasked_channel_indices is not None: + mask[:, unmasked_channel_indices, :, :] = 0 + + inputs_mask = inputs.masked_fill(mask.bool(), mask_value) + return inputs_mask, mask[..., 0] + + +def forecast_masking( + inputs: torch.Tensor, + num_forecast_mask_patches: Union[list, int], + unmasked_channel_indices: list = None, + mask_value: int = 0, +): + """Forecast masking that masks the last K patches where K is from the num_forecast_mask_patches. 
+ If num_forecast_mask_patches is a list, samples in the batch will be randomly masked by numbers defined in the list. + + Parameters: + inputs (`torch.Tensor`): + Input of shape `(bs, num_channels, num_patch, patch_length)` + num_forecast_mask_patches (`list`): + Number of patches to be masked at the end of each batch sample. e.g. 4 or [3, 5]. + unmasked_channel_indices (`list`, *optional*): + Indices of channels that are not masked. + mask_value (`int`, *optional*, defaults to 0): + Values in the masked patches will be filled by `mask_value`. + + Returns: + `tuple(torch.Tensor)`: inputs_mask, masked input, same shape as inputs Tensor and Mask tensor of shape `(bs, + num_channels , num_patch)` or `(bs, tsg1, tsg2, num_channels, num_patch)` + """ + + if isinstance(num_forecast_mask_patches, int): + num_forecast_mask_patches = [num_forecast_mask_patches] + forecast_mask_ratios = [1 for _ in num_forecast_mask_patches] + + batch_size, num_channels, sequence_length, num_features = inputs.shape + mask = torch.zeros(batch_size, num_channels, sequence_length, device=inputs.device) + + t_list = [] + total_length = 0 + total_ratio = sum(forecast_mask_ratios) + + for patch_length, ratio in zip(num_forecast_mask_patches, forecast_mask_ratios): + if patch_length <= 0 or patch_length >= sequence_length: + raise ValueError( + f"num_forecast_mask_patches {patch_length} should be greater than 0 and less than total patches." + ) + temp_len = int(batch_size * ratio / total_ratio) + t_list.append([patch_length, ratio, temp_len]) + total_length += temp_len + + t_list = sorted(t_list, key=lambda x: x[2]) + + if total_length < batch_size: + t_list[0][2] = t_list[0][2] + (batch_size - total_length) + elif total_length > batch_size: + t_list[-1][2] = t_list[-1][2] + (total_length - batch_size) + + batch1 = 0 + for patch_len, _, temp_len in t_list: + batch2 = batch1 + temp_len + mask[batch1:batch2, :, -patch_len:] = 1 + batch1 = batch2 + + perm = torch.randperm(mask.shape[0]) + mask = mask[perm] + + mask = mask.unsqueeze(-1).repeat(1, 1, 1, num_features) # mask: [bs x num_channels x num_patch x patch_len] + if unmasked_channel_indices is not None: + mask[:, unmasked_channel_indices, :, :] = 0 + + inputs_mask = inputs.masked_fill(mask.bool(), mask_value) + return inputs_mask, mask[..., 0] + + +class PatchTSTPatchify(nn.Module): + """ + A class to patchify the time series sequence into different patches + + Returns: + `torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)` + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__() + + self.sequence_length = config.context_length + self.patch_length = config.patch_length + self.patch_stride = config.patch_stride + + if self.sequence_length <= self.patch_length: + raise ValueError( + f"Sequence length ({self.sequence_length}) has to be greater than the patch length ({self.patch_length})" + ) + + # get the number of patches + self.num_patches = (max(self.sequence_length, self.patch_length) - self.patch_length) // self.patch_stride + 1 + new_sequence_length = self.patch_length + self.patch_stride * (self.num_patches - 1) + self.sequence_start = self.sequence_length - new_sequence_length + + def forward(self, past_values: torch.Tensor): + """ + Parameters: + past_values (`torch.Tensor` of shape `(batch_size, sequence_length, num_channels)`, *required*): + Input for patchification + + Returns: + `torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)` + """ + sequence_length = past_values.shape[-2] + if 
sequence_length != self.sequence_length: + raise ValueError( + f"Input sequence length ({sequence_length}) doesn't match model configuration ({self.sequence_length})." + ) + # output: [bs x new_sequence_length x num_channels] + output = past_values[:, self.sequence_start :, :] + # output: [bs x num_patches x num_input_channels x patch_length] + output = output.unfold(dimension=-2, size=self.patch_length, step=self.patch_stride) + # output: [bs x num_input_channels x num_patches x patch_length] + output = output.transpose(-2, -3).contiguous() + return output + + +class PatchTSTMasking(nn.Module): + """ + Class to perform random or forecast masking. + + Parameters: + config (`PatchTSTConfig`): model config + Returns: + x_mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`) + Masked patched input + mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`) + Bool tensor indicating True on masked points + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__() + self.random_mask_ratio = config.random_mask_ratio + self.channel_consistent_masking = config.channel_consistent_masking + self.mask_type = config.mask_type + self.num_forecast_mask_patches = config.num_forecast_mask_patches + self.unmasked_channel_indices = config.unmasked_channel_indices + self.mask_value = config.mask_value + if self.unmasked_channel_indices is not None: + self.unmasked_channel_indices = sorted(self.unmasked_channel_indices) + + def forward(self, patch_input: torch.Tensor): + """ + Parameters: + patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*): + Patch input + + Return: + masked_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`) + Masked patched input + mask (`torch.Tensor` of shape `(batch_size, num_channels, num_patches)`) + Bool tensor indicating True on masked points + + """ + if self.mask_type == "random": + masked_input, mask = random_masking( + inputs=patch_input, + mask_ratio=self.random_mask_ratio, + unmasked_channel_indices=self.unmasked_channel_indices, + channel_consistent_masking=self.channel_consistent_masking, + mask_value=self.mask_value, + ) + elif self.mask_type == "forecast": + masked_input, mask = forecast_masking( + inputs=patch_input, + num_forecast_mask_patches=self.num_forecast_mask_patches, + unmasked_channel_indices=self.unmasked_channel_indices, + mask_value=self.mask_value, + ) + else: + raise ValueError(f"Invalid mask type {self.mask_type}.") + + # mask: [bs x num_input_channels x num_patch] + mask = mask.bool() + return masked_input, mask + + +class PatchTSTEncoderLayer(nn.Module): + """ + PatchTST encoder layer + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__() + + self.channel_attention = config.channel_attention + # Multi-Head attention + self.self_attn = PatchTSTAttention( + embed_dim=config.d_model, + num_heads=config.num_attention_heads, + dropout=config.attention_dropout, + ) + + # Add & Norm of the sublayer 1 + self.dropout_path1 = nn.Dropout(config.path_dropout) if config.path_dropout > 0 else nn.Identity() + if config.norm_type == "batchnorm": + self.norm_sublayer1 = PatchTSTBatchNorm(config) + elif config.norm_type == "layernorm": + self.norm_sublayer1 = nn.LayerNorm(config.d_model, eps=config.norm_eps) + else: + raise ValueError(f"{config.norm_type} is not a supported norm layer type.") + + # Add & Norm of the sublayer 2 + if self.channel_attention: + self.dropout_path2 = 
nn.Dropout(config.path_dropout) if config.path_dropout > 0 else nn.Identity() + if config.norm_type == "batchnorm": + self.norm_sublayer2 = PatchTSTBatchNorm(config) + elif config.norm_type == "layernorm": + self.norm_sublayer2 = nn.LayerNorm(config.d_model, eps=config.norm_eps) + else: + raise ValueError(f"{config.norm_type} is not a supported norm layer type.") + + # Position-wise Feed-Forward + self.ff = nn.Sequential( + nn.Linear(config.d_model, config.ffn_dim, bias=config.bias), + ACT2CLS[config.activation_function](), + nn.Dropout(config.ff_dropout) if config.ff_dropout > 0 else nn.Identity(), + nn.Linear(config.ffn_dim, config.d_model, bias=config.bias), + ) + + # Add & Norm of sublayer 3 + self.dropout_path3 = nn.Dropout(config.path_dropout) if config.path_dropout > 0 else nn.Identity() + if config.norm_type == "batchnorm": + self.norm_sublayer3 = PatchTSTBatchNorm(config) + elif config.norm_type == "layernorm": + self.norm_sublayer3 = nn.LayerNorm(config.d_model, eps=config.norm_eps) + else: + raise ValueError(f"{config.norm_type} is not a supported norm layer type.") + + self.pre_norm = config.pre_norm + + def forward(self, hidden_state: torch.Tensor, output_attentions: Optional[bool] = None): + """ + Parameters: + hidden_state (`torch.Tensor` of shape `(batch_size, num_channels, sequence_length, d_model)`, *required*): + Past values of the time series + output_attentions (`bool`, *optional*): + Whether or not to return the output attention of all layers + Return: + `torch.Tensor` of shape `(batch_size, num_channels, sequence_length, d_model)` + + """ + batch_size, num_input_channels, sequence_length, d_model = hidden_state.shape + + # First sublayer: attention across time + # hidden_states: [(bs*num_channels) x sequence_length x d_model] + hidden_state = hidden_state.view(batch_size * num_input_channels, sequence_length, d_model) + + if self.pre_norm: + ## Norm and Multi-Head attention and Add residual connection + attn_output, attn_weights, _ = self.self_attn( + hidden_states=self.norm_sublayer1(hidden_state), output_attentions=output_attentions + ) + # Add: residual connection with residual dropout + hidden_state = hidden_state + self.dropout_path1(attn_output) + else: + ## Multi-Head attention and Add residual connection and Norm - Standard Transformer from BERT + attn_output, attn_weights, _ = self.self_attn( + hidden_states=hidden_state, output_attentions=output_attentions + ) + # hidden_states: [(bs*num_channels) x sequence_length x d_model] + hidden_state = self.norm_sublayer1(hidden_state + self.dropout_path1(attn_output)) + + # hidden_state: [bs x num_channels x sequence_length x d_model] + hidden_state = hidden_state.reshape(batch_size, num_input_channels, sequence_length, d_model) + + # second sublayer: attention across variable at any given time + if self.channel_attention: + # hidden_state: [bs x sequence_length x num_channels x d_model] + hidden_state = hidden_state.transpose(2, 1).contiguous() + # hidden_state: [(bs*sequence_length) x num_channels x d_model] + hidden_state = hidden_state.view(batch_size * sequence_length, num_input_channels, d_model) + if self.pre_norm: + ## Norm and Multi-Head attention and Add residual connection + attn_output, channel_attn_weights, _ = self.self_attn( + hidden_states=self.norm_sublayer2(hidden_state), output_attentions=output_attentions + ) + # Add: residual connection with residual dropout + hidden_state = hidden_state + self.dropout_path2(attn_output) + else: + ## Multi-Head attention and Add residual connection and Norm + 
attn_output, channel_attn_weights, _ = self.self_attn( + hidden_states=hidden_state, output_attentions=output_attentions + ) + # hidden_states: [(bs*sequence_length) x num_channels x d_model] + hidden_state = self.norm_sublayer2(hidden_state + self.dropout_path2(attn_output)) + + # Reshape hidden state + # hidden_state: [bs x sequence_length x num_channels x d_model] + hidden_state = hidden_state.reshape(batch_size, sequence_length, num_input_channels, d_model) + # hidden_state: [bs x num_channels x sequence_length x d_model] + hidden_state = hidden_state.transpose(1, 2).contiguous() + + # Third sublayer: mixing across hidden + # hidden_state: [(batch_size*num_channels) x sequence_length x d_model] + hidden_state = hidden_state.view(batch_size * num_input_channels, sequence_length, d_model) + if self.pre_norm: + ## Norm and Position-wise Feed-Forward and Add residual connection + # Add: residual connection with residual dropout + hidden_state = hidden_state + self.dropout_path3(self.ff(self.norm_sublayer3(hidden_state))) + else: + ## Position-wise Feed-Forward and Add residual connection and Norm + # Add: residual connection with residual dropout + hidden_state = self.norm_sublayer3(hidden_state + self.dropout_path3(self.ff(hidden_state))) + + # [bs x num_channels x sequence_length x d_model] + hidden_state = hidden_state.reshape(batch_size, num_input_channels, sequence_length, d_model) + + outputs = (hidden_state,) + if output_attentions: + outputs += (attn_weights, channel_attn_weights) if self.channel_attention else (attn_weights,) + + return outputs + + +class PatchTSTPreTrainedModel(PreTrainedModel): + config_class = PatchTSTConfig + base_model_prefix = "model" + main_input_name = "past_values" + supports_gradient_checkpointing = False + + def _init_weights(self, module): + """ + Initialize weights + """ + if isinstance(module, PatchTSTPositionalEncoding): + # initialize cls_token + if self.config.use_cls_token: + nn.init.normal_(module.cls_token, std=0.02) + # initialize positional encoding + if self.config.positional_encoding_type == "random": + nn.init.normal_(module.position_enc, mean=0.0, std=0.1) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + elif isinstance(module, PatchTSTBatchNorm): + module.batchnorm.bias.data.zero_() + module.batchnorm.weight.data.fill_(1.0) + elif isinstance(module, (nn.Linear, nn.Conv1d)): + module.weight.data.normal_(mean=0.0, std=self.config.init_std) + if module.bias is not None: + module.bias.data.zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, (PatchTSTEncoder)): + module.gradient_checkpointing = value + + +class PatchTSTEmbedding(nn.Module): + def __init__(self, config: PatchTSTConfig): + super().__init__() + self.num_input_channels = config.num_input_channels + self.share_embedding = config.share_embedding + # Input encoding: projection of feature vectors onto a d-dim vector space + if self.share_embedding: + self.input_embedding = nn.Linear(config.patch_length, config.d_model) + else: + self.input_embedding = nn.ModuleList() + for _ in range(config.num_input_channels): + self.input_embedding.append(nn.Linear(config.patch_length, config.d_model)) + + def forward(self, patch_input: torch.Tensor): + """ + Parameters: + patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*): + Patch input for embedding + return: + `torch.Tensor` of shape `(batch_size, num_channels, num_patches, d_model)` + """ + # 
Input encoding + num_input_channels = patch_input.shape[1] + if num_input_channels != self.num_input_channels: + raise ValueError( + f"The defined number of input channels ({self.num_input_channels}) in the config " + f"has to be the same as the number of channels in the batch input ({num_input_channels})" + ) + if self.share_embedding: + embeddings = self.input_embedding(patch_input) # x: [bs x num_channels x num_patches x d_model] + else: + embeddings = [self.input_embedding[i](patch_input[:, i, :, :]) for i in range(num_input_channels)] + embeddings = torch.stack(embeddings, dim=1) + return embeddings + + +class PatchTSTPositionalEncoding(nn.Module): + """ + Class for positional encoding + """ + + def __init__(self, config: PatchTSTConfig, num_patches: int): + super().__init__() + self.use_cls_token = config.use_cls_token + self.num_input_channels = config.num_input_channels + if config.use_cls_token: + # cls_token: [1 x num_input_channels x 1 x d_model] + self.cls_token = nn.Parameter(torch.zeros(1, 1, 1, config.d_model)) + num_patches += 1 + # postional encoding: [num_patches x d_model] + self.position_enc = self._init_pe(config, num_patches) + # Positional dropout + self.positional_dropout = ( + nn.Dropout(config.positional_dropout) if config.positional_dropout > 0 else nn.Identity() + ) + + @staticmethod + def _init_pe(config: PatchTSTConfig, num_patches: int) -> nn.Parameter: + # Positional encoding + if config.positional_encoding_type == "random": + position_enc = nn.Parameter(torch.randn(num_patches, config.d_model), requires_grad=True) + elif config.positional_encoding_type == "sincos": + position_enc = torch.zeros(num_patches, config.d_model) + position = torch.arange(0, num_patches).unsqueeze(1) + div_term = torch.exp(torch.arange(0, config.d_model, 2) * -(math.log(10000.0) / config.d_model)) + position_enc[:, 0::2] = torch.sin(position * div_term) + position_enc[:, 1::2] = torch.cos(position * div_term) + position_enc = position_enc - position_enc.mean() + position_enc = position_enc / (position_enc.std() * 10) + position_enc = nn.Parameter(position_enc, requires_grad=False) + else: + raise ValueError( + f"{config.positional_encoding_type} is not a valid positional encoder. Available types are 'random' and 'sincos'." 
+ ) + return position_enc + + def forward(self, patch_input: torch.Tensor): + if self.use_cls_token: + # patch_input: [bs x num_channels x num_patches x d_model] + patch_input = self.positional_dropout(patch_input + self.position_enc[1:, :]) + # append cls token where cls_token: [1 x num_channels x 1 x d_model] + cls_token = self.cls_token + self.position_enc[:1, :] + # get the same copy of cls_token for all the samples in batch: [bs x num_channels x 1 x d_model] + cls_tokens = cls_token.expand(patch_input.shape[0], self.num_input_channels, -1, -1) + # hidden_state: [bs x num_channels x (num_patches+1) x d_model] + hidden_state = torch.cat((cls_tokens, patch_input), dim=2) + else: + # hidden_state: [bs x num_channels x num_patches x d_model] + hidden_state = self.positional_dropout(patch_input + self.position_enc) + return hidden_state + + +class PatchTSTEncoder(PatchTSTPreTrainedModel): + """ + PatchTST Encoder + """ + + def __init__(self, config: PatchTSTConfig, num_patches: int): + super().__init__(config) + self.gradient_checkpointing = False + + # Input embedding: projection of feature vectors onto a d-dim vector space + self.embedder = PatchTSTEmbedding(config) + # Positional encoding + self.positional_encoder = PatchTSTPositionalEncoding(config, num_patches) + # Encoder + self.layers = nn.ModuleList([PatchTSTEncoderLayer(config) for i in range(config.num_hidden_layers)]) + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + patch_input: torch.Tensor, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + ) -> BaseModelOutput: + """ + Parameters: + patch_input (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`, *required*): + Past values of the time series + output_hidden_states (bool, optional): Indicates if hidden states should be outputted. + output_attentions (bool, optional): Indicates if attentions should be outputted. + + return: + `BaseModelOutput` + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + + # Input embedding + patch_input = self.embedder(patch_input) + # Positional encoding + hidden_state = self.positional_encoder(patch_input) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + for encoder_layer in self.layers: + if output_hidden_states: + encoder_states = encoder_states + (hidden_state,) + + layer_outputs = encoder_layer(hidden_state=hidden_state, output_attentions=output_attentions) + # get hidden state. hidden_state shape is [bs x num_channels x num_patches x d_model] + # or [bs x num_channels x (num_patches+1) x d_model] if use cls_token + hidden_state = layer_outputs[0] + # append attention matrix at each layer + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + # return past_values, hidden_states + return BaseModelOutput(last_hidden_state=hidden_state, hidden_states=encoder_states, attentions=all_attentions) + + +PATCHTST_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) 
+ + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`PatchTSTConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@dataclass +class PatchTSTModelOutput(ModelOutput): + """ + Base class for model's outputs, with potential hidden states. + + Parameters: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of + the model at the output of each layer plus the optional initial embedding outputs. + mask: (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches)`, *optional*) + Bool masked tensor indicating which patches are masked + loc: (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*) + Mean of the input data (batch_size, sequence_length, num_channels) over the sequence_length + scale: (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*) + Std of the input data (batch_size, sequence_length, num_channels) over the sequence_length + patch_input (`torch.FloatTensor` of shape `(batch_size, num_channels, num_patches, patch_length)`): + Patched input to the Transformer + """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + mask: torch.FloatTensor = None + loc: torch.FloatTensor = None + scale: torch.FloatTensor = None + patch_input: torch.FloatTensor = None + + +@dataclass +class PatchTSTForPretrainingOutput(ModelOutput): + """ + Output type of [`PatchTSTForPretraining`]. + + Parameters: + loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): + MSE loss. + prediction_outputs (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction outputs of the time series modeling heads. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. 
+ """ + + loss: Optional[torch.FloatTensor] = None + prediction_output: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class PatchTSTForRegressionOutput(ModelOutput): + """ + Output type of [`PatchTSTForRegression`]. + + Parameters: + loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): + MSE loss. + regression_outputs (`torch.FloatTensor` of shape `(batch_size, num_targets)`): + Regression outputs of the time series modeling heads. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + regression_outputs: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class PatchTSTForPredictionOutput(ModelOutput): + """ + Output type of [`PatchTSTForPrediction`]. + + Parameters: + loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): + MSE loss. + prediction_outputs (`torch.FloatTensor` of shape `(batch_size, prediction_length, -1)`): + Prediction outputs of the time series modeling heads. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + loc: (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*) + Mean of the input data (batch_size, sequence_length, num_channels) over the sequence_length + scale: (`torch.FloatTensor` of shape `(batch_size, 1, num_channels)`, *optional*) + Std of the input data (batch_size, sequence_length, num_channels) over the sequence_length + """ + + loss: Optional[torch.FloatTensor] = None + prediction_outputs: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + loc: torch.FloatTensor = None + scale: torch.FloatTensor = None + + +@dataclass +class PatchTSTForClassificationOutput(ModelOutput): + """ + Output type of [`PatchTSTForClassification`]. 
+ + Parameters: + loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): + Total loss as the sum of the masked language modeling loss and the next sequence prediction + (classification) loss. + prediction_logits (`torch.FloatTensor` of shape `(batch_size, num_targets)`): + Prediction scores of the PatchTST modeling head (scores before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + prediction_logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class SamplePatchTSTOutput(ModelOutput): + """ + Base class for time series model's predictions outputs that contains the sampled values from the chosen + distribution. + + Parameters: + sequences `(batch_size, num_samples, prediction_length, num_targets)`): + Sampled values from the chosen distribution. + """ + + sequences: torch.FloatTensor = None + + +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.nll +def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor: + """ + Computes the negative log likelihood loss from input distribution with respect to target. + """ + return -input.log_prob(target) + + +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.weighted_average +def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor: + """ + Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero, + meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`. + + Args: + input_tensor (`torch.FloatTensor`): + Input tensor, of which the average must be computed. + weights (`torch.FloatTensor`, *optional*): + Weights tensor, of the same shape as `input_tensor`. + dim (`int`, *optional*): + The dim along which to average `input_tensor`. + + Returns: + `torch.FloatTensor`: The tensor with values averaged along the specified `dim`. 
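+
+    Example (illustrative): positions with zero weight are masked out instead of propagating `nan`:
+
+    ```python
+    >>> import torch
+
+    >>> values = torch.tensor([1.0, float("nan"), 3.0])
+    >>> weights = torch.tensor([1.0, 0.0, 1.0])
+    >>> weighted_average(values, weights=weights)
+    tensor(2.)
+    ```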
+ """ + if weights is not None: + weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor)) + sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0) + return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights + else: + return input_tensor.mean(dim=dim) + + +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeriesTransformer->PatchTST,TimeSeries->PatchTST +class PatchTSTStdScaler(nn.Module): + """ + Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by + subtracting from the mean and dividing by the standard deviation. + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__() + self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 + self.keepdim = config.keepdim if hasattr(config, "keepdim") else True + self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-5 + + def forward( + self, data: torch.Tensor, observed_indicator: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Parameters: + data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): + input for Batch norm calculation + observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): + Calculating the scale on the observed indicator. + Returns: + tuple of `torch.Tensor` of shapes + (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, + `(batch_size, 1, num_input_channels)`) + """ + denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) + denominator = denominator.clamp_min(1.0) + loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator + + variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator + scale = torch.sqrt(variance + self.minimum_scale) + return (data - loc) / scale, loc, scale + + +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeriesTransformer->PatchTST,TimeSeries->PatchTST +class PatchTSTMeanScaler(nn.Module): + """ + Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data + accordingly. + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__() + self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 + self.keepdim = config.keepdim if hasattr(config, "keepdim") else True + self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 + self.default_scale = config.default_scale if hasattr(config, "default_scale") else None + + def forward( + self, data: torch.Tensor, observed_indicator: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Parameters: + data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): + input for Batch norm calculation + observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): + Calculating the scale on the observed indicator. 
+ Returns: + tuple of `torch.Tensor` of shapes + (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, + `(batch_size, 1, num_input_channels)`) + """ + ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) + num_observed = observed_indicator.sum(self.dim, keepdim=True) + + scale = ts_sum / torch.clamp(num_observed, min=1) + + # If `default_scale` is provided, we use it, otherwise we use the scale + # of the batch. + if self.default_scale is None: + batch_sum = ts_sum.sum(dim=0) + batch_observations = torch.clamp(num_observed.sum(0), min=1) + default_scale = torch.squeeze(batch_sum / batch_observations) + else: + default_scale = self.default_scale * torch.ones_like(scale) + + # apply default scale where there are no observations + scale = torch.where(num_observed > 0, scale, default_scale) + + # ensure the scale is at least `self.minimum_scale` + scale = torch.clamp(scale, min=self.minimum_scale) + scaled_data = data / scale + + if not self.keepdim: + scale = scale.squeeze(dim=self.dim) + + return scaled_data, torch.zeros_like(scale), scale + + +# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeriesTransformer->PatchTST,TimeSeries->PatchTST +class PatchTSTNOPScaler(nn.Module): + """ + Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data. + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__() + self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 + self.keepdim = config.keepdim if hasattr(config, "keepdim") else True + + def forward( + self, data: torch.Tensor, observed_indicator: torch.Tensor = None + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Parameters: + data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): + input for Batch norm calculation + Returns: + tuple of `torch.Tensor` of shapes + (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, + `(batch_size, 1, num_input_channels)`) + """ + scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) + loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) + return data, loc, scale + + +class PatchTSTScaler(nn.Module): + def __init__(self, config: PatchTSTConfig): + super().__init__() + if config.scaling == "mean" or config.scaling is True: + self.scaler = PatchTSTMeanScaler(config) + elif config.scaling == "std": + self.scaler = PatchTSTStdScaler(config) + else: + self.scaler = PatchTSTNOPScaler(config) + + def forward( + self, data: torch.Tensor, observed_indicator: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Parameters: + data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): + Input for scaler calculation + observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): + Calculating the scale on the observed indicator. 
+ Returns: + tuple of `torch.Tensor` of shapes + (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, + `(batch_size, 1, um_input_channels)`) + """ + data, loc, scale = self.scaler(data, observed_indicator) + return data, loc, scale + + +@add_start_docstrings( + "The bare PatchTST Model outputting raw hidden-states without any specific head.", + PATCHTST_START_DOCSTRING, +) +class PatchTSTModel(PatchTSTPreTrainedModel): + def __init__(self, config: PatchTSTConfig): + super().__init__(config) + + self.scaler = PatchTSTScaler(config) + self.patchifier = PatchTSTPatchify(config) + self.do_mask_input = config.do_mask_input + # get num_patches information from PatchTSTPatchify + num_patches = self.patchifier.num_patches + + if self.do_mask_input: + self.masking = PatchTSTMasking(config) + else: + self.masking = nn.Identity() + self.encoder = PatchTSTEncoder(config, num_patches=num_patches) + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + past_values: torch.Tensor, + past_observed_mask: Optional[torch.Tensor] = None, + future_values: Optional[torch.Tensor] = None, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, PatchTSTModelOutput]: + r""" + Parameters: + past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): + Input sequence to the model + past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): + Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected + in `[0, 1]`: + + - 1 for values that are **observed**, + - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). + future_values (`torch.BoolTensor` of shape `(batch_size, prediction_length, num_input_channels)`, *optional*): + Future target values associated with the `past_values` + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers + output_attentions (`bool`, *optional*): + Whether or not to return the output attention of all layers + return_dict (`bool`, *optional*): + Whether or not to return a `ModelOutput` instead of a plain tuple. + + Returns: + `PatchTSTModelOutput` or tuple of `torch.Tensor` (if `return_dict`=False or `config.return_dict`=False) + + Examples: + + ```python + >>> from huggingface_hub import hf_hub_download + >>> import torch + >>> from transformers import PatchTSTModel + + >>> file = hf_hub_download( + ... repo_id="hf-internal-testing/etth1-hourly-batch", filename="train-batch.pt", repo_type="dataset" + ... ) + >>> batch = torch.load(file) + + >>> model = PatchTSTModel.from_pretrained("namctin/patchtst_etth1_pretrain") + + >>> # during training, one provides both past and future values + >>> outputs = model( + ... past_values=batch["past_values"], + ... future_values=batch["future_values"], + ... 
) + + >>> last_hidden_state = outputs.last_hidden_state + ```""" + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + + if past_observed_mask is None: + past_observed_mask = torch.ones_like(past_values) + + # x: tensor [bs x sequence_length x num_input_channels] + scaled_past_values, loc, scale = self.scaler(past_values, past_observed_mask) + + # patched_values: [bs x num_input_channels x num_patches x patch_length] for pretrain + patched_values = self.patchifier(scaled_past_values) + if self.do_mask_input: + masked_values, mask = self.masking(patched_values) + else: + masked_values, mask = self.masking(patched_values), None + + encoder_output = self.encoder( + patch_input=masked_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions + ) + + if not return_dict: + outputs = (encoder_output.last_hidden_state, encoder_output.hidden_states, encoder_output.attentions) + outputs = outputs + (mask, loc, scale, patched_values) + return tuple(v for v in outputs if v is not None) + + return PatchTSTModelOutput( + last_hidden_state=encoder_output.last_hidden_state, + hidden_states=encoder_output.hidden_states, + attentions=encoder_output.attentions, + mask=mask, + loc=loc, + scale=scale, + patch_input=patched_values, + ) + + +class PatchTSTMaskPretrainHead(nn.Module): + """ + Pretraining head for mask modelling + """ + + def __init__(self, config: PatchTSTConfig): + super().__init__() + self.dropout = nn.Dropout(config.dropout) + self.linear = nn.Linear(config.d_model, config.patch_length) + self.use_cls_token = config.use_cls_token + + def forward(self, embedding: torch.Tensor) -> torch.Tensor: + """ + Parameters: + embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or + `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*): + Embedding from the model + Returns: + `torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or + `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True + + """ + embedding = self.linear(self.dropout(embedding)) # [bs x num_channels x num_patches x patch_length] + if self.use_cls_token: + embedding = embedding[:, :, 1:, :] # remove the first cls token + return embedding + + +@add_start_docstrings( + "The PatchTST for pretrain model.", + PATCHTST_START_DOCSTRING, +) +class PatchTSTForPretraining(PatchTSTPreTrainedModel): + def __init__(self, config: PatchTSTConfig): + super().__init__(config) + + config.do_mask_input = True + self.model = PatchTSTModel(config=config) + self.head = PatchTSTMaskPretrainHead(config) + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + past_values: torch.Tensor, + past_observed_mask: Optional[torch.Tensor] = None, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, PatchTSTForPretrainingOutput]: + r""" + Parameters: + past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): + Input sequence to the model + past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): + Boolean mask to indicate which `past_values` were observed 
and which were missing. Mask values selected + in `[0, 1]`: + + - 1 for values that are **observed**, + - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers + output_attentions (`bool`, *optional*): + Whether or not to return the output attention of all layers + return_dict (`bool`, *optional*): Whether or not to return a `ModelOutput` instead of a plain tuple. + + Returns: + `PatchTSTForPretrainingOutput` or tuple of `torch.Tensor` (if `return_dict`=False or + `config.return_dict`=False) + + Examples: + + ```python + >>> from huggingface_hub import hf_hub_download + >>> import torch + >>> from transformers import PatchTSTConfig, PatchTSTForPretraining + + >>> file = hf_hub_download( + ... repo_id="hf-internal-testing/etth1-hourly-batch", filename="train-batch.pt", repo_type="dataset" + ... ) + >>> batch = torch.load(file) + + >>> # Config for random mask pretraining + >>> config = PatchTSTConfig( + ... num_input_channels=7, + ... context_length=512, + ... patch_length=12, + ... stride=12, + ... mask_type='random', + ... random_mask_ratio=0.4, + ... use_cls_token=True, + ... ) + >>> # Config for forecast mask pretraining + >>> config = PatchTSTConfig( + ... num_input_channels=7, + ... context_length=512, + ... patch_length=12, + ... stride=12, + ... mask_type='forecast', + ... num_forecast_mask_patches=5, + ... use_cls_token=True, + ... ) + >>> model = PatchTSTForPretraining(config) + + >>> # during training, one provides both past and future values + >>> outputs = model(past_values=batch["past_values"]) + + >>> loss = outputs.loss + >>> loss.backward() + ```""" + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # past_values: [bs x num_channels x num_patches x d_model] or + # [bs x num_channels x (num_patches+1) x d_model] if use cls_token + model_output = self.model( + past_values=past_values, + past_observed_mask=past_observed_mask, + output_hidden_states=output_hidden_states, + output_attentions=output_attentions, + return_dict=True, + ) + + # last_hidden_state: [bs x num_channels x num_patches x patch_length] or + # [bs x num_channels x (num_patches+1) x patch_length] if use cls_token + x_hat = self.head(model_output.last_hidden_state) + + # calculate masked_loss + loss = nn.MSELoss(reduction="none") + loss_val = loss(x_hat, model_output.patch_input) + masked_loss = (loss_val.mean(dim=-1) * model_output.mask).sum() / (model_output.mask.sum() + 1e-10) + + encoder_states = model_output.hidden_states + if not return_dict: + outputs = (x_hat,) + model_output[1:-4] + outputs = (masked_loss,) + outputs if masked_loss is not None else outputs + return outputs + return PatchTSTForPretrainingOutput( + loss=masked_loss, prediction_output=x_hat, hidden_states=encoder_states, attentions=model_output.attentions + ) + + +class PatchTSTClassificationHead(nn.Module): + def __init__(self, config: PatchTSTConfig): + super().__init__() + self.use_cls_token = config.use_cls_token + self.pooling_type = config.pooling_type + self.flatten = nn.Flatten(start_dim=1) + self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity() + self.linear = nn.Linear(config.num_input_channels * config.d_model, config.num_targets) + + def forward(self, embedding: torch.Tensor): + """ + Parameters: + embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or + `(bs, num_channels, num_patches+1, d_model)` 
if `cls_token` is set to True, *required*): + Embedding from the model + Returns: + `torch.Tensor` of shape `(bs, num_targets)` + + """ + if self.use_cls_token: + # use the first output token, pooled_embedding: bs x num_channels x d_model + pooled_embedding = embedding[:, :, 0, :] + elif self.pooling_type == "mean": + # pooled_embedding: [bs x num_channels x d_model] + pooled_embedding = embedding.mean(dim=2) + elif self.pooling_type == "max": + # pooled_embedding: [bs x num_channels x d_model] + pooled_embedding = embedding.max(dim=2).values + else: + raise ValueError(f"pooling operator {self.pooling_type} is not implemented yet") + # pooled_embedding: bs x num_channels * d_model + pooled_embedding = self.flatten(pooled_embedding) + # output: bs x n_classes + output = self.linear(self.dropout(pooled_embedding)) + return output + + +@add_start_docstrings( + "The PatchTST for classification model.", + PATCHTST_START_DOCSTRING, +) +class PatchTSTForClassification(PatchTSTPreTrainedModel): + def __init__(self, config: PatchTSTConfig): + super().__init__(config) + + # Turn off masking + if config.do_mask_input: + logger.warning("Setting `do_mask_input` parameter to False.") + config.do_mask_input = False + + self.model = PatchTSTModel(config) + self.head = PatchTSTClassificationHead(config) + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + past_values: torch.Tensor, + target_values: torch.Tensor = None, + past_observed_mask: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, PatchTSTForClassificationOutput]: + r""" + Parameters: + past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): + Input sequence to the model + target_values (`torch.Tensor`, *optional*): + Labels associates with the `past_values` + past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): + Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected + in `[0, 1]`: + + - 1 for values that are **observed**, + - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers + output_attentions (`bool`, *optional*): + Whether or not to return the output attention of all layers + return_dict (`bool`, *optional*): + Whether or not to return a `ModelOutput` instead of a plain tuple. + + Returns: + `PatchTSTForClassificationOutput` or tuple of `torch.Tensor` (if `return_dict`=False or + `config.return_dict`=False) + + Examples: + + ```python + >>> from transformers import PatchTSTConfig, PatchTSTForClassification + + >>> # classification task with two input channel2 and 3 classes + >>> config = PatchTSTConfig( + ... num_input_channels=2, + ... num_targets=3, + ... context_length=512, + ... patch_length=12, + ... stride=12, + ... use_cls_token=True, + ... 
) + >>> model = PatchTSTForClassification(config=config) + + >>> # during inference, one only provides past values + >>> past_values = torch.randn(20, 512, 2) + >>> outputs = model(past_values=past_values) + >>> labels = outputs.prediction_logits + ```""" + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + model_output = self.model( + past_values=past_values, + past_observed_mask=past_observed_mask, + output_hidden_states=output_hidden_states, + output_attentions=output_attentions, + return_dict=True, + ) + y_hat = self.head(model_output.last_hidden_state) + + loss_val = None + if target_values is not None: + loss = nn.CrossEntropyLoss() + loss_val = loss(y_hat, target_values) + + if not return_dict: + outputs = (y_hat,) + model_output[1:-3] + outputs = (loss_val,) + outputs if loss_val is not None else outputs + return outputs + return PatchTSTForClassificationOutput( + loss=loss_val, + prediction_logits=y_hat, + hidden_states=model_output.hidden_states, + attentions=model_output.attentions, + ) + + +@add_start_docstrings( + "The PatchTST for regression Model.", + PATCHTST_START_DOCSTRING, +) +class PatchTSTPredictionHead(nn.Module): + def __init__(self, config: PatchTSTConfig, num_patches, distribution_output=None): + super().__init__() + + self.share_projection = config.share_projection + self.num_input_channels = config.num_input_channels + self.use_cls_token = config.use_cls_token + self.pooling_type = config.pooling_type + if self.pooling_type or self.use_cls_token: + head_dim = config.d_model + else: + head_dim = config.d_model * num_patches + + if not self.share_projection: + # if each channel has its own head + self.projections = nn.ModuleList() + self.dropouts = nn.ModuleList() + self.flattens = nn.ModuleList() + for i in range(self.num_input_channels): + self.flattens.append(nn.Flatten(start_dim=2)) + if distribution_output is None: + # use linear head + self.projections.append(nn.Linear(head_dim, config.prediction_length)) + else: + # use distribution head + self.projections.append(distribution_output.get_parameter_projection(head_dim)) + self.dropouts.append(nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity()) + else: + # all the channels share the same head + self.flatten = nn.Flatten(start_dim=2) + if distribution_output is None: + # use linear head + self.projection = nn.Linear(head_dim, config.prediction_length) + else: + # use distribution head + self.projection = distribution_output.get_parameter_projection(head_dim) + self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity() + + def forward(self, embedding: torch.Tensor): + """ + Parameters: + embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or + `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*): + Embedding from the model + Returns: + `torch.Tensor` of shape `(bs, forecast_len, num_channels)` + + """ + if self.use_cls_token: + # pooled_embedding: [bs x num_channels x d_model] + pooled_embedding = embedding[:, :, 0, :] + else: + if self.pooling_type == "mean": + # pooled_embedding: [bs x num_channels x d_model] + pooled_embedding = embedding.mean(dim=2) + elif self.pooling_type == "max": + # pooled_embedding: [bs x num_channels x d_model] + pooled_embedding = embedding.max(dim=2).values + else: + # pooled_embedding: [bs x num_channels x num_patches x d_model] + pooled_embedding = embedding + + if not self.share_projection: + output = [] + for i in 
range(self.num_input_channels): + # pooled_embedding: [bs x (d_model * num_patches)] or [bs x d_model)] + pooled_embedding = self.flattens[i](pooled_embedding[:, i, :]) + pooled_embedding = self.dropouts[i](pooled_embedding) + # pooled_embedding: [bs x forecast_len] + # or tuple ([bs x forecast_len], [bs x forecast_len]) if using distribution head + pooled_embedding = self.projections[i](pooled_embedding) + output.append(pooled_embedding) + # output: [bs x num_channels x forecast_len] + output = torch.stack(output, dim=1) + else: + # pooled_embedding: [bs x num_channels x (d_model * num_patches)] or [bs x num_channels x d_model)] + pooled_embedding = self.flatten(pooled_embedding) + pooled_embedding = self.dropout(pooled_embedding) + # output: [bs x num_channels x forecast_len] or + # tuple ([bs x num_channels x forecast_len], [bs x num_channels x forecast_len]) if using distribution head + output = self.projection(pooled_embedding) + + if isinstance(output, tuple): + # output: ([bs x forecast_len x num_channels], [bs x forecast_len x num_channels]) + output = tuple(z.transpose(2, 1) for z in output) + else: + output = output.transpose(2, 1) # [bs x forecast_len x num_channels] + return output + + +@add_start_docstrings( + "The PatchTST for prediction model.", + PATCHTST_START_DOCSTRING, +) +class PatchTSTForPrediction(PatchTSTPreTrainedModel): + def __init__(self, config: PatchTSTConfig): + super().__init__(config) + + # Turn off masking + if config.do_mask_input: + logger.warning("Setting `do_mask_input` parameter to False.") + config.do_mask_input = False + + self.model = PatchTSTModel(config) + + if config.loss == "mse": + self.distribution_output = None + else: + if config.distribution_output == "student_t": + self.distribution_output = StudentTOutput(dim=config.prediction_length) + elif config.distribution_output == "normal": + self.distribution_output = NormalOutput(dim=config.prediction_length) + elif config.distribution_output == "negative_binomial": + self.distribution_output = NegativeBinomialOutput(dim=config.prediction_length) + else: + raise ValueError(f"Unknown distribution output {config.distribution_output}") + + self.head = PatchTSTPredictionHead( + config, self.model.patchifier.num_patches, distribution_output=self.distribution_output + ) + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + past_values: torch.Tensor, + past_observed_mask: Optional[torch.Tensor] = None, + future_values: Optional[torch.Tensor] = None, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, PatchTSTForPredictionOutput]: + r""" + Parameters: + past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): + Input sequence to the model + past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): + Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected + in `[0, 1]`: + + - 1 for values that are **observed**, + - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). 
+ future_values (`torch.Tensor` of shape `(bs, forecast_len, num_input_channels)`, *optional*): + Future target values associated with the `past_values` + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers + output_attentions (`bool`, *optional*): + Whether or not to return the output attention of all layers + return_dict (`bool`, *optional*): + Whether or not to return a `ModelOutput` instead of a plain tuple. + + Returns: + `PatchTSTForPredictionOutput` or tuple of `torch.Tensor` (if `return_dict`=False or + `config.return_dict`=False) + + Examples: + + ```python + >>> from huggingface_hub import hf_hub_download + >>> import torch + >>> from transformers import PatchTSTConfig, PatchTSTForPrediction + + >>> file = hf_hub_download( + ... repo_id="hf-internal-testing/etth1-hourly-batch", filename="train-batch.pt", repo_type="dataset" + ... ) + >>> batch = torch.load(file) + + >>> # Prediction task with 7 input channels and prediction length is 96 + >>> model = PatchTSTForPrediction.from_pretrained("namctin/patchtst_etth1_forecast") + + >>> # during training, one provides both past and future values + >>> outputs = model( + ... past_values=batch["past_values"], + ... future_values=batch["future_values"], + ... ) + + >>> loss = outputs.loss + >>> loss.backward() + + >>> # during inference, one only provides past values, the model outputs future values + >>> outputs = model(past_values=batch["past_values"]) + >>> prediction_outputs = outputs.prediction_outputs + ```""" + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # get model output + model_output = self.model( + past_values=past_values, + past_observed_mask=past_observed_mask, + output_hidden_states=output_hidden_states, + output_attentions=output_attentions, + return_dict=True, + ) + # get output head + y_hat = self.head(model_output.last_hidden_state) + + loss_val = None + + if self.distribution_output: + y_hat_out = y_hat + else: + y_hat_out = y_hat * model_output.scale + model_output.loc + + if future_values is not None: + if self.distribution_output: + distribution = self.distribution_output.distribution( + y_hat, loc=model_output.loc, scale=model_output.scale + ) + loss_val = nll(distribution, future_values) + # take average of the loss + loss_val = weighted_average(loss_val) + else: + loss = nn.MSELoss(reduction="mean") + loss_val = loss(y_hat_out, future_values) + + loc = model_output.loc + scale = model_output.scale + + if not return_dict: + outputs = (y_hat_out,) + model_output[1:-1] + outputs = (loss_val,) + outputs if loss_val is not None else outputs + return outputs + return PatchTSTForPredictionOutput( + loss=loss_val, + prediction_outputs=y_hat_out, + hidden_states=model_output.hidden_states, + attentions=model_output.attentions, + loc=loc, + scale=scale, + ) + + def generate( + self, + past_values: torch.Tensor, + past_observed_mask: Optional[torch.Tensor] = None, + ) -> SamplePatchTSTOutput: + """ + Generate sequences of sample predictions from a model with a probability distribution head. + + Parameters: + past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`): + Past values of the time series that serves as context in order to predict the future. + past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): + Boolean mask to indicate which `past_values` were observed and which were missing. 
Mask values selected + in `[0, 1]`: + + - 1 for values that are **observed**, + - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). + + Return: + [`SamplePatchTSTOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of + samples, prediction_length, 1)` or `(batch_size, number of samples, prediction_length, num_input_channels)` + for multivariate predictions. + """ + # get number of samples + num_parallel_samples = self.config.num_parallel_samples + + # get model output + outputs = self( + past_values=past_values, + future_values=None, + past_observed_mask=past_observed_mask, + output_hidden_states=False, + ) + if self.distribution_output: + # get distribution + distribution = self.distribution_output.distribution( + outputs.prediction_outputs, loc=outputs.loc, scale=outputs.scale + ) + # get samples: list of [bs x forecast_len x num_channels] + samples = [distribution.sample() for _ in range(num_parallel_samples)] + # samples: [bs x num_samples x forecast_len x num_channels] + samples = torch.stack(samples, dim=1) + else: + samples = outputs.prediction_outputs.unsqueeze(1) + + return SamplePatchTSTOutput(sequences=samples) + + +class PatchTSTRegressionHead(nn.Module): + """ + Regression head + """ + + def __init__(self, config: PatchTSTConfig, distribution_output=None): + super().__init__() + self.y_range = config.output_range + self.use_cls_token = config.use_cls_token + self.pooling_type = config.pooling_type + self.distribution_output = distribution_output + + head_dim = config.num_input_channels * config.d_model + + self.flatten = nn.Flatten(start_dim=1) + self.dropout = nn.Dropout(config.head_dropout) if config.head_dropout > 0 else nn.Identity() + + if distribution_output is None: + self.projection = nn.Linear(head_dim, config.num_targets) + else: + self.projection = distribution_output.get_parameter_projection(head_dim) + + def forward(self, embedding: torch.Tensor): + """ + Parameters: + embedding (`torch.Tensor` of shape `(bs, num_channels, num_patches, d_model)` or + `(bs, num_channels, num_patches+1, d_model)` if `cls_token` is set to True, *required*): + Embedding from the model + Returns: + `torch.Tensor` of shape `(bs, output_dim)` + + """ + if self.use_cls_token: + # use the first output token, pooled_embedding: [bs x num_channels x d_model] + pooled_embedding = embedding[:, :, 0, :] + elif self.pooling_type == "mean": + # pooled_embedding: [bs x num_channels x d_model] + pooled_embedding = embedding.mean(dim=2) + elif self.pooling_type == "max": + # pooled_embedding: [bs x num_channels x d_model] + pooled_embedding = embedding.max(dim=2).values + else: + raise ValueError(f"pooling operator {self.pooling_type} is not implemented yet") + # flatten the input + # pooled_embedding: bs x (num_channels * d_model) + pooled_embedding = self.dropout(self.flatten(pooled_embedding)) + # projection + # output: bs x output_dim or a tuple of this shape for distribution head + output = self.projection(pooled_embedding) + # apply sigmoid to bound the output if required + if (self.distribution_output is None) & (self.y_range is not None): # linear head + output = torch.sigmoid(output) * (self.y_range[1] - self.y_range[0]) + self.y_range[0] + return output + + +@add_start_docstrings( + "The PatchTST for regression model.", + PATCHTST_START_DOCSTRING, +) +class PatchTSTForRegression(PatchTSTPreTrainedModel): + def __init__(self, config: PatchTSTConfig): + super().__init__(config) + + # Turn off masking + if config.do_mask_input: + 
logger.warning("Setting `do_mask_input` parameter to False.") + config.do_mask_input = False + + self.model = PatchTSTModel(config) + if config.loss == "mse": + self.distribution_output = None + else: + if config.distribution_output == "student_t": + self.distribution_output = StudentTOutput(dim=config.num_targets) + elif config.distribution_output == "normal": + self.distribution_output = NormalOutput(dim=config.num_targets) + elif config.distribution_output == "negative_binomial": + self.distribution_output = NegativeBinomialOutput(dim=config.num_targets) + else: + raise ValueError(f"Unknown distribution output {config.distribution_output}") + + self.head = PatchTSTRegressionHead(config, self.distribution_output) + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + past_values: torch.Tensor, + target_values: torch.Tensor = None, + past_observed_mask: Optional[torch.Tensor] = None, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, PatchTSTForRegressionOutput]: + r""" + Parameters: + past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*): + Input sequence to the model + target_values (`torch.Tensor` of shape `(bs, num_input_channels)`): + Target values associates with the `past_values` + past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): + Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected + in `[0, 1]`: + + - 1 for values that are **observed**, + - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers + output_attentions (`bool`, *optional*): + Whether or not to return the output attention of all layers + return_dict (`bool`, *optional*): + Whether or not to return a `ModelOutput` instead of a plain tuple. + + Returns: + `PatchTSTForRegressionOutput` or tuple of `torch.Tensor` (if `return_dict`=False or + `config.return_dict`=False) + + Examples: + + ```python + >>> from transformers import PatchTSTConfig, PatchTSTForRegression + + >>> # Regression task with 6 input channels and regress 2 targets + >>> model = PatchTSTForRegression.from_pretrained("namctin/patchtst_etth1_regression") + + >>> # during inference, one only provides past values, the model outputs future values + >>> past_values = torch.randn(20, 512, 6) + >>> outputs = model(past_values=past_values) + >>> regression_outputs = outputs.regression_outputs + ```""" + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + model_output = self.model( + past_values=past_values, + past_observed_mask=past_observed_mask, + output_hidden_states=output_hidden_states, + output_attentions=output_attentions, + return_dict=True, + ) + # get output head. 
y_hat is of shape [bs x num_targets] or tuple of this shape + y_hat = self.head(model_output.last_hidden_state) + + loss = None + if target_values is not None: + if self.distribution_output: + distribution = self.distribution_output.distribution(y_hat) + # y_hat should be a 2-tuple, each with dimension [bs, num_targets] + y_hat = tuple([item.view(-1, self.config.num_targets) for item in y_hat]) + loss = nll(distribution, target_values) + # take average of the loss + loss = weighted_average(loss) + else: + loss = nn.MSELoss(reduction="mean") + loss = loss(y_hat, target_values) + + if not return_dict: + # hidden_states, attentions, mask + outputs = (y_hat,) + model_output[1:-3] + outputs = (loss,) + outputs if loss is not None else outputs + return outputs + return PatchTSTForRegressionOutput( + loss=loss, + regression_outputs=y_hat, + hidden_states=model_output.hidden_states, + attentions=model_output.attentions, + ) + + def generate( + self, + past_values: torch.Tensor, + past_observed_mask: Optional[torch.Tensor] = None, + ) -> SamplePatchTSTOutput: + """ + Generate sequences of sample predictions from a model with a probability distribution head. + + Parameters: + past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`): + Past values of the time series that serves as context in order to predict the future. + past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): + Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected + in `[0, 1]`: + + - 1 for values that are **observed**, + - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). + + Return: + [`SamplePatchTSTOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of + samples, num_targets)`. + """ + # get number of samples + num_parallel_samples = self.config.num_parallel_samples + + # get model output + outputs = self( + past_values=past_values, + target_values=None, + past_observed_mask=past_observed_mask, + output_hidden_states=False, + ) + + # get distribution + distribution = self.distribution_output.distribution(outputs.regression_outputs) + # get samples: list of [bs x num_targets] + samples = [distribution.sample() for _ in range(num_parallel_samples)] + # samples: [bs x num_samples x num_targets] + samples = torch.stack(samples, dim=1).view(-1, num_parallel_samples, self.config.num_targets) + return SamplePatchTSTOutput(sequences=samples) diff --git a/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4825eda165050afc19e190a56f5da6ac847e6e78 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/__init__.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, +# Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_vision_available, +) + + +_import_structure = { + "configuration_pvt_v2": ["PvtV2Config"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_pvt_v2"] = [ + "PvtV2ForImageClassification", + "PvtV2Model", + "PvtV2PreTrainedModel", + "PvtV2Backbone", + ] + + +if TYPE_CHECKING: + from .configuration_pvt_v2 import PvtV2Config + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_pvt_v2 import ( + PvtV2Backbone, + PvtV2ForImageClassification, + PvtV2Model, + PvtV2PreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8a0236cea4b41ad6b3f4672272cb04236a56c41 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/__pycache__/configuration_pvt_v2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/__pycache__/configuration_pvt_v2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b941dd2e4e48c0b16453e3758835a08dbb675d1f Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/__pycache__/configuration_pvt_v2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/__pycache__/convert_pvt_v2_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/__pycache__/convert_pvt_v2_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6d4b49425422a490296d9cdc081dffbe7662b9c Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/__pycache__/convert_pvt_v2_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/__pycache__/modeling_pvt_v2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/__pycache__/modeling_pvt_v2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9aaf6f6bca4cbd19c4ee14dfba897e966b8312cf Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/__pycache__/modeling_pvt_v2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/configuration_pvt_v2.py b/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/configuration_pvt_v2.py new file mode 100644 index 
0000000000000000000000000000000000000000..f6d7de299ba37dc108e2007c193ae76bd2567285 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/configuration_pvt_v2.py @@ -0,0 +1,153 @@ +# coding=utf-8 +# Copyright 2024 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, +# Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Pvt V2 model configuration""" + +from typing import Callable, List, Tuple, Union + +from ...configuration_utils import PretrainedConfig +from ...utils import logging +from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices + + +logger = logging.get_logger(__name__) + + +class PvtV2Config(BackboneConfigMixin, PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`PvtV2Model`]. It is used to instantiate a Pvt V2 + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the Pvt V2 B0 + [OpenGVLab/pvt_v2_b0](https://huggingface.co/OpenGVLab/pvt_v2_b0) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + image_size (`Union[int, Tuple[int, int]]`, *optional*, defaults to 224): + The input image size. Pass int value for square image, or tuple of (height, width). + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + num_encoder_blocks (`[int]`, *optional*, defaults to 4): + The number of encoder blocks (i.e. stages in the Mix Transformer encoder). + depths (`List[int]`, *optional*, defaults to `[2, 2, 2, 2]`): + The number of layers in each encoder block. + sr_ratios (`List[int]`, *optional*, defaults to `[8, 4, 2, 1]`): + Spatial reduction ratios in each encoder block. + hidden_sizes (`List[int]`, *optional*, defaults to `[32, 64, 160, 256]`): + Dimension of each of the encoder blocks. + patch_sizes (`List[int]`, *optional*, defaults to `[7, 3, 3, 3]`): + Patch size for overlapping patch embedding before each encoder block. + strides (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`): + Stride for overlapping patch embedding before each encoder block. + num_attention_heads (`List[int]`, *optional*, defaults to `[1, 2, 5, 8]`): + Number of attention heads for each attention layer in each block of the Transformer encoder. + mlp_ratios (`List[int]`, *optional*, defaults to `[8, 8, 4, 4]`): + Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the + encoder blocks. + hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. 
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + drop_path_rate (`float`, *optional*, defaults to 0.0): + The dropout probability for stochastic depth, used in the blocks of the Transformer encoder. + layer_norm_eps (`float`, *optional*, defaults to 1e-06): + The epsilon used by the layer normalization layers. + qkv_bias (`bool`, *optional*, defaults to `True`): + Whether or not a learnable bias should be added to the queries, keys and values. + linear_attention (`bool`, *optional*, defaults to `False`): + Use linear attention complexity. If set to True, `sr_ratio` is ignored and average pooling is used for + dimensionality reduction in the attention layers rather than strided convolution. + out_features (`List[str]`, *optional*): + If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc. + (depending on how many stages the model has). If unset and `out_indices` is set, will default to the + corresponding stages. If unset and `out_indices` is unset, will default to the last stage. + out_indices (`List[int]`, *optional*): + If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how + many stages the model has). If unset and `out_features` is set, will default to the corresponding stages. + If unset and `out_features` is unset, will default to the last stage. + Example: + + ```python + >>> from transformers import PvtV2Model, PvtV2Config + + >>> # Initializing a pvt_v2_b0 style configuration + >>> configuration = PvtV2Config() + + >>> # Initializing a model from the OpenGVLab/pvt_v2_b0 style configuration + >>> model = PvtV2Model(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "pvt_v2" + + def __init__( + self, + image_size: Union[int, Tuple[int, int]] = 224, + num_channels: int = 3, + num_encoder_blocks: int = 4, + depths: List[int] = [2, 2, 2, 2], + sr_ratios: List[int] = [8, 4, 2, 1], + hidden_sizes: List[int] = [32, 64, 160, 256], + patch_sizes: List[int] = [7, 3, 3, 3], + strides: List[int] = [4, 2, 2, 2], + num_attention_heads: List[int] = [1, 2, 5, 8], + mlp_ratios: List[int] = [8, 8, 4, 4], + hidden_act: Union[str, Callable] = "gelu", + hidden_dropout_prob: float = 0.0, + attention_probs_dropout_prob: float = 0.0, + initializer_range: float = 0.02, + drop_path_rate: float = 0.0, + layer_norm_eps: float = 1e-6, + qkv_bias: bool = True, + linear_attention: bool = False, + out_features=None, + out_indices=None, + **kwargs, + ): + super().__init__(**kwargs) + + image_size = (image_size, image_size) if isinstance(image_size, int) else image_size + + self.image_size = image_size + self.num_channels = num_channels + self.num_encoder_blocks = num_encoder_blocks + self.depths = depths + self.sr_ratios = sr_ratios + self.hidden_sizes = hidden_sizes + self.patch_sizes = patch_sizes + self.strides = strides + self.mlp_ratios = mlp_ratios + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + 
self.initializer_range = initializer_range + self.drop_path_rate = drop_path_rate + self.layer_norm_eps = layer_norm_eps + self.qkv_bias = qkv_bias + self.linear_attention = linear_attention + self.stage_names = [f"stage{idx}" for idx in range(1, len(depths) + 1)] + self._out_features, self._out_indices = get_aligned_output_features_output_indices( + out_features=out_features, out_indices=out_indices, stage_names=self.stage_names + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/convert_pvt_v2_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/convert_pvt_v2_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..e397cb244c0e0d1b460cd3b801d4c9a519f60d5a --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/convert_pvt_v2_to_pytorch.py @@ -0,0 +1,295 @@ +# coding=utf-8 +# Copyright 2023 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, +# Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert PvtV2 checkpoints from the original library.""" + +import argparse +from pathlib import Path + +import requests +import torch +from PIL import Image + +from transformers import PvtImageProcessor, PvtV2Config, PvtV2ForImageClassification +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + + +# here we list all keys to be renamed (original name on the left, our name on the right) +def create_rename_keys(config): + rename_keys = [] + for i in range(config.num_encoder_blocks): + # Remane embedings' paramters + rename_keys.append( + (f"patch_embed{i + 1}.proj.weight", f"pvt_v2.encoder.layers.{i}.patch_embedding.proj.weight") + ) + rename_keys.append((f"patch_embed{i + 1}.proj.bias", f"pvt_v2.encoder.layers.{i}.patch_embedding.proj.bias")) + rename_keys.append( + (f"patch_embed{i + 1}.norm.weight", f"pvt_v2.encoder.layers.{i}.patch_embedding.layer_norm.weight") + ) + rename_keys.append( + (f"patch_embed{i + 1}.norm.bias", f"pvt_v2.encoder.layers.{i}.patch_embedding.layer_norm.bias") + ) + rename_keys.append((f"norm{i + 1}.weight", f"pvt_v2.encoder.layers.{i}.layer_norm.weight")) + rename_keys.append((f"norm{i + 1}.bias", f"pvt_v2.encoder.layers.{i}.layer_norm.bias")) + + for j in range(config.depths[i]): + # Rename blocks' parameters + rename_keys.append( + (f"block{i + 1}.{j}.attn.q.weight", f"pvt_v2.encoder.layers.{i}.blocks.{j}.attention.query.weight") + ) + rename_keys.append( + (f"block{i + 1}.{j}.attn.q.bias", f"pvt_v2.encoder.layers.{i}.blocks.{j}.attention.query.bias") + ) + rename_keys.append( + (f"block{i + 1}.{j}.attn.kv.weight", f"pvt_v2.encoder.layers.{i}.blocks.{j}.attention.kv.weight") + ) + rename_keys.append( + (f"block{i + 1}.{j}.attn.kv.bias", f"pvt_v2.encoder.layers.{i}.blocks.{j}.attention.kv.bias") + ) + + if config.linear_attention or config.sr_ratios[i] > 1: + rename_keys.append( + ( + f"block{i + 
1}.{j}.attn.norm.weight", + f"pvt_v2.encoder.layers.{i}.blocks.{j}.attention.layer_norm.weight", + ) + ) + rename_keys.append( + ( + f"block{i + 1}.{j}.attn.norm.bias", + f"pvt_v2.encoder.layers.{i}.blocks.{j}.attention.layer_norm.bias", + ) + ) + rename_keys.append( + ( + f"block{i + 1}.{j}.attn.sr.weight", + f"pvt_v2.encoder.layers.{i}.blocks.{j}.attention.spatial_reduction.weight", + ) + ) + rename_keys.append( + ( + f"block{i + 1}.{j}.attn.sr.bias", + f"pvt_v2.encoder.layers.{i}.blocks.{j}.attention.spatial_reduction.bias", + ) + ) + + rename_keys.append( + (f"block{i + 1}.{j}.attn.proj.weight", f"pvt_v2.encoder.layers.{i}.blocks.{j}.attention.proj.weight") + ) + rename_keys.append( + (f"block{i + 1}.{j}.attn.proj.bias", f"pvt_v2.encoder.layers.{i}.blocks.{j}.attention.proj.bias") + ) + + rename_keys.append( + (f"block{i + 1}.{j}.norm1.weight", f"pvt_v2.encoder.layers.{i}.blocks.{j}.layer_norm_1.weight") + ) + rename_keys.append( + (f"block{i + 1}.{j}.norm1.bias", f"pvt_v2.encoder.layers.{i}.blocks.{j}.layer_norm_1.bias") + ) + + rename_keys.append( + (f"block{i + 1}.{j}.norm2.weight", f"pvt_v2.encoder.layers.{i}.blocks.{j}.layer_norm_2.weight") + ) + rename_keys.append( + (f"block{i + 1}.{j}.norm2.bias", f"pvt_v2.encoder.layers.{i}.blocks.{j}.layer_norm_2.bias") + ) + + rename_keys.append( + (f"block{i + 1}.{j}.mlp.fc1.weight", f"pvt_v2.encoder.layers.{i}.blocks.{j}.mlp.dense1.weight") + ) + rename_keys.append( + (f"block{i + 1}.{j}.mlp.fc1.bias", f"pvt_v2.encoder.layers.{i}.blocks.{j}.mlp.dense1.bias") + ) + rename_keys.append( + ( + f"block{i + 1}.{j}.mlp.dwconv.dwconv.weight", + f"pvt_v2.encoder.layers.{i}.blocks.{j}.mlp.dwconv.dwconv.weight", + ) + ) + rename_keys.append( + ( + f"block{i + 1}.{j}.mlp.dwconv.dwconv.bias", + f"pvt_v2.encoder.layers.{i}.blocks.{j}.mlp.dwconv.dwconv.bias", + ) + ) + rename_keys.append( + (f"block{i + 1}.{j}.mlp.fc2.weight", f"pvt_v2.encoder.layers.{i}.blocks.{j}.mlp.dense2.weight") + ) + rename_keys.append( + (f"block{i + 1}.{j}.mlp.fc2.bias", f"pvt_v2.encoder.layers.{i}.blocks.{j}.mlp.dense2.bias") + ) + + rename_keys.extend( + [ + ("head.weight", "classifier.weight"), + ("head.bias", "classifier.bias"), + ] + ) + + return rename_keys + + +# we split up the matrix of each encoder layer into queries, keys and values +def read_in_k_v(state_dict, config): + # for each of the encoder blocks: + for i in range(config.num_encoder_blocks): + for j in range(config.depths[i]): + # read in weights + bias of keys and values (which is a single matrix in the original implementation) + kv_weight = state_dict.pop(f"pvt_v2.encoder.layers.{i}.blocks.{j}.attention.kv.weight") + kv_bias = state_dict.pop(f"pvt_v2.encoder.layers.{i}.blocks.{j}.attention.kv.bias") + # next, add keys and values (in that order) to the state dict + state_dict[f"pvt_v2.encoder.layers.{i}.blocks.{j}.attention.key.weight"] = kv_weight[ + : config.hidden_sizes[i], : + ] + state_dict[f"pvt_v2.encoder.layers.{i}.blocks.{j}.attention.key.bias"] = kv_bias[: config.hidden_sizes[i]] + + state_dict[f"pvt_v2.encoder.layers.{i}.blocks.{j}.attention.value.weight"] = kv_weight[ + config.hidden_sizes[i] :, : + ] + state_dict[f"pvt_v2.encoder.layers.{i}.blocks.{j}.attention.value.bias"] = kv_bias[ + config.hidden_sizes[i] : + ] + + +def rename_key(dct, old, new): + val = dct.pop(old) + dct[new] = val + + +# We will verify our results on an image of cute cats +def prepare_img(): + url = "http://images.cocodataset.org/val2017/000000039769.jpg" + im = Image.open(requests.get(url, stream=True).raw) + return im + 
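+
+# Illustrative note on `read_in_k_v` above: the original checkpoints store the key and value
+# projections as a single fused matrix, which is split by slicing along dim 0. A minimal sketch
+# (shapes as implied by the slicing in `read_in_k_v`):
+#
+#   hidden_size = config.hidden_sizes[i]
+#   kv_weight = state_dict[f"pvt_v2.encoder.layers.{i}.blocks.{j}.attention.kv.weight"]  # (2 * hidden_size, hidden_size)
+#   key_weight = kv_weight[:hidden_size, :]    # first half of the rows  -> key projection
+#   value_weight = kv_weight[hidden_size:, :]  # second half of the rows -> value projection
+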
+ +@torch.no_grad() +def convert_pvt_v2_checkpoint(pvt_v2_size, pvt_v2_checkpoint, pytorch_dump_folder_path, verify_imagenet_weights=False): + """ + Copy/paste/tweak model's weights to our PVT structure. + """ + + # define default PvtV2 configuration + if pvt_v2_size == "b0": + config_path = "OpenGVLab/pvt_v2_b0" + elif pvt_v2_size == "b1": + config_path = "OpenGVLab/pvt_v2_b1" + elif pvt_v2_size == "b2": + config_path = "OpenGVLab/pvt_v2_b2" + elif pvt_v2_size == "b2-linear": + config_path = "OpenGVLab/pvt_v2_b2_linear" + elif pvt_v2_size == "b3": + config_path = "OpenGVLab/pvt_v2_b3" + elif pvt_v2_size == "b4": + config_path = "OpenGVLab/pvt_v2_b4" + elif pvt_v2_size == "b5": + config_path = "OpenGVLab/pvt_v2_b5" + else: + raise ValueError( + f"Available model sizes: 'b0', 'b1', 'b2', 'b2-linear', 'b3', 'b4', 'b5', but " + f"'{pvt_v2_size}' was given" + ) + config = PvtV2Config.from_pretrained(config_path) + # load original model from https://github.com/whai362/PVT + state_dict = torch.load(pvt_v2_checkpoint, map_location="cpu") + + rename_keys = create_rename_keys(config) + for src, dest in rename_keys: + rename_key(state_dict, src, dest) + read_in_k_v(state_dict, config) + + # load HuggingFace model + model = PvtV2ForImageClassification(config).eval() + model.load_state_dict(state_dict) + image_processor = PvtImageProcessor(size=config.image_size) + + if verify_imagenet_weights: + # Check outputs on an image, prepared by PvtImageProcessor + print("Verifying conversion of pretrained ImageNet weights...") + encoding = image_processor(images=prepare_img(), return_tensors="pt") + pixel_values = encoding["pixel_values"] + outputs = model(pixel_values) + logits = outputs.logits.detach().cpu() + + if pvt_v2_size == "b0": + expected_slice_logits = torch.tensor([-1.1939, -1.4547, -0.1076]) + elif pvt_v2_size == "b1": + expected_slice_logits = torch.tensor([-0.4716, -0.7335, -0.4600]) + elif pvt_v2_size == "b2": + expected_slice_logits = torch.tensor([0.0795, -0.3170, 0.2247]) + elif pvt_v2_size == "b2-linear": + expected_slice_logits = torch.tensor([0.0968, 0.3937, -0.4252]) + elif pvt_v2_size == "b3": + expected_slice_logits = torch.tensor([-0.4595, -0.2870, 0.0940]) + elif pvt_v2_size == "b4": + expected_slice_logits = torch.tensor([-0.1769, -0.1747, -0.0143]) + elif pvt_v2_size == "b5": + expected_slice_logits = torch.tensor([-0.2943, -0.1008, 0.6812]) + else: + raise ValueError( + f"Available model sizes: 'b0', 'b1', 'b2', 'b2-linear', 'b3', 'b4', 'b5', but " + f"'{pvt_v2_size}' was given" + ) + + assert torch.allclose( + logits[0, :3], expected_slice_logits, atol=1e-4 + ), "ImageNet weights not converted successfully." 
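+        # Note (added for clarity): `torch.allclose(a, b, atol=1e-4)` passes when
+        # |a - b| <= atol + rtol * |b| elementwise (rtol defaults to 1e-5), so minor
+        # numerical drift from the conversion is tolerated while genuine mismatches
+        # still trip the assertion above.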
+ + print("ImageNet weights verified, conversion successful.") + + Path(pytorch_dump_folder_path).mkdir(exist_ok=True) + print(f"Saving model pytorch_model.bin to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + print(f"Saving image processor to {pytorch_dump_folder_path}") + image_processor.save_pretrained(pytorch_dump_folder_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--pvt_v2_size", + default="b0", + type=str, + help="Size of the PVTv2 pretrained model you'd like to convert.", + ) + parser.add_argument( + "--pvt_v2_checkpoint", + default="pvt_v2_b0.pth", + type=str, + help="Checkpoint of the PVTv2 pretrained model you'd like to convert.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." + ) + parser.add_argument( + "--verify-imagenet-weights", + action="store_true", + default=False, + help="Verifies the correct conversion of author-published pretrained ImageNet weights.", + ) + + args = parser.parse_args() + convert_pvt_v2_checkpoint( + pvt_v2_size=args.pvt_v2_size, + pvt_v2_checkpoint=args.pvt_v2_checkpoint, + pytorch_dump_folder_path=args.pytorch_dump_folder_path, + verify_imagenet_weights=args.verify_imagenet_weights, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/modeling_pvt_v2.py b/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/modeling_pvt_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..a2e1e7a674524f52be90e8d05b05db462d8aab3c --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/pvt_v2/modeling_pvt_v2.py @@ -0,0 +1,700 @@ +# coding=utf-8 +# Copyright 2024 Authors: Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, +# Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao and The HuggingFace Inc. team. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""PyTorch PVTv2 model.""" + +import math +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import BackboneOutput, BaseModelOutput, ImageClassifierOutput +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from ...utils.backbone_utils import BackboneMixin +from .configuration_pvt_v2 import PvtV2Config + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "PvtV2Config" + +_CHECKPOINT_FOR_DOC = "OpenGVLab/pvt_v2_b0" +_EXPECTED_OUTPUT_SHAPE = [1, 256, 7, 7] + +_IMAGE_CLASS_CHECKPOINT = "OpenGVLab/pvt_v2_b0" +_IMAGE_CLASS_EXPECTED_OUTPUT = "LABEL_281" # ImageNet ID for "tabby, tabby cat" + + +# Copied from transformers.models.beit.modeling_beit.drop_path +def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: + """ + Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, + however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the + layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the + argument. + """ + if drop_prob == 0.0 or not training: + return input + keep_prob = 1 - drop_prob + shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) + random_tensor.floor_() # binarize + output = input.div(keep_prob) * random_tensor + return output + + +# Copied from transformers.models.convnext.modeling_convnext.ConvNextDropPath with ConvNext->Pvt +class PvtV2DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob: Optional[float] = None) -> None: + super().__init__() + self.drop_prob = drop_prob + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + return drop_path(hidden_states, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return "p={}".format(self.drop_prob) + + +class PvtV2OverlapPatchEmbeddings(nn.Module): + """Image to Patch Embedding""" + + def __init__(self, config: PvtV2Config, layer_idx: int): + super().__init__() + patch_size = config.patch_sizes[layer_idx] + patch_size = (patch_size, patch_size) if isinstance(patch_size, int) else patch_size + stride = config.strides[layer_idx] + num_channels = config.num_channels if layer_idx == 0 else config.hidden_sizes[layer_idx - 1] + hidden_size = config.hidden_sizes[layer_idx] + self.patch_size = patch_size + self.proj = nn.Conv2d( + num_channels, + hidden_size, + kernel_size=patch_size, + stride=stride, + padding=(patch_size[0] // 2, patch_size[1] // 2), + ) + self.layer_norm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) + + def forward(self, pixel_values): + embeddings = self.proj(pixel_values) + _, _, height, width = 
embeddings.shape + embeddings = embeddings.flatten(2).transpose(1, 2) + embeddings = self.layer_norm(embeddings) + return embeddings, height, width + + +class PvtV2DepthWiseConv(nn.Module): + """ + Depth-wise (DW) convolution to infuse positional information using zero-padding. Depth-wise convolutions + have an equal number of groups to the number of input channels, meaning one filter per input channel. This + reduces the overall parameters and compute costs since the key purpose of this layer is position encoding. + """ + + def __init__(self, config: PvtV2Config, dim: int = 768): + super().__init__() + self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim) + + def forward(self, hidden_states, height, width): + batch_size, seq_len, num_channels = hidden_states.shape + hidden_states = hidden_states.transpose(1, 2).view(batch_size, num_channels, height, width) + hidden_states = self.dwconv(hidden_states) + hidden_states = hidden_states.flatten(2).transpose(1, 2) + + return hidden_states + + +class PvtV2SelfAttention(nn.Module): + """Efficient self-attention mechanism.""" + + def __init__(self, config: PvtV2Config, hidden_size: int, num_attention_heads: int, spatial_reduction_ratio: int): + super().__init__() + self.linear_attention = config.linear_attention + self.pruned_heads = set() + self.hidden_size = hidden_size + self.num_attention_heads = num_attention_heads + + if self.hidden_size % self.num_attention_heads != 0: + raise ValueError( + f"The hidden size ({self.hidden_size}) is not a multiple of the number of attention " + f"heads ({self.num_attention_heads})" + ) + + self.attention_head_size = int(self.hidden_size / self.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) + self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) + self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=config.qkv_bias) + self.attn_drop = nn.Dropout(config.attention_probs_dropout_prob) + self.proj = nn.Linear(self.hidden_size, self.hidden_size) + self.proj_drop = nn.Dropout(config.hidden_dropout_prob) + + self.spatial_reduction_ratio = spatial_reduction_ratio + if self.linear_attention: + self.pool = nn.AdaptiveAvgPool2d(7) + self.spatial_reduction = nn.Conv2d(self.hidden_size, self.hidden_size, kernel_size=1, stride=1) + self.layer_norm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps) + self.act = nn.GELU() + elif spatial_reduction_ratio > 1: + self.spatial_reduction = nn.Conv2d( + self.hidden_size, self.hidden_size, kernel_size=spatial_reduction_ratio, stride=spatial_reduction_ratio + ) + self.layer_norm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps) + + def transpose_for_scores(self, hidden_states) -> torch.Tensor: + new_shape = hidden_states.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + hidden_states = hidden_states.view(new_shape) + return hidden_states.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + height: int, + width: int, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor]: + batch_size, seq_len, num_channels = hidden_states.shape + query_layer = self.transpose_for_scores(self.query(hidden_states)) + + if self.linear_attention: + hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width) + hidden_states = ( + self.spatial_reduction(self.pool(hidden_states)).reshape(batch_size, num_channels, 
-1).permute(0, 2, 1) + ) + hidden_states = self.act(self.layer_norm(hidden_states)) + elif self.spatial_reduction_ratio > 1: + hidden_states = hidden_states.permute(0, 2, 1).reshape(batch_size, num_channels, height, width) + hidden_states = ( + self.spatial_reduction(hidden_states).reshape(batch_size, num_channels, -1).permute(0, 2, 1) + ) + hidden_states = self.layer_norm(hidden_states) + + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.attn_drop(attention_probs) + context_layer = (attention_probs @ value_layer).transpose(1, 2).reshape(batch_size, seq_len, num_channels) + context_layer = self.proj(context_layer) + context_layer = self.proj_drop(context_layer) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + return outputs + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.query = prune_linear_layer(self.query, index) + self.key = prune_linear_layer(self.key, index) + self.value = prune_linear_layer(self.value, index) + self.proj = prune_linear_layer(self.proj, index, dim=1) + + # Update hyper params and store pruned heads + self.num_attention_heads = self.num_attention_heads - len(heads) + self.all_head_size = self.attention_head_size * self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + +class PvtV2ConvFeedForwardNetwork(nn.Module): + def __init__( + self, + config: PvtV2Config, + in_features: int, + hidden_features: Optional[int] = None, + out_features: Optional[int] = None, + ): + super().__init__() + out_features = out_features if out_features is not None else in_features + self.dense1 = nn.Linear(in_features, hidden_features) + self.dwconv = PvtV2DepthWiseConv(config, hidden_features) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + self.dense2 = nn.Linear(hidden_features, out_features) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.relu = nn.ReLU() if config.linear_attention else nn.Identity() + + def forward(self, hidden_states: torch.Tensor, height, width) -> torch.Tensor: + hidden_states = self.dense1(hidden_states) + hidden_states = self.relu(hidden_states) + hidden_states = self.dwconv(hidden_states, height, width) + hidden_states = self.intermediate_act_fn(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.dense2(hidden_states) + hidden_states = self.dropout(hidden_states) + return hidden_states + + +class PvtV2BlockLayer(nn.Module): + def __init__(self, config: PvtV2Config, layer_idx: int, drop_path: float = 0.0): + super().__init__() + hidden_size: int = config.hidden_sizes[layer_idx] + num_attention_heads: int = config.num_attention_heads[layer_idx] + 
spatial_reduction_ratio: int = config.sr_ratios[layer_idx] + mlp_ratio: float = config.mlp_ratios[layer_idx] + self.layer_norm_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) + self.attention = PvtV2SelfAttention( + config=config, + hidden_size=hidden_size, + num_attention_heads=num_attention_heads, + spatial_reduction_ratio=spatial_reduction_ratio, + ) + self.drop_path = PvtV2DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + self.layer_norm_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps) + mlp_hidden_size = int(hidden_size * mlp_ratio) + self.mlp = PvtV2ConvFeedForwardNetwork(config=config, in_features=hidden_size, hidden_features=mlp_hidden_size) + + def forward(self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool = False): + self_attention_outputs = self.attention( + hidden_states=self.layer_norm_1(hidden_states), + height=height, + width=width, + output_attentions=output_attentions, + ) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:] + + attention_output = self.drop_path(attention_output) + hidden_states = attention_output + hidden_states + + mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width) + + mlp_output = self.drop_path(mlp_output) + layer_output = hidden_states + mlp_output + + outputs = (layer_output,) + outputs + + return outputs + + +class PvtV2EncoderLayer(nn.Module): + def __init__(self, config: PvtV2Config, layer_idx: int): + super().__init__() + self.patch_embedding = PvtV2OverlapPatchEmbeddings( + config=config, + layer_idx=layer_idx, + ) + # Transformer block + # stochastic depth decay rule + drop_path_decays = torch.linspace(0, config.drop_path_rate, sum(config.depths)).tolist() + block_layers = [] + for block_idx in range(config.depths[layer_idx]): + block_layers.append( + PvtV2BlockLayer( + config=config, + layer_idx=layer_idx, + drop_path=drop_path_decays[sum(config.depths[:layer_idx]) + block_idx], + ) + ) + self.blocks = nn.ModuleList(block_layers) + + # Layer norm + self.layer_norm = nn.LayerNorm(config.hidden_sizes[layer_idx], eps=config.layer_norm_eps) + + def forward(self, hidden_states, output_attentions): + all_self_attentions = () if output_attentions else None + # first, obtain patch embeddings + hidden_states, height, width = self.patch_embedding(hidden_states) + # second, send embeddings through blocks + for block in self.blocks: + layer_outputs = block(hidden_states, height, width, output_attentions) + hidden_states = layer_outputs[0] + if output_attentions: + all_self_attentions += (layer_outputs[1],) + # third, apply layer norm + hidden_states = self.layer_norm(hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (all_self_attentions,) + + return outputs, height, width + + +class PvtV2Encoder(nn.Module): + def __init__(self, config: PvtV2Config): + super().__init__() + self.config = config + self.gradient_checkpointing = False + + # encoder layers + self.layers = nn.ModuleList([PvtV2EncoderLayer(config, i) for i in range(config.num_encoder_blocks)]) + + def forward( + self, + pixel_values: torch.FloatTensor, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple, BaseModelOutput]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + + batch_size = pixel_values.shape[0] + hidden_states = pixel_values + for idx, layer in enumerate(self.layers): + if 
self.gradient_checkpointing and self.training: + layer_output = self._gradient_checkpointing_func(layer.__call__, hidden_states, output_attentions) + else: + layer_output = layer(hidden_states, output_attentions) + outputs, height, width = layer_output + hidden_states = outputs[0] + if output_attentions: + all_self_attentions = all_self_attentions + (outputs[1],) + # reshape back to (batch_size, num_channels, height, width) + hidden_states = hidden_states.reshape(batch_size, height, width, -1).permute(0, 3, 1, 2).contiguous() + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + +class PvtV2PreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = PvtV2Config + base_model_prefix = "pvt_v2" + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + + def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: + """Initialize the weights""" + if isinstance(module, nn.Linear): + # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid + # `trunc_normal_cpu` not implemented in `half` issues + module.weight.data = nn.init.trunc_normal_(module.weight.data, mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + elif isinstance(module, nn.Conv2d): + fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels + fan_out //= module.groups + module.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) + if module.bias is not None: + module.bias.data.zero_() + + +PVT_V2_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use + it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`~PvtV2Config`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +PVT_V2_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See + [`PvtImageProcessor.__call__`] for details. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+""" + + +@add_start_docstrings( + "The bare Pvt-v2 encoder outputting raw hidden-states without any specific head on top.", + PVT_V2_START_DOCSTRING, +) +class PvtV2Model(PvtV2PreTrainedModel): + def __init__(self, config: PvtV2Config): + super().__init__(config) + self.config = config + + # hierarchical Transformer encoder + self.encoder = PvtV2Encoder(config) + + # Initialize weights and apply final processing + self.post_init() + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(PVT_V2_INPUTS_DOCSTRING.format("(batch_size, channels, height, width)")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutput, + config_class=_CONFIG_FOR_DOC, + modality="vision", + expected_output=_EXPECTED_OUTPUT_SHAPE, + ) + def forward( + self, + pixel_values: torch.FloatTensor, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + encoder_outputs = self.encoder( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + + if not return_dict: + return (sequence_output,) + encoder_outputs[1:] + + return BaseModelOutput( + last_hidden_state=sequence_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +@add_start_docstrings( + """ + Pvt-v2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state + of the [CLS] token) e.g. for ImageNet. + """, + PVT_V2_START_DOCSTRING, +) +class PvtV2ForImageClassification(PvtV2PreTrainedModel): + def __init__(self, config: PvtV2Config) -> None: + super().__init__(config) + + self.num_labels = config.num_labels + self.pvt_v2 = PvtV2Model(config) + + # Classifier head + self.classifier = ( + nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity() + ) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(PVT_V2_INPUTS_DOCSTRING.format("(batch_size, channels, height, width)")) + @add_code_sample_docstrings( + checkpoint=_IMAGE_CLASS_CHECKPOINT, + output_type=ImageClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, + ) + def forward( + self, + pixel_values: Optional[torch.Tensor], + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, ImageClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the image classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.pvt_v2( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + # convert last hidden states to (batch_size, height*width, hidden_size) + batch_size = sequence_output.shape[0] + # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels) + sequence_output = sequence_output.permute(0, 2, 3, 1) + sequence_output = sequence_output.reshape(batch_size, -1, self.config.hidden_sizes[-1]) + + # global average pooling + sequence_output = sequence_output.mean(dim=1) + + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return ImageClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + PVTv2 backbone, to be used with frameworks like DETR and MaskFormer. + """, + PVT_V2_START_DOCSTRING, +) +class PvtV2Backbone(PvtV2Model, BackboneMixin): + def __init__(self, config: PvtV2Config): + super().__init__(config) + super()._init_backbone(config) + self.num_features = config.hidden_sizes + + @add_start_docstrings_to_model_forward(PVT_V2_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: torch.FloatTensor, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> BackboneOutput: + """ + Returns: + + Examples: + + ```python + >>> from transformers import AutoImageProcessor, AutoBackbone + >>> import torch + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> processor = AutoImageProcessor.from_pretrained("OpenGVLab/pvt_v2_b0") + >>> model = AutoBackbone.from_pretrained( + ... "OpenGVLab/pvt_v2_b0", out_features=["stage1", "stage2", "stage3", "stage4"] + ... 
) + + >>> inputs = processor(image, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> feature_maps = outputs.feature_maps + >>> list(feature_maps[-1].shape) + [1, 256, 7, 7] + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + + outputs = self.encoder( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=True, + return_dict=return_dict, + ) + + hidden_states = outputs.hidden_states + + feature_maps = () + for idx, stage in enumerate(self.stage_names): + if stage in self.out_features: + feature_maps += (hidden_states[idx],) + + if not return_dict: + output = (feature_maps,) + if output_hidden_states: + output += (outputs.hidden_states,) + return output + + return BackboneOutput( + feature_maps=feature_maps, + hidden_states=outputs.hidden_states if output_hidden_states else None, + attentions=None, + )
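+
+
+# Illustrative usage sketch (not part of the upstream module): after converting a
+# checkpoint with the companion conversion script, classification inference with
+# the classes defined above would look roughly like the hypothetical helper below.
+# The default directory name is a placeholder for the conversion script's
+# `--pytorch_dump_folder_path`.
+def _example_pvt_v2_image_classification(model_dir: str = "path/to/converted/pvt_v2_b0") -> int:
+    import requests
+    from PIL import Image
+
+    from transformers import AutoImageProcessor
+
+    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+    image = Image.open(requests.get(url, stream=True).raw)
+
+    image_processor = AutoImageProcessor.from_pretrained(model_dir)
+    model = PvtV2ForImageClassification.from_pretrained(model_dir).eval()
+
+    inputs = image_processor(images=image, return_tensors="pt")
+    with torch.no_grad():
+        logits = model(**inputs).logits
+    # Return the index of the highest-scoring ImageNet class.
+    return int(logits.argmax(-1).item())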